diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000..96e93e796a --- /dev/null +++ b/.clang-format @@ -0,0 +1,20 @@ +--- +# This section defines defaults for all languages. Currently we derive ANTLR style from LLVM. +BasedOnStyle: LLVM +# Only use clang-format for C++ for now. +DisableFormat: true + +--- +# This section configures C++ formatting. +Language: Cpp +DisableFormat: false +Standard: c++17 +# Prevent clang-format from attempting to pick the alignment and always use right alignment. +DerivePointerAlignment: false +# ANTLR existing style is to right align pointers and references. +PointerAlignment: Right +ReferenceAlignment: Right +# Some of ANTLR existing code is longer than the default 80, so use 100 for now. +ColumnLimit: 100 +# Historically ANTLR has used indentation within namespaces, so replicate it. +NamespaceIndentation: Inner diff --git a/.editorconfig b/.editorconfig index daa6da0fb6..1c32f7d661 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,3 +8,11 @@ charset = utf-8 insert_final_newline = true trim_trailing_whitespace = true indent_style = tab +ij_java_else_on_new_line = true + +[*.{h,cpp}] +charset = utf-8 +insert_final_newline = true +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 diff --git a/.gitattributes b/.gitattributes index 5edc9289fe..3bacbd399f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,2 @@ # This rule applies to all files which don't match another line below -* text=auto +* text=auto \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 30d0c32aa6..820db5243d 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,8 +1,15 @@ diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 8ac81421f4..686759a7a9 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,23 @@ \ No newline at end of file +(Please make sure your PR is in 
a branch other than dev or master + and also make sure that you derive this branch from dev.) + +As of 4.10, ANTLR uses the Linux Foundation's Developer +Certificate of Origin, DCO, version 1.1. See either +https://developercertificate.org/ or file +contributors-cert-of-origin.txt in the main directory. + +Each commit requires a "signature", which is simple as +using `-s` (not `-S`) to the git commit command: + +git commit -s -m 'This is my commit message' + +Github's pull request process enforces the sig and gives +instructions on how to fix any commits that lack the sig. +See https://github.com/apps/dco for more info. + +No signature is required in this file (unlike the +previous ANTLR contributor's certificate of origin.) +--> diff --git a/.github/workflows/hosted.yml b/.github/workflows/hosted.yml new file mode 100644 index 0000000000..43a2e0f985 --- /dev/null +++ b/.github/workflows/hosted.yml @@ -0,0 +1,342 @@ +name: antlr4 + +concurrency: + group: ${{ github.repository }}-${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + push: + branches: [ master, dev, hostedci ] + pull_request: + branches: [ master, dev ] + +permissions: + contents: read + +jobs: + cpp-lib-build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ + macos-15, + ubuntu-22.04, + windows-2022 + ] + compiler: [ clang, gcc ] + unity_build: [ ON, OFF ] + exclude: + - os: windows-2022 + compiler: gcc + include: + - os: windows-2022 + compiler: cl + + steps: + - name: Install dependencies (Ubuntu) + if: startswith(matrix.os, 'ubuntu') + run: | + sudo apt-get update -qq + sudo apt install -y ninja-build + + - name: Install dependencies (MacOS) + if: startswith(matrix.os, 'macos') + run: brew install ninja + + - name: Setup Clang + if: (matrix.compiler == 'clang') && !startswith(matrix.os, 'macos') + uses: egor-tensin/setup-clang@v1 + with: + version: 13 + platform: x64 + cygwin: 0 + + - name: Check out code + uses: actions/checkout@v3 + + - 
name: Use ccache + if: startswith(matrix.os, 'macos') || startswith(matrix.os, 'ubuntu') + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ matrix.os }}-${{ matrix.compiler }} + + - name: Configure shell (Ubuntu) + if: startswith(matrix.os, 'ubuntu') + run: echo 'PATH=/usr/lib/ccache:'"$PATH" >> $GITHUB_ENV + + - name: Configure shell (MacOS) + if: startswith(matrix.os, 'macos') + run: echo "PATH=$(brew --prefix)/opt/ccache/libexec:$PATH" >> $GITHUB_ENV + + - name: Build (Windows) + if: startswith(matrix.os, 'windows') + shell: cmd + run: | + call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat" + + if "${{ matrix.compiler }}" EQU "cl" ( + set CC=cl + set CXX=cl + echo 'CC=cl' >> $GITHUB_ENV + echo 'CXX=cl' >> $GITHUB_ENV + ) else ( + set CC=clang + set CXX=clang++ + echo 'CC=clang' >> $GITHUB_ENV + echo 'CXX=clang++' >> $GITHUB_ENV + ) + + set + where cmake && cmake --version + where ninja && ninja --version + where %CC% && %CC% -version + where %CXX% && %CXX% -version + + cd runtime/Cpp + + cmake -G Ninja -DCMAKE_BUILD_TYPE=Debug -DANTLR_BUILD_CPP_TESTS=OFF -DCMAKE_UNITY_BUILD=${{ matrix.unity_build }} -DCMAKE_UNITY_BUILD_BATCH_SIZE=20 -S . -B out/Debug + if %errorlevel% neq 0 exit /b %errorlevel% + + cmake --build out/Debug -j %NUMBER_OF_PROCESSORS% + if %errorlevel% neq 0 exit /b %errorlevel% + + cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DANTLR_BUILD_CPP_TESTS=OFF -S . 
-B out/Release + if %errorlevel% neq 0 exit /b %errorlevel% + + cmake --build out/Release -j %NUMBER_OF_PROCESSORS% + if %errorlevel% neq 0 exit /b %errorlevel% + + - name: Build (non-Windows) + if: startswith(matrix.os, 'macos') || startswith(matrix.os, 'ubuntu') + run: | + if [ "${{matrix.compiler}}" == "clang" ]; then + export CC=clang + export CXX=clang++ + echo 'CC=clang' >> $GITHUB_ENV + echo 'CXX=clang++' >> $GITHUB_ENV + else + export CC=gcc + export CXX=g++ + echo 'CC=gcc' >> $GITHUB_ENV + echo 'CXX=g++' >> $GITHUB_ENV + fi + + env + which cmake && cmake --version + which ninja && ninja --version + which $CC && $CC --version + which $CXX && $CXX --version + + cd runtime/Cpp + + cmake -G Ninja -DCMAKE_BUILD_TYPE=Debug -DANTLR_BUILD_CPP_TESTS=OFF -DCMAKE_UNITY_BUILD=${{ matrix.unity_build }} -DCMAKE_UNITY_BUILD_BATCH_SIZE=20 -S . -B out/Debug + cmake --build out/Debug --parallel + + cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DANTLR_BUILD_CPP_TESTS=OFF -S . -B out/Release + cmake --build out/Release --parallel + + - name: Prepare artifacts + if: always() + run: | + cd ${{ github.workspace }}/.. + tar czfp antlr_${{ matrix.os }}_${{ matrix.compiler }}.tgz --exclude='.git' antlr4 + mv antlr_${{ matrix.os }}_${{ matrix.compiler }}.tgz ${{ github.workspace }}/. 
+ + - name: Archive artifacts + if: always() + continue-on-error: true + uses: actions/upload-artifact@v4 + with: + name: antlr_${{ matrix.os }}_${{ matrix.compiler }} + path: antlr_${{ matrix.os }}_${{ matrix.compiler }}.tgz + + + build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ + macos-15, + ubuntu-22.04, + windows-2022 + ] + target: [ + tool, + cpp, + csharp, + dart, + go, + java, + javascript, + typescript, + php, + python3, + # swift, + ] + exclude: + - os: windows-2022 + target: swift + + steps: + # Check out the code before setting the environment since some + # of the actions actually parse the files to figure out the + # dependencies, for instance, the setup-java actually parses + # **/pom.xml files to decide what to cache. + - name: Check out code + uses: actions/checkout@v3 + + - name: Checkout antlr PHP runtime + if: matrix.target == 'php' + uses: actions/checkout@v3 + with: + repository: antlr/antlr-php-runtime + path: runtime/PHP + + - name: Setup PHP 8.2 + if: matrix.target == 'php' + uses: shivammathur/setup-php@v2 + with: + php-version: '8.3' + extensions: mbstring + tools: composer + + - name: Install dependencies + env: + COMPOSER_CACHE_DIR: ${{ github.workspace }}/.cache + if: matrix.target == 'php' + run: |- + cd runtime/PHP + composer install --no-progress --no-interaction --prefer-dist --optimize-autoloader + + - name: Install dependencies (Ubuntu) + if: startswith(matrix.os, 'ubuntu') + run: | + sudo apt-get update -qq + sudo apt install -y ninja-build + + - name: Install dependencies (MacOS) + if: startswith(matrix.os, 'macos') + run: brew install ninja + + - name: Set up JDK 11 + id: setup-java + uses: actions/setup-java@v3 + with: + distribution: 'zulu' + java-version: 11 + cache: 'maven' + + - name: Set up Maven + if: steps.setup-java.outputs.cache-hit != 'true' + uses: stCarolas/setup-maven@v4.5 + with: + maven-version: 3.8.5 + + - name: Add msbuild to PATH + if: startswith(matrix.os, 'windows') && 
(matrix.target == 'cpp') + uses: microsoft/setup-msbuild@v1.1 + + - name: Set up Python 3 + if: matrix.target == 'python3' + uses: actions/setup-python@v4 + with: + python-version: '3.x' + architecture: 'x64' + + - name: Set up Node 16 + if: (matrix.target == 'javascript') || (matrix.target == 'typescript') + uses: actions/setup-node@v3.6.0 + with: + node-version: '16' + + - name: Setup Dotnet + if: matrix.target == 'csharp' + uses: actions/setup-dotnet@v3.0.3 + with: + dotnet-version: '7.0.x' + + - name: Setup Dart 2.12.1 + if: matrix.target == 'dart' + uses: dart-lang/setup-dart@v1.3 + with: + sdk: 2.12.1 + + - name: Setup Go 1.19 + if: matrix.target == 'go' + uses: actions/setup-go@v3.3.1 + with: + go-version: '^1.19' + + - name: Setup Swift + if: matrix.target == 'swift' + uses: swift-actions/setup-swift@v1.19.0 + with: + swift-version: '5.2' + + - name: Use ccache + if: (startswith(matrix.os, 'macos') || startswith(matrix.os, 'ubuntu')) && (matrix.target == 'cpp') + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ matrix.os }}-${{ matrix.target }} + + - name: Configure shell (Ubuntu) + if: startswith(matrix.os, 'ubuntu') && (matrix.target == 'cpp') + run: echo 'PATH=/usr/lib/ccache:'"$PATH" >> $GITHUB_ENV + + - name: Configure shell (MacOS) + if: startswith(matrix.os, 'macos') && (matrix.target == 'cpp') + run: echo "PATH=$(brew --prefix)/opt/ccache/libexec:$PATH" >> $GITHUB_ENV + + - name: Build ANTLR with Maven + run: mvn install -DskipTests=true -Darguments="-Dmaven.javadoc.skip=true" -B -V + + - name: Test tool + if: matrix.target == 'tool' + run: | + cd tool-testsuite + mvn test + + - name: Test runtime (Windows) + if: startsWith(matrix.os, 'windows') && (matrix.target != 'tool') + run: | + gci env:* | sort-object name + + cd runtime-testsuite + switch ("${{ matrix.target }}") + { + python3 { mvn -X '-Dantlr-python3-exec="${{ env.pythonLocation }}\python.exe"' '-Dtest=python3.**' test } + default { mvn -X '-Dtest=${{ matrix.target }}.**' test } + } 
+ + env: + CMAKE_GENERATOR: Ninja + + - name: Test runtime (non-Windows) + if: (startsWith(matrix.os, 'ubuntu') || startsWith(matrix.os, 'macos')) && (matrix.target != 'tool') + run: | + env + + cd runtime-testsuite + case ${{ matrix.target }} in + python3) mvn -X '-Dantlr-python3-exec=${{ env.pythonLocation }}/bin/python' '-Dtest=python3.**' test ;; + *) mvn -X '-Dtest=${{ matrix.target }}.**' test ;; + esac + + - name: Prepare artifacts + if: always() + run: | + cd ${{ github.workspace }}/.. + tar czfp antlr_${{ matrix.os }}_${{ matrix.target }}.tgz --exclude='.git' antlr4 + mv antlr_${{ matrix.os }}_${{ matrix.target }}.tgz ${{ github.workspace }}/. + + - name: Archive artifacts + if: always() + continue-on-error: true + uses: actions/upload-artifact@v4 + with: + name: antlr_${{ matrix.os }}_${{ matrix.target }} + path: antlr_${{ matrix.os }}_${{ matrix.target }}.tgz diff --git a/.gitignore b/.gitignore index adc8e5163d..b6ea50bc29 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Nuget packages +*.nupkg + # Maven build folders target/ # ... 
but not code generation targets @@ -21,6 +24,8 @@ user.build.properties __pycache__/ *.py[cod] *$py.class +# Build results +*.egg-info/ ## CSharp and VisualStudio, selected lines from https://raw.githubusercontent.com/github/gitignore/master/VisualStudio.gitignore # User-specific files @@ -29,9 +34,8 @@ __pycache__/ *.userosscache *.sln.docstates -# User-specific files (MonoDevelop/Xamarin Studio) +# User-specific files (MonoDevelop/Xamarin Studio/Visual Studio) *.userprefs -*.user .vs/ project.lock.json @@ -47,9 +51,6 @@ bld/ [Oo]bj/ [Ll]og/ -# Visual Studio 2015 cache/options directory -.vs/ - # NetBeans user configuration files nbactions*.xml /nbproject/private/ @@ -82,6 +83,9 @@ nbactions*.xml /gen4/ /tool/playground/ tmp/ +**/generatedCode/*.interp +**/generatedCode/*.tokens +**/generatedCode/*.bak # Configurable build files bilder.py @@ -97,4 +101,41 @@ xcuserdata # VSCode Java plugin temporary files javac-services.0.log javac-services.0.log.lck -test/ + +# Don't ignore python tests +!runtime/Python3/test/ +Antlr4.sln +runtime/PHP + +# Swift binaries +.build/ + +# Code coverage reports +coverage/ + +# Cpp generated build files +runtime/Cpp/CMakeCache.txt +runtime/Cpp/CMakeFiles/ +runtime/Cpp/CPackConfig.cmake +runtime/Cpp/CPackSourceConfig.cmake +runtime/Cpp/CTestTestfile.cmake +runtime/Cpp/Makefile +runtime/Cpp/_deps/ +runtime/Cpp/cmake_install.cmake +runtime/Cpp/runtime/CMakeFiles/ +runtime/Cpp/runtime/CTestTestfile.cmake +runtime/Cpp/runtime/Makefile +runtime/Cpp/runtime/antlr4_tests +runtime/Cpp/runtime/antlr4_tests\[1]_include.cmake +runtime/Cpp/runtime/antlr4_tests\[1]_tests.cmake +runtime/Cpp/runtime/cmake_install.cmake +runtime/Cpp/runtime/libantlr4-runtime.4.10.1.dylib +runtime/Cpp/runtime/libantlr4-runtime.a +runtime/Cpp/runtime/libantlr4-runtime.dylib +/runtime/Cpp/runtime/libantlr4-runtime.4.12.0.dylib +/runtime/Cpp/runtime/libantlr4-runtime.4.13.0.dylib + +# Go test and performance trace files +**/*.pprof +*.interp +*.tokens diff --git 
a/.gitmodules b/.gitmodules deleted file mode 100644 index 7944e8e32f..0000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "runtime/PHP"] - path = runtime/PHP - url = https://github.com/antlr/antlr-php-runtime.git diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index aac274bebb..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,246 +0,0 @@ -sudo: true - -language: java - -branches: - only: - - rust-target - -before_cache: - - rm -rf $HOME/.m2/repository/org/antlr -cache: - timeout: 600 - directories: - - $HOME/.m2 - - $HOME/Library/Caches/Antlr4 - - $HOME/Library/Caches/Homebrew - cargo: true - -stages: - - smoke-test - # - main-test - # - extended-test - - deploy - -jobs: - include: - - os: linux - dist: trusty - jdk: openjdk8 - cache: cargo - env: TARGET=rust - stage: smoke-test - - script: skip - before_install: skip - stage: deploy - deploy: - provider: releases - token: $GITHUB_TOKEN - repo: rrevenantt/antlr4rust - file: tool/target/antlr4-4.8-2-SNAPSHOT-complete.jar - tag_name: antlr4-4.8-2-Rust-0.2 - overwrite: true - cleanup: false - prerelease: false - edge: true - on: - tags: false - branch: rust-target - # - os: linux - # dist: trusty - # compiler: clang - # jdk: openjdk8 - # env: - # - TARGET=cpp - # - CXX=g++-5 - # - GROUP=LEXER - # stage: main-test - # addons: - # apt: - # sources: - # - ubuntu-toolchain-r-test - # - llvm-toolchain-precise-3.7 - # packages: - # - g++-5 - # - uuid-dev - # - clang-3.7 - # - os: linux - # dist: trusty - # compiler: clang - # jdk: openjdk8 - # env: - # - TARGET=cpp - # - CXX=g++-5 - # - GROUP=PARSER - # stage: main-test - # addons: - # apt: - # sources: - # - ubuntu-toolchain-r-test - # - llvm-toolchain-precise-3.7 - # packages: - # - g++-5 - # - uuid-dev - # - clang-3.7 - # - os: linux - # dist: trusty - # compiler: clang - # jdk: openjdk8 - # env: - # - TARGET=cpp - # - CXX=g++-5 - # - GROUP=RECURSION - # stage: main-test - # addons: - # apt: - # sources: - # - ubuntu-toolchain-r-test - 
# - llvm-toolchain-precise-3.7 - # packages: - # - g++-5 - # - uuid-dev - # - clang-3.7 - # - os: osx - # compiler: clang - # osx_image: xcode10.2 - # env: - # - TARGET=cpp - # - GROUP=LEXER - # stage: extended-test - # - os: osx - # compiler: clang - # osx_image: xcode10.2 - # env: - # - TARGET=cpp - # - GROUP=PARSER - # stage: extended-test - # - os: osx - # compiler: clang - # osx_image: xcode10.2 - # env: - # - TARGET=cpp - # - GROUP=RECURSION - # stage: extended-test - # - os: osx - # compiler: clang - # osx_image: xcode10.2 - # env: - # - TARGET=swift - # - GROUP=LEXER - # stage: main-test - # - os: osx - # compiler: clang - # osx_image: xcode10.2 - # env: - # - TARGET=swift - # - GROUP=PARSER - # stage: main-test - # - os: osx - # compiler: clang - # osx_image: xcode10.2 - # env: - # - TARGET=swift - # - GROUP=RECURSION - # stage: main-test - # - os: linux - # dist: xenial - # compiler: clang - # env: - # - TARGET=swift - # - GROUP=ALL - # stage: extended-test - # - os: osx - # osx_image: xcode10.2 - # env: - # - TARGET=dotnet - # - GROUP=LEXER - # stage: extended-test - # - os: osx - # osx_image: xcode10.2 - # env: - # - TARGET=dotnet - # - GROUP=PARSER - # stage: extended-test - # - os: osx - # osx_image: xcode10.2 - # env: - # - TARGET=dotnet - # - GROUP=RECURSION - # stage: extended-test - # - os: linux - # dist: trusty - # jdk: openjdk7 - # env: TARGET=java - # stage: extended-test - - os: linux - jdk: openjdk8 - env: TARGET=java - stage: smoke-test - # - os: linux - # jdk: openjdk8 - # env: TARGET=csharp - # stage: main-test - # - os: linux - # language: php - # php: - # - 7.2 - # jdk: openjdk8 - # env: TARGET=php - # stage: main-test - # - os: linux - # jdk: openjdk8 - # dist: trusty - # env: - # - TARGET=dotnet - # - GROUP=LEXER - # stage: extended-test - # - os: linux - # jdk: openjdk8 - # dist: trusty - # env: - # - TARGET=dotnet - # - GROUP=PARSER - # stage: extended-test - # - os: linux - # jdk: openjdk8 - # dist: trusty - # env: - # - 
TARGET=dotnet - # - GROUP=RECURSION - # stage: extended-test -# - os: linux -# jdk: openjdk8 -# env: TARGET=python2 -# stage: main-test -# - os: linux -# jdk: openjdk8 -# env: TARGET=python3 -# addons: -# apt: -# sources: -# - deadsnakes # source required so it finds the package definition below -# packages: -# - python3.7 -# stage: main-test -# - os: linux -# dist: trusty -# jdk: openjdk8 -# env: TARGET=javascript -# stage: main-test -# - os: linux -# dist: trusty -# jdk: openjdk8 -# env: TARGET=go -# stage: smoke-test - -before_install: - - f="./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh"; ! [ -x "$f" ] || "$f" - -script: - - | - cd runtime-testsuite; - travis_wait 40 ../.travis/run-tests-$TARGET.sh; - rc=$?; - cat target/surefire-reports/*.dumpstream || true; - exit $rc - diff --git a/.travis/before-install-linux-csharp.sh b/.travis/before-install-linux-csharp.sh deleted file mode 100755 index 0872a4635e..0000000000 --- a/.travis/before-install-linux-csharp.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -echo "deb http://download.mono-project.com/repo/debian xenial main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list -sudo apt-get update -qq -sudo apt-get install -qq mono-complete diff --git a/.travis/before-install-linux-dotnet.sh b/.travis/before-install-linux-dotnet.sh deleted file mode 100755 index 816044f40b..0000000000 --- a/.travis/before-install-linux-dotnet.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# install dotnet -sudo sh -c 'echo "deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/dotnet-release/ trusty main" > /etc/apt/sources.list.d/dotnetdev.list' -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 417A0893 -sudo apt-get update -sudo apt-get --allow-unauthenticated install dotnet-dev-1.0.4 - diff --git a/.travis/before-install-linux-go.sh 
b/.travis/before-install-linux-go.sh deleted file mode 100755 index 16c8281801..0000000000 --- a/.travis/before-install-linux-go.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -eval "$(sudo gimme 1.7.3)" -( go version ; go env ) || true diff --git a/.travis/before-install-linux-javascript.sh b/.travis/before-install-linux-javascript.sh deleted file mode 100755 index 23fc8c1da9..0000000000 --- a/.travis/before-install-linux-javascript.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -sudo apt-get update -qq -curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash - -sudo apt-get install -qq nodejs -node --version diff --git a/.travis/before-install-linux-php.sh b/.travis/before-install-linux-php.sh deleted file mode 100755 index b95e3b31d5..0000000000 --- a/.travis/before-install-linux-php.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -sudo apt-get update -qq - -php -v - -mvn install -DskipTests=true -Dmaven.javadoc.skip=true -B -V \ No newline at end of file diff --git a/.travis/before-install-linux-swift.sh b/.travis/before-install-linux-swift.sh deleted file mode 100755 index 1a2b2a5550..0000000000 --- a/.travis/before-install-linux-swift.sh +++ /dev/null @@ -1,13 +0,0 @@ -set -euo pipefail - -# install dependencies -# some packages below will be update, swift assumes newer versions -# of, for example, sqlite3 and libicu, without the update some -# tools will not work -sudo apt-get update -sudo apt-get install clang-3.6 libxml2 -sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-3.6 100 - -# This would fix a know linker issue mentioned in: -# https://bugs.swift.org/browse/SR-2299 -sudo ln -sf ld.gold /usr/bin/ld diff --git a/.travis/before-install-osx-dotnet.sh b/.travis/before-install-osx-dotnet.sh deleted file mode 100755 index a4b187709d..0000000000 --- 
a/.travis/before-install-osx-dotnet.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -cache_dir="$HOME/Library/Caches/Antlr4" -dotnet_url='https://download.microsoft.com/download/F/4/F/F4FCB6EC-5F05-4DF8-822C-FF013DF1B17F/dotnet-dev-osx-x64.1.1.4.pkg' -dotnet_file=$(basename "$dotnet_url") -dotnet_shasum='dc46d93716db8bea8cc3c668088cc9e39384b5a4' - -thisdir=$(dirname "$0") - -# OpenSSL setup for dotnet core -mkdir -p /usr/local/lib -ln -s /usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib /usr/local/lib/ -ln -s /usr/local/opt/openssl/lib/libssl.1.0.0.dylib /usr/local/lib/ - -# download dotnet core -mkdir -p "$cache_dir" -(cd "$cache_dir" - if [ -f "$dotnet_file" ] - then - if ! shasum -s -c <<<"$dotnet_shasum $dotnet_file" - then - rm -f "$dotnet_file" - fi - fi - if ! [ -f "$dotnet_file" ] - then - curl "$dotnet_url" -o "$dotnet_file" - fi -) - -# install dotnet core -sudo installer -pkg "$cache_dir/$dotnet_file" -target / - -# make the link -ln -s /usr/local/share/dotnet/dotnet /usr/local/bin/ diff --git a/.travis/run-tests-cpp.sh b/.travis/run-tests-cpp.sh deleted file mode 100755 index e95dc8922d..0000000000 --- a/.travis/run-tests-cpp.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -if [ $GROUP == "LEXER" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.LexerTests" -Dtest=cpp.* test -elif [ $GROUP == "PARSER" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.ParserTests" -Dtest=cpp.* test -elif [ $GROUP == "RECURSION" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.LeftRecursionTests" -Dtest=cpp.* test -else - mvn -q -Dtest=cpp.* test -fi - diff --git a/.travis/run-tests-csharp.sh b/.travis/run-tests-csharp.sh deleted file mode 100755 index 7f2c275249..0000000000 --- a/.travis/run-tests-csharp.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=csharp.* test diff --git a/.travis/run-tests-dotnet.sh 
b/.travis/run-tests-dotnet.sh deleted file mode 100755 index 4488056641..0000000000 --- a/.travis/run-tests-dotnet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# we need to build the runtime before test run, since we used "--no-dependencies" -# when we call dotnet cli for restore and build, in order to speed up - -dotnet restore ../runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj -dotnet build -c Release -f netstandard1.3 ../runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj - -# call test - -if [ $GROUP == "LEXER" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.LexerTests" -Dparallel=classes -DthreadCount=4 -Dtest=csharp.* -Dantlr-csharp-netstandard=true test -elif [ $GROUP == "PARSER" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.ParserTests" -Dparallel=classes -DthreadCount=4 -Dtest=csharp.* -Dantlr-csharp-netstandard=true test -elif [ $GROUP == "RECURSION" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.LeftRecursionTests" -Dparallel=classes -DthreadCount=4 -Dtest=csharp.* -Dantlr-csharp-netstandard=true test -else - mvn -q -Dparallel=classes -DthreadCount=4 -Dtest=csharp.* -Dantlr-csharp-netstandard=true test -fi diff --git a/.travis/run-tests-go.sh b/.travis/run-tests-go.sh deleted file mode 100755 index 1a127ce864..0000000000 --- a/.travis/run-tests-go.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=go.* test diff --git a/.travis/run-tests-java.sh b/.travis/run-tests-java.sh deleted file mode 100755 index b2fde3660d..0000000000 --- a/.travis/run-tests-java.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=java.* test -cd ../tool-testsuite -mvn test diff --git a/.travis/run-tests-javascript.sh b/.travis/run-tests-javascript.sh deleted file mode 100755 index 013321870b..0000000000 --- 
a/.travis/run-tests-javascript.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=node.* test diff --git a/.travis/run-tests-php.sh b/.travis/run-tests-php.sh deleted file mode 100755 index 853efbd86a..0000000000 --- a/.travis/run-tests-php.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -php_path=$(which php) - -composer install -d ../runtime/PHP - -mvn -q -DPHP_PATH="${php_path}" -Dparallel=methods -DthreadCount=4 -Dtest=php.* test diff --git a/.travis/run-tests-python2.sh b/.travis/run-tests-python2.sh deleted file mode 100755 index c5cd0ca998..0000000000 --- a/.travis/run-tests-python2.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -python --version - -mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=python2.* test - -cd ../runtime/Python2/tests - -python run.py diff --git a/.travis/run-tests-python3.sh b/.travis/run-tests-python3.sh deleted file mode 100755 index 8b74928c5c..0000000000 --- a/.travis/run-tests-python3.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -python3 --version - -mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=python3.* test - -cd ../runtime/Python3/test - -python3 run.py diff --git a/.travis/run-tests-swift.sh b/.travis/run-tests-swift.sh deleted file mode 100755 index 677f356b2c..0000000000 --- a/.travis/run-tests-swift.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# linux specific setup, those setup have to be -# here since environment variables doesn't pass -# across scripts -if [ $TRAVIS_OS_NAME == "linux" ]; then - export SWIFT_VERSION=swift-5.0.1 - export SWIFT_HOME=$(pwd)/swift/$SWIFT_VERSION-RELEASE-ubuntu16.04/usr/bin/ - export PATH=$SWIFT_HOME:$PATH - - # download swift - mkdir swift - curl https://swift.org/builds/$SWIFT_VERSION-release/ubuntu1604/$SWIFT_VERSION-RELEASE/$SWIFT_VERSION-RELEASE-ubuntu16.04.tar.gz -s | tar xz -C swift &> /dev/null -fi - -if [ -z 
"${JAVA_HOME-}" ] -then - export JAVA_HOME="$(java -XshowSettings:properties -version 2>&1 | - grep 'java\.home' | awk '{ print $3 }')" - echo "export JAVA_HOME=$JAVA_HOME" -fi - -# check swift -swift --version -swift build --version - -pushd ../runtime/Swift -./boot.py --test -popd - -if [ $GROUP == "LEXER" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.LexerTests" -Dtest=swift.* test -elif [ $GROUP == "PARSER" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.ParserTests" -Dtest=swift.* test -elif [ $GROUP == "RECURSION" ]; then - mvn -q -Dgroups="org.antlr.v4.test.runtime.category.LeftRecursionTests" -Dtest=swift.* test -else - mvn -q -Dtest=swift.* test -fi diff --git a/ANTLR-HOUSE-RULES.md b/ANTLR-HOUSE-RULES.md new file mode 100644 index 0000000000..4fe89e9486 --- /dev/null +++ b/ANTLR-HOUSE-RULES.md @@ -0,0 +1,28 @@ +# ANTLR HOUSE RULES + +*Last updated: Sept 10, 2022* + +This brief document describes best practices for us to all get along and for the benefit of the project. Collaborating on this project poses a number of difficulties: + +* different native languages +* different time zones +* lack of common company or other organization as social glue +* we are just github userids without personal connection to most other contributors +* those developers able to contribute to such a complex project typically have a lot of experience and, consequently, strong opinions + +Effective communication is difficult under the circumstances and civil discourse is a requirement to keep the project on track. Over 35 years, in-fighting between contributors has made parrt's job as supreme dictator for life much more difficult. + +Rules + +1. Assume good intentions of the other party. +2. Try to be welcoming and respectful of differing viewpoints experiences. +2. No personal attacks, meaning ideas can be bad in your comments but not people. Replace "You are ..." with "Your idea is ...". +3. Control your anger please. 
No hate speech, racism, sexism, or ethnocentrism. No trolling or insulting. See rule #1. +2. Be tolerant and understanding of non-native English speakers' word choice and phrasing. This is a huge source of misunderstandings; see rule #1. For example, to a native English speaker "I cannot *approve* this" makes it sound like the writer has control over the readers contribution. Instead, the writer likely meant "I cannot *support* this." See rule #1. +3. Soften word choice to use conditional tenses and helper words. For example, use phrases such as "I'm not sure this is a good idea because ..." or "I wonder if you'd consider this other possibility: ..." etc... + +Supreme dictator for life parrt has final say. His decisions will not always be correct nor to your liking, but he has a difficult cost-benefit equation to solve for every bug fix, feature, and PR. + +Any text contrary to these house rules will likely be edited and replaced with an admonishment by parrt. + +Send concerns to parrt@antlr.org. \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 989acfe550..0a2317bab3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,10 +1,22 @@ # Contributing to ANTLR 4 -1. [Fork](https://help.github.com/articles/fork-a-repo) the [antlr/antlr4 repo](https://github.com/antlr/antlr4) -2. Install and configure [EditorConfig](http://editorconfig.org/) so your text editor or IDE uses the ANTLR 4 coding style -3. [Build ANTLR 4](doc/building-antlr.md) -4. [Run the ANTLR project unit tests](doc/antlr-project-testing.md) -5. Create a [pull request](https://help.github.com/articles/using-pull-requests/) including your change +1. [Fork](https://help.github.com/articles/fork-a-repo) the [antlr/antlr4 repo](https://github.com/antlr/antlr4), which will give you both key branches, `master` and `dev` +2. Make sure to `git checkout dev` in your fork so that you are working from the latest development branch +3. 
Create and work from a branch derived from `dev` such as `git checkout -b your-branch-name` +4. Install and configure [EditorConfig](http://editorconfig.org/) so your text editor or IDE uses the ANTLR 4 coding style +5. [Build ANTLR 4](doc/building-antlr.md) +6. [Run the ANTLR project unit tests](doc/antlr-project-testing.md) +7. Create a [pull request](https://help.github.com/articles/using-pull-requests/) with your changes and make sure you're comparing your `dev`-derived branch in your fork to the `dev` branch from the `antlr/antlr4` repo: + -**Note:** You must sign the `contributors.txt` certificate of origin with your pull request if you've not done so before. +**Note:** Each commit requires a "signature", which is simple as using `-s` (not +`-S`) to the git commit command: + +``` +git commit -s -m 'This is my commit message' +``` + +Github's pull request process enforces the sig and gives instructions on how to +fix any commits that lack the sig. See [Github DCO app](https://github.com/apps/dco) +for more info. diff --git a/LICENSE.txt b/LICENSE.txt index 2042d1bda6..5d27694155 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,52 +1,28 @@ -[The "BSD 3-clause license"] -Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. 
Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -===== - -MIT License for codepointat.js from https://git.io/codepointat -MIT License for fromcodepoint.js from https://git.io/vDW1m - -Copyright Mathias Bynens - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Package.swift b/Package.swift new file mode 100644 index 0000000000..9d212615c1 --- /dev/null +++ b/Package.swift @@ -0,0 +1,38 @@ +// swift-tools-version:5.6 + +import PackageDescription + +let package = Package( + name: "Antlr4", + products: [ + .library( + name: "Antlr4", + targets: ["Antlr4"]), + .library( + name: "Antlr4Static", + type: .static, + targets: ["Antlr4"]), + .library( + name: "Antlr4Dynamic", + type: .dynamic, + targets: ["Antlr4"]), + ], + targets: [ + .target( + name: "Antlr4", + dependencies: [], + path: "./runtime/Swift/Sources/Antlr4"), + .testTarget( + name: "Antlr4Tests", + dependencies: ["Antlr4"], + path: "./runtime/Swift/Tests/Antlr4Tests", + exclude: [ + "./runtime/Swift/Tests/VisitorBasic.g4", + "./runtime/Swift/Tests/VisitorCalc.g4", + "./runtime/Swift/Tests/LexerA.g4", + "./runtime/Swift/Tests/LexerB.g4", + "./runtime/Swift/Tests/Threading.g4" + ] + ) + ] +) diff --git a/README.md b/README.md index ec5c21c627..206491addc 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,60 @@ # ANTLR v4 -[![Build Travis-CI Status](https://travis-ci.org/antlr/antlr4.svg?branch=master)](https://travis-ci.org/antlr/antlr4) [![Build AppVeyor Status](https://ci.appveyor.com/api/projects/status/5acpbx1pg7bhgh8v/branch/master?svg=true)](https://ci.appveyor.com/project/parrt/antlr4) [![Java 7+](https://img.shields.io/badge/java-7+-4c7e9f.svg)](http://java.oracle.com) [![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/antlr/antlr4/master/LICENSE.txt) +[![Java 11+](https://img.shields.io/badge/java-11+-4c7e9f.svg)](http://java.oracle.com) +[![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/antlr/antlr4/master/LICENSE.txt) **ANTLR** (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. 
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface (or visitor) that makes it easy to respond to the recognition of phrases of interest. -*Given day-job constraints, my time working on this project is limited so I'll have to focus first on fixing bugs rather than changing/improving the feature set. Likely I'll do it in bursts every few months. Please do not be offended if your bug or pull request does not yield a response! --parrt* +**Dev branch build status** -[![Donate](https://www.paypal.com/en_US/i/btn/x-click-butcc-donate.gif)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=BF92STRXT8F8Q) +[![MacOSX, Windows, Linux](https://github.com/antlr/antlr4/actions/workflows/hosted.yml/badge.svg)](https://github.com/antlr/antlr4/actions/workflows/hosted.yml) (github actions) + + + + +## Versioning + +ANTLR 4 supports 10 target languages +(Cpp, CSharp, Dart, Java, JavaScript, PHP, Python3, Swift, TypeScript, Go), +and ensuring consistency across these targets is a unique and highly valuable feature. +To ensure proper support of this feature, each release of ANTLR is a complete release of the tool and the 10 runtimes, all with the same version. +As such, ANTLR versioning does not strictly follow semver semantics: + +* a component may be released with the latest version number even though nothing has changed within that component since the previous release +* major version is bumped only when ANTLR is rewritten for a totally new "generation", such as ANTLR3 -> ANTLR4 (LL(\*) -> ALL(\*) parsing) +* minor version updates may include minor breaking changes, the policy is to regenerate parsers with every release (4.11 -> 4.12) +* backwards compatibility is only guaranteed for patch version bumps (4.11.1 -> 4.11.2) + +If you use a semver verifier in your CI, you probably want to apply special rules for ANTLR, such as treating minor change as a major change. 
+ +## Repo branch structure + +The default branch for this repo is [`master`](https://github.com/antlr/antlr4/tree/master), which is the latest stable release and has tags for the various releases; e.g., see release tag [4.9.3](https://github.com/antlr/antlr4/tree/4.9.3). Branch [`dev`](https://github.com/antlr/antlr4/tree/dev) is where development occurs between releases and all pull requests should be derived from that branch. The `dev` branch is merged back into `master` to cut a release and the release state is tagged (e.g., with `4.10-rc1` or `4.10`.) Visually our process looks roughly like this: + + + +The Go target now has its own dedicated repo: + +```bash +$ go get github.com/antlr4-go/antlr +``` +**Note** +The dedicated Go repo is for `go get` and `import` only. Go runtime development is still performed in the main `antlr/antlr4` repo. ## Authors and major contributors * [Terence Parr](http://www.cs.usfca.edu/~parrt/), parrt@cs.usfca.edu ANTLR project lead and supreme dictator for life [University of San Francisco](http://www.usfca.edu/) -* [Sam Harwell](http://tunnelvisionlabs.com/) (Tool co-author, Java and C# target) -* Eric Vergnaud (Javascript, Python2, Python3 targets and significant work on C# target) +* [Sam Harwell](http://tunnelvisionlabs.com/) (Tool co-author, Java and original C# target) +* [Eric Vergnaud](https://github.com/ericvergnaud) (Javascript, TypeScript, Python2, Python3 targets and maintenance of C# target) * [Peter Boyer](https://github.com/pboyer) (Go target) * [Mike Lischke](http://www.soft-gems.net/) (C++ completed target) * Dan McLaughlin (C++ initial target) @@ -23,6 +63,12 @@ ANTLR project lead and supreme dictator for life * [Ewan Mellor](https://github.com/ewanmellor), [Hanzhou Shi](https://github.com/hanjoes) (Swift target merging) * [Ben Hamilton](https://github.com/bhamiltoncx) (Full Unicode support in serialized ATN and all languages' runtimes for code points > U+FFFF) * [Marcos Passos](https://github.com/marcospassos) 
(PHP target) +* [Lingyu Li](https://github.com/lingyv-li) (Dart target) +* [Ivan Kochurkin](https://github.com/KvanTTT) has made major contributions to overall quality, error handling, and Target performance. +* [Justin King](https://github.com/jcking) has done a huge amount of work across multiple targets, but especially for C++. +* [Ken Domino](https://github.com/kaby76) has a knack for finding bugs/issues and analysis; also a major contributor on the [grammars-v4 repo](https://github.com/antlr/grammars-v4). +* [Jim Idle](https://github.com/jimidle) has contributed to previous versions of ANTLR and recently jumped back in to solve a major problem with the Go target. + ## Useful information @@ -31,7 +77,12 @@ ANTLR project lead and supreme dictator for life * [Official site](http://www.antlr.org/) * [Documentation](https://github.com/antlr/antlr4/blob/master/doc/index.md) * [FAQ](https://github.com/antlr/antlr4/blob/master/doc/faq/index.md) -* [ANTLR code generation targets](https://github.com/antlr/antlr4/blob/master/doc/targets.md)
(Currently: Java, C#, Python2|3, JavaScript, Go, C++, Swift) +* [ANTLR code generation targets](https://github.com/antlr/antlr4/blob/master/doc/targets.md)
(Currently: Java, C#, Python3, JavaScript, TypeScript, Go, C++, Swift, Dart, PHP) +* _Note: As of version 4.14, we are dropping support for Python 2. We love the Python +community, but Python 2 support was officially halted in Jan 2020. More recently, +GiHub also dropped support for Python 2, which has made it impossible for us to +maintain a consistent level of quality across targets (we use GitHub for our CI). +Long live Python 3!_ * [Java API](http://www.antlr.org/api/Java/index.html) * [ANTLR v3](http://www.antlr3.org/) * [v3 to v4 Migration, differences](https://github.com/antlr/antlr4/blob/master/doc/faq/general.md) @@ -45,7 +96,7 @@ You might also find the following pages useful, particularly if you want to mess Programmers run into parsing problems all the time. Whether it’s a data format like JSON, a network protocol like SMTP, a server configuration file for Apache, a PostScript/PDF file, or a simple spreadsheet macro language—ANTLR v4 and this book will demystify the process. ANTLR v4 has been rewritten from scratch to make it easier than ever to build parsers and the language applications built on top. This completely rewritten new edition of the bestselling Definitive ANTLR Reference shows you how to take advantage of these new features. -You can buy the book [The Definitive ANTLR 4 Reference](http://amzn.com/1934356999) at amazon or an [electronic version at the publisher's site](https://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). +You can buy the book [The Definitive ANTLR 4 Reference](http://amzn.com/dp/1934356999) at amazon or an [electronic version at the publisher's site](https://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). You will find the [Book source code](http://pragprog.com/titles/tpantlr2/source_code) useful. 
diff --git a/antlr4-maven-plugin/pom.xml b/antlr4-maven-plugin/pom.xml index 76e9bd37f3..be4fb698f2 100644 --- a/antlr4-maven-plugin/pom.xml +++ b/antlr4-maven-plugin/pom.xml @@ -8,23 +8,18 @@ org.antlr antlr4-master - 4.8-2-SNAPSHOT + 4.13.3-SNAPSHOT antlr4-maven-plugin maven-plugin ANTLR 4 Maven plugin Maven plugin for ANTLR 4 grammars - - 3.0 - - - + 2009 - 3.3.9 - 1.11.12 + 3.8.5 @@ -39,13 +34,13 @@ org.apache.maven maven-plugin-api - 3.0.5 - compile + ${mavenVersion} + provided org.codehaus.plexus plexus-compiler-api - 2.2 + 2.12.1 org.sonatype.plexus @@ -64,26 +59,26 @@ junit junit - 4.12 + 4.13.2 test org.apache.maven.plugin-tools maven-plugin-annotations - 3.2 + 3.6.4 provided io.takari.maven.plugins takari-plugin-testing - 2.9.0 + 3.0.0 test org.apache.maven maven-core ${mavenVersion} - test + provided org.apache.maven @@ -94,14 +89,19 @@ org.codehaus.plexus plexus-utils - 3.0.15 + 3.4.2 provided - - org.apache.maven - maven-project - 2.2.1 - + + org.slf4j + slf4j-api + 2.0.0 + + + org.slf4j + slf4j-simple + 2.0.0 + @@ -120,7 +120,7 @@ org.apache.maven.plugins maven-plugin-plugin - 3.3 + 3.6.2 true @@ -143,7 +143,7 @@ io.takari.maven.plugins takari-lifecycle-plugin - ${takariLifecycleVersion} + 2.0.7 true @@ -155,29 +155,26 @@ + + org.apache.maven.plugins + maven-compiler-plugin + + 8 + + - - org.apache.maven.plugins - maven-plugin-plugin - 3.3 - org.apache.maven.plugins maven-javadoc-plugin - 2.10.4 + 3.3.1 true - - org.apache.maven.plugins - maven-jxr-plugin - 2.5 - diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java index c0926fe6c8..3fb37a8e0e 100644 --- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java +++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java @@ -55,7 +55,7 @@ name = "antlr4", defaultPhase = LifecyclePhase.GENERATE_SOURCES, requiresDependencyResolution = ResolutionScope.COMPILE, - 
requiresProject = true) + requiresProject = true, threadSafe = true) public class Antlr4Mojo extends AbstractMojo { // First, let's deal with the options that the ANTLR tool itself @@ -276,7 +276,7 @@ public void execute() throws MojoExecutionException, MojoFailureException { for (List args : argumentSets) { try { // Create an instance of the ANTLR 4 build tool - tool = new CustomTool(args.toArray(new String[args.size()])); + tool = new CustomTool(args.toArray(new String[0])); } catch (Exception e) { log.error("The attempt to create the ANTLR 4 build tool failed, see exception report for details", e); throw new MojoFailureException("Error creating an instanceof the ANTLR tool.", e); diff --git a/antlr4-maven-plugin/src/test/projects/dependencyRemoved/pom.xml b/antlr4-maven-plugin/src/test/projects/dependencyRemoved/pom.xml index 35be7219ba..e778e9206d 100644 --- a/antlr4-maven-plugin/src/test/projects/dependencyRemoved/pom.xml +++ b/antlr4-maven-plugin/src/test/projects/dependencyRemoved/pom.xml @@ -17,7 +17,7 @@ junit junit - 4.11 + 4.13.1 test diff --git a/antlr4-maven-plugin/src/test/projects/importTokens/pom.xml b/antlr4-maven-plugin/src/test/projects/importTokens/pom.xml index 7f8c6570db..99c88cb09b 100644 --- a/antlr4-maven-plugin/src/test/projects/importTokens/pom.xml +++ b/antlr4-maven-plugin/src/test/projects/importTokens/pom.xml @@ -17,7 +17,7 @@ junit junit - 4.11 + 4.13.1 test diff --git a/antlr4-maven-plugin/src/test/projects/importsCustom/pom.xml b/antlr4-maven-plugin/src/test/projects/importsCustom/pom.xml index 4ab0de8f20..bd2a6d26a5 100644 --- a/antlr4-maven-plugin/src/test/projects/importsCustom/pom.xml +++ b/antlr4-maven-plugin/src/test/projects/importsCustom/pom.xml @@ -17,7 +17,7 @@ junit junit - 4.11 + 4.13.1 test diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/pom.xml b/antlr4-maven-plugin/src/test/projects/importsStandard/pom.xml index 77dcf00381..2a6efd2522 100644 --- 
a/antlr4-maven-plugin/src/test/projects/importsStandard/pom.xml +++ b/antlr4-maven-plugin/src/test/projects/importsStandard/pom.xml @@ -17,7 +17,7 @@ junit junit - 4.11 + 4.13.1 test diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 5788acadf3..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: '4.7.1-SNAPSHOT+AppVeyor.{build}' -cache: - - '%USERPROFILE%\.m2' - - '%USERPROFILE%\.nuget\packages -> **\project.json' -image: Visual Studio 2017 -build: off -install: - - git submodule update --init --recursive - - cinst -y php --params "/InstallDir:C:\tools\php" - - cinst -y composer -build_script: - - mvn -DskipTests install --batch-mode - - msbuild /target:restore /target:rebuild /property:Configuration=Release /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln - - msbuild ./runtime-testsuite/target/classes/CSharp/runtime/CSharp/Antlr4.vs2013.sln /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed -after_build: - - msbuild /target:pack /property:Configuration=Release /verbosity:detailed runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln -test_script: - - mvn install -Dantlr-php-php="C:\tools\php\php.exe" -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode -artifacts: -- path: 'runtime\**\*.nupkg' - name: NuGet \ No newline at end of file diff --git a/build/Antlr4.Runtime.nuspec b/build/Antlr4.Runtime.nuspec deleted file mode 100644 index f629ba1fe1..0000000000 --- a/build/Antlr4.Runtime.nuspec +++ /dev/null @@ -1,64 +0,0 @@ - - - - Antlr4.Runtime - 0.0.0 - Sam Harwell, Terence Parr - Sam Harwell - The runtime library for parsers generated by the C# target of ANTLR 4. This package supports projects targeting .NET 2.0 or newer, and built using Visual Studio 2008 or newer. 
- en-us - https://github.com/sharwell/antlr4cs - https://raw.github.com/sharwell/antlr4cs/master/LICENSE.txt - https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png - Copyright © Sam Harwell 2014 - https://github.com/sharwell/antlr4cs/releases/v$version$ - true - antlr antlr4 parsing - ANTLR 4 Runtime - The runtime library for parsers generated by the C# target of ANTLR 4. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/build/Antlr4.VS2008.nuspec b/build/Antlr4.VS2008.nuspec deleted file mode 100644 index 36ffe7ec9a..0000000000 --- a/build/Antlr4.VS2008.nuspec +++ /dev/null @@ -1,35 +0,0 @@ - - - - Antlr4.VS2008 - 0.0.0 - Sam Harwell, Terence Parr - Sam Harwell - The C# target of the ANTLR 4 parser generator for Visual Studio 2008 projects. This package supports projects targeting .NET 2.0 or newer, and built using Visual Studio 2008. - en-us - https://github.com/sharwell/antlr4cs - https://raw.github.com/sharwell/antlr4cs/master/LICENSE.txt - https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png - Copyright © Sam Harwell 2014 - https://github.com/sharwell/antlr4cs/releases/v$version$ - true - true - antlr antlr4 parsing - ANTLR 4 (Visual Studio 2008) - The C# target of the ANTLR 4 parser generator for Visual Studio 2008 projects. - - - - - - - - - - - - - - - - diff --git a/build/Antlr4.nuspec b/build/Antlr4.nuspec deleted file mode 100644 index 9dc4e4754c..0000000000 --- a/build/Antlr4.nuspec +++ /dev/null @@ -1,35 +0,0 @@ - - - - Antlr4 - 0.0.0 - Sam Harwell, Terence Parr - Sam Harwell - The C# target of the ANTLR 4 parser generator for Visual Studio 2010+ projects. This package supports projects targeting .NET 2.0 or newer, and built using Visual Studio 2010 or newer. 
- en-us - https://github.com/sharwell/antlr4cs - https://raw.github.com/sharwell/antlr4cs/master/LICENSE.txt - https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png - Copyright © Sam Harwell 2014 - https://github.com/sharwell/antlr4cs/releases/v$version$ - true - true - antlr antlr4 parsing - ANTLR 4 - The C# target of the ANTLR 4 parser generator for Visual Studio 2010+ projects. - - - - - - - - - - - - - - - - diff --git a/build/build.ps1 b/build/build.ps1 deleted file mode 100644 index 5dcc5e6071..0000000000 --- a/build/build.ps1 +++ /dev/null @@ -1,139 +0,0 @@ -param ( - [switch]$Debug, - [string]$VisualStudioVersion = "12.0", - [switch]$NoClean, - [string]$Java6Home, - [string]$MavenHome, - [string]$MavenRepo = "$($env:USERPROFILE)\.m2", - [switch]$SkipMaven, - [switch]$SkipKeyCheck -) - -# build the solutions -$SolutionPath = "..\Runtime\CSharp\Antlr4.sln" -$CF35SolutionPath = "..\Runtime\CSharp\Antlr4.VS2008.sln" - -# make sure the script was run from the expected path -if (!(Test-Path $SolutionPath)) { - echo "The script was run from an invalid working directory." - exit 1 -} - -. 
.\version.ps1 - -If ($Debug) { - $BuildConfig = 'Debug' -} Else { - $BuildConfig = 'Release' -} - -If ($NoClean) { - $Target = 'build' -} Else { - $Target = 'rebuild' -} - -If (-not $MavenHome) { - $MavenHome = $env:M2_HOME -} - -$Java6RegKey = 'HKLM:\SOFTWARE\JavaSoft\Java Runtime Environment\1.6' -$Java6RegValue = 'JavaHome' -If (-not $Java6Home -and (Test-Path $Java6RegKey)) { - $JavaHomeKey = Get-Item -LiteralPath $Java6RegKey - If ($JavaHomeKey.GetValue($Java6RegValue, $null) -ne $null) { - $JavaHomeProperty = Get-ItemProperty $Java6RegKey $Java6RegValue - $Java6Home = $JavaHomeProperty.$Java6RegValue - } -} - -# this is configured here for path checking, but also in the .props and .targets files -[xml]$pom = Get-Content "..\tool\pom.xml" -$CSharpToolVersionNodeInfo = Select-Xml "/mvn:project/mvn:version" -Namespace @{mvn='http://maven.apache.org/POM/4.0.0'} $pom -$CSharpToolVersion = $CSharpToolVersionNodeInfo.Node.InnerText.trim() - -# build the main project -$msbuild = "$env:windir\Microsoft.NET\Framework64\v4.0.30319\msbuild.exe" - -&$msbuild '/nologo' '/m' '/nr:false' "/t:$Target" "/p:Configuration=$BuildConfig" "/p:VisualStudioVersion=$VisualStudioVersion" $SolutionPath -if ($LASTEXITCODE -ne 0) { - $host.ui.WriteErrorLine('Build failed, aborting!') - exit $p.ExitCode -} - -# build the compact framework project -$msbuild = "$env:windir\Microsoft.NET\Framework\v4.0.30319\msbuild.exe" - -&$msbuild '/nologo' '/m' '/nr:false' '/t:rebuild' "/p:Configuration=$BuildConfig" $CF35SolutionPath -if ($LASTEXITCODE -ne 0) { - $host.ui.WriteErrorLine('.NET 3.5 Compact Framework Build failed, aborting!') - exit $p.ExitCode -} - -if (-not (Test-Path 'nuget')) { - mkdir "nuget" -} - -# Build the Java library using Maven -If (-not $SkipMaven) { - $OriginalPath = $PWD - - cd '..\tool' - $MavenPath = "$MavenHome\bin\mvn.bat" - If (-not (Test-Path $MavenPath)) { - $host.ui.WriteErrorLine("Couldn't locate Maven binary: $MavenPath") - cd $OriginalPath - exit 1 - } - - If (-not 
(Test-Path $Java6Home)) { - $host.ui.WriteErrorLine("Couldn't locate Java 6 installation: $Java6Home") - cd $OriginalPath - exit 1 - } - - $MavenGoal = 'package' - &$MavenPath '-DskipTests=true' '--errors' '-e' '-Dgpg.useagent=true' "-Djava6.home=$Java6Home" '-Psonatype-oss-release' $MavenGoal - if ($LASTEXITCODE -ne 0) { - $host.ui.WriteErrorLine('Maven build of the C# Target custom Tool failed, aborting!') - cd $OriginalPath - exit $p.ExitCode - } - - cd $OriginalPath -} - -$JarPath = "..\tool\target\antlr4-csharp-$CSharpToolVersion-complete.jar" -if (!(Test-Path $JarPath)) { - $host.ui.WriteErrorLine("Couldn't locate the complete jar used for building C# parsers: $JarPath") - exit 1 -} - -# By default, do not create a NuGet package unless the expected strong name key files were used -if (-not $SkipKeyCheck) { - . .\keys.ps1 - - foreach ($pair in $Keys.GetEnumerator()) { - $assembly = Resolve-FullPath -Path "..\runtime\CSharp\Antlr4.Runtime\bin\$($pair.Key)\$BuildConfig\Antlr4.Runtime.dll" - # Run the actual check in a separate process or the current process will keep the assembly file locked - powershell -Command ".\check-key.ps1 -Assembly '$assembly' -ExpectedKey '$($pair.Value)' -Build '$($pair.Key)'" - if ($LASTEXITCODE -ne 0) { - Exit $p.ExitCode - } - } -} - -$packages = @( - 'Antlr4.Runtime' - 'Antlr4' - 'Antlr4.VS2008') - -$nuget = '..\runtime\CSharp\.nuget\NuGet.exe' -ForEach ($package in $packages) { - If (-not (Test-Path ".\$package.nuspec")) { - $host.ui.WriteErrorLine("Couldn't locate NuGet package specification: $package") - exit 1 - } - - &$nuget 'pack' ".\$package.nuspec" '-OutputDirectory' 'nuget' '-Prop' "Configuration=$BuildConfig" '-Version' "$AntlrVersion" '-Prop' "M2_REPO=$M2_REPO" '-Prop' "CSharpToolVersion=$CSharpToolVersion" '-Symbols' -} diff --git a/build/check-key.ps1 b/build/check-key.ps1 deleted file mode 100644 index b92a9cdccf..0000000000 --- a/build/check-key.ps1 +++ /dev/null @@ -1,31 +0,0 @@ -param( - [string]$Assembly, - 
[string]$ExpectedKey, - [string]$Build = $null -) - -function Get-PublicKeyToken() { - param([string]$assembly = $null) - if ($assembly) { - $bytes = $null - $bytes = [System.Reflection.Assembly]::ReflectionOnlyLoadFrom($assembly).GetName().GetPublicKeyToken() - if ($bytes) { - $key = "" - for ($i=0; $i -lt $bytes.Length; $i++) { - $key += "{0:x2}" -f $bytes[$i] - } - - $key - } - } -} - -if (-not $Build) { - $Build = $Assembly -} - -$actual = Get-PublicKeyToken -assembly $Assembly -if ($actual -ne $ExpectedKey) { - $host.ui.WriteErrorLine("Invalid publicKeyToken for '$Build'; expected '$ExpectedKey' but found '$actual'") - exit 1 -} diff --git a/build/keys.ps1 b/build/keys.ps1 deleted file mode 100644 index 4e2f34250b..0000000000 --- a/build/keys.ps1 +++ /dev/null @@ -1,17 +0,0 @@ -# Note: these values may only change during minor release -$Keys = @{ - 'net20' = '7983ae52036899ac' - 'net30' = '7671200403f6656a' - 'net35-cf' = '770a97458f51159e' - 'net35-client' = '4307381ae04f9aa7' - 'net40-client' = 'bb1075973a9370c4' - 'net45' = 'edc21c04cf562012' - 'netcore45' = 'e4e9019902d0b6e2' - 'portable-net40' = '90bf14da8e1462b4' - 'portable-net45' = '3d23c8e77559f391' -} - -function Resolve-FullPath() { - param([string]$Path) - [System.IO.Path]::GetFullPath((Join-Path (pwd) $Path)) -} diff --git a/build/push.ps1 b/build/push.ps1 deleted file mode 100644 index 17791c1cd2..0000000000 --- a/build/push.ps1 +++ /dev/null @@ -1,29 +0,0 @@ -. 
.\version.ps1 - -If ($AntlrVersion.EndsWith('-dev')) { - $host.ui.WriteErrorLine("Cannot push development version '$AntlrVersion' to NuGet.") - Exit 1 -} - -$packages = @( - 'Antlr4.Runtime' - 'Antlr4' - 'Antlr4.VS2008') - -# Make sure all packages exist before pushing any packages -ForEach ($package in $packages) { - If (-not (Test-Path ".\nuget\$package.$AntlrVersion.nupkg")) { - $host.ui.WriteErrorLine("Couldn't locate NuGet package: $JarPath") - exit 1 - } - - If (-not (Test-Path ".\nuget\$package.$AntlrVersion.symbols.nupkg")) { - $host.ui.WriteErrorLine("Couldn't locate NuGet symbols package: $JarPath") - exit 1 - } -} - -$nuget = '..\runtime\CSharp\.nuget\NuGet.exe' -ForEach ($package in $packages) { - &$nuget 'push' ".\nuget\$package.$AntlrVersion.nupkg" -} diff --git a/build/version.ps1 b/build/version.ps1 deleted file mode 100644 index 457481bd8c..0000000000 --- a/build/version.ps1 +++ /dev/null @@ -1 +0,0 @@ -$AntlrVersion = "4.5.1" diff --git a/contributors.txt b/contributors.txt deleted file mode 100644 index 2de2535864..0000000000 --- a/contributors.txt +++ /dev/null @@ -1,243 +0,0 @@ -ANTLR Project Contributors Certification of Origin and Rights - -All contributors to ANTLR v4 must formally agree to abide by this -certificate of origin by signing on the bottom with their github -userid, full name, email address (you can obscure your e-mail, but it -must be computable by human), and date. - -By signing this agreement, you are warranting and representing that -you have the right to release code contributions or other content free -of any obligations to third parties and are granting Terence Parr and -ANTLR project contributors, henceforth referred to as The ANTLR -Project, a license to incorporate it into The ANTLR Project tools -(such as ANTLRWorks and StringTemplate) or related works under the BSD -license. You understand that The ANTLR Project may or may not -incorporate your contribution and you warrant and represent the -following: - -1. 
I am the creator of all my contributions. I am the author of all - contributed work submitted and further warrant and represent that - such work is my original creation and I have the right to license - it to The ANTLR Project for release under the 3-clause BSD - license. I hereby grant The ANTLR Project a nonexclusive, - irrevocable, royalty-free, worldwide license to reproduce, - distribute, prepare derivative works, and otherwise use this - contribution as part of the ANTLR project, associated - documentation, books, and tools at no cost to The ANTLR Project. - -2. I have the right to submit. This submission does not violate the - rights of any person or entity and that I have legal authority over - this submission and to make this certification. - -3. If I violate another's rights, liability lies with me. I agree to - defend, indemnify, and hold The ANTLR Project and ANTLR users - harmless from any claim or demand, including reasonable attorney - fees, made by any third party due to or arising out of my violation - of these terms and conditions or my violation of the rights of - another person or entity. - -4. I understand and agree that this project and the contribution are - public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license indicated in the file. - -I have read this agreement and do so certify by adding my signoff to -the end of the following contributors list. 
- -CONTRIBUTORS: - -YYYY/MM/DD, github id, Full name, email -2012/07/12, parrt, Terence Parr, parrt@antlr.org -2012/09/18, sharwell, Sam Harwell, sam@tunnelvisionlabs.com -2012/10/10, stephengaito, Stephen Gaito, stephen@percepitsys.co.uk -2012/11/23, maguro, Alan Cabrera, adc@toolazydogs.com -2013/01/29, metadave, Dave Parfitt, diparfitt@gmail.com -2013/03/06, bkiers, Bart Kiers, bkiers@gmail.com -2013/08/20, cayhorstmann, Cay Horstmann, cay@horstmann.com -2014/03/18, aphyr, Kyle Kingsbury, aphyr@aphyr.com -2014/06/07, ericvergnaud, Eric Vergnaud, eric.vergnaud@wanadoo.fr -2014/07/04, jimidle, Jim Idle, jimi@Idle.ws -2014/01/01, danmclaughlin, Dan McLaughlin, dan.mclaughlin@gmail.com -2014/09/04. jeduden, Jan-Eric Duden, jeduden@gmail.com -2014/09/27, petrbel, Petr Bělohlávek, antlr@petrbel.cz -2014/10/18, sergiusignacius, Sérgio Silva, serge.a.silva@gmail.com -2014/10/26, bdkearns, Brian Kearns, bdkearns@gmail.com -2014/10/27, michaelpj, Michael Peyton Jones, michaelpj@gmail.com -2015/01/29, TomLottermann, Thomas Lottermann, tomlottermann@gmail.com -2015/02/15, pavlo, Pavlo Lysov, pavlikus@gmail.com -2015/03/07, RedTailedHawk, Lawrence Parker, larry@answerrocket.com -2015/04/03, rljacobson, Robert Jacobson, rljacobson@gmail.com -2015/04/06, ojakubcik, Ondrej Jakubcik, ojakubcik@gmail.com -2015/04/29, jszheng, Jinshan Zheng, zheng_js@hotmail.com -2015/05/08, ViceIce, Michael Kriese, michael.kriese@gmx.de -2015/05/09, lkraz, Luke Krasnoff, luke.krasnoff@gmail.com -2015/05/12, Pursuit92, Josh Chase, jcjoshuachase@gmail.com -2015/05/20, peturingi, Pétur Ingi Egilsson, petur@petur.eu -2015/05/27, jcbrinfo, Jean-Christophe Beaupré, jcbrinfo@users.noreply.github.com -2015/06/29, jvanzyl, Jason van Zyl, jason@takari.io -2015/08/18, krzkaczor, Krzysztof Kaczor, krzysztof@kaczor.io -2015/09/18, worsht, Rajiv Subrahmanyam, rajiv.public@gmail.com -2015/09/24, HSorensen, Henrik Sorensen, henrik.b.sorensen@gmail.com -2015/10/06, brwml, Bryan Wilhelm, 
bryan.wilhelm@microsoft.com -2015/10/08, fedotovalex, Alex Fedotov, me@alexfedotov.com -2015/10/12, KvanTTT, Ivan Kochurkin, ivan.kochurkin@gmail.com -2015/10/21, martin-probst, Martin Probst, martin-probst@web.de -2015/10/21, hkff, Walid Benghabrit, walid.benghabrit@mines-nantes.fr -2015/11/12, cooperra, Robbie Cooper, cooperra@users.noreply.github.com -2015/11/25, abego, Udo Borkowski, ub@abego.org -2015/12/17, sebadur, Sebastian Badur, sebadur@users.noreply.github.com -2015/12/23, pboyer, Peter Boyer, peter.b.boyer@gmail.com -2015/12/24, dtymon, David Tymon, david.tymon@gmail.com -2016/02/18, reitzig, Raphael Reitzig, reitzig[at]cs.uni-kl.de -2016/03/10, mike-lischke, Mike Lischke, mike@lischke-online.de -2016/03/27, beardlybread, Bradley Steinbacher, bradley.j.steinbacher@gmail.com -2016/03/29, msteiger, Martin Steiger, antlr@martin-steiger.de -2016/03/28, gagern, Martin von Gagern, gagern@ma.tum.de -2016/07/10, twz123, Tom Wieczorek, tom.wieczorek@zalando.de -2016/07/20, chrisheller, Chris Heller, chris.heller.greyheller@gmail.com -2016/07/20, nburles, Nathan Burles, nburles@gmail.com -2016/07/20, kosl90, Li Liqiang, kos1990l@gmail.com -2016/07/27, timoc, Tim O'Callaghan, timo@linux.com -2016/07/26, nic30, Michal Orsák, michal.o.socials@gmail.com -2016/07/18, willfaught, Will Faught, will.faught@gmail.com -2016/08/08, wjkohnen, Wolfgang Johannes Kohnen, wjkohnen-go-antlr@ko-sys.com -2016/08/11, BurtHarris, Ralph "Burt" Harris, Burt_Harris_antlr4@azxs.33mail.com -2016/08/19, andjo403, Andreas Jonson, andjo403@hotmail.com -2016/09/27, harriman, Kurt Harriman, harriman@acm.org -2016/10/13, cgudrian, Christian Gudrian, christian.gudrian@gmx.de -2016/10/13, nielsbasjes, Niels Basjes, niels@basjes.nl -2016/10/21, FloorGoddijn, Floor Goddijn, floor.goddijn[at]aimms.com -2016/11/01, RYDB3RG, Kai Stammerjohann, RYDB3RG@users.noreply.github.com -2016/11/05, runner-mei, meifakun, runner.mei@gmail.com -2016/11/15, hanjoes, Hanzhou Shi, hanzhou87@gmail.com -2016/11/16, 
sridharxp, Sridharan S, aurosridhar@gmail.com -2016/11/06, NoodleOfDeath, Thom Morgan, github@bytemeapp.com -2016/11/01, sebkur, Sebastian Kürten, sebastian@topobyte.de -2016/04/13, renatahodovan, Renata Hodovan, reni@inf.u-szeged.hu -2016/11/05, ewanmellor, Ewan Mellor, github@ewanmellor.org -2016/11/06, janyou, Janyou, janyou.antlr@outlook.com -2016/11/20, marcohu, Marco Hunsicker, antlr@hunsicker.de -2016/09/02, lygav, Vladimir (Vladi) Lyga, lyvladi@gmail.com -2016/09/23, ghosthope, Dmitry Shakhtanov, sudstrike@gmail.com -2016/11/25, MrSampson, Oliver Sampson, olsam@quickaudio.com -2016/11/29, millergarym, Gary Miller, miller.garym@gmail.com -2016/11/29, wxio, Gary Miller, gm@wx.io -2016/11/29, Naios, Denis Blank, naios@users.noreply.github.com -2016/12/01, samtatasurya, Samuel Tatasurya, xemradiant@gmail.com -2016/12/03, redxdev, Samuel Bloomberg, sam@redxdev.com -2016/12/11, Gaulouis, Gaulouis, gaulouis.com@gmail.com -2016/12/22, akosthekiss, Akos Kiss, akiss@inf.u-szeged.hu -2016/12/24, adrpo, Adrian Pop, adrian.pop@liu.se -2017/01/11, robertbrignull, Robert Brignull, robertbrignull@gmail.com -2017/01/13, marcelo-rocha, Marcelo Rocha, mcrocha@gmail.com -2017/01/23, bhamiltoncx, Ben Hamilton, bhamiltoncx+antlr@gmail.com -2017/01/18, mshockwave, Bekket McClane, yihshyng223@gmail.com -2017/02/10, lionelplessis, Lionel Plessis, lionelplessis@users.noreply.github.com -2017/02/14, lecode-official, David Neumann, david.neumann@lecode.de -2017/02/14, xied75, Dong Xie, xied75@gmail.com -2017/02/20, Thomasb81, Thomas Burg, thomasb81@gmail.com -2017/02/26, jvasileff, John Vasileff, john@vasileff.com -2017/03/08, harry-tallbelt, Igor Vysokopoyasny, harry.tallbelt@gmail.com -2017/03/09, teverett, Tom Everett, tom@khubla.com -2017/03/03, chund, Christian Hund, christian.hund@gmail.com -2017/03/15, robertvanderhulst, Robert van der Hulst, robert@xsharp.eu -2017/03/28, cmd-johnson, Jonas Auer, jonas.auer.94@gmail.com -2017/04/12, lys0716, Yishuang Lu, luyscmu@gmail.com 
-2017/04/30, shravanrn, Shravan Narayan, shravanrn@gmail.com -2017/05/11, jimallman, Jim Allman, jim@ibang.com -2017/05/26, waf, Will Fuqua, wafuqua@gmail.com -2017/05/29, kosak, Corey Kosak, kosak@kosak.com -2017/06/11, erikbra, Erik A. Brandstadmoen, erik@brandstadmoen.net -2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com -2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com -2017/06/28, jBugman, Sergey Parshukov, codedby@bugman.me -2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com -2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in -2017/07/17, vaibhavaingankar09, Vaibhav Vaingankar, vbhvvaingankar9@gmail.com -2017/07/23, venkatperi, Venkat Peri, venkatperi@gmail.com -2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com -2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com -2017/07/27, matthauck, Matt Hauck, matthauck@gmail.com -2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com -2017/08/20, tiagomazzutti, Tiago Mazzutti, tiagomzt@gmail.com -2017/08/20, milanaleksic, Milan Aleksic, milanaleksic@gmail.com -2017/08/29, Eddy Reyes, eddy@mindsight.io -2017/09/09, brauliobz, Bráulio Bezerra, brauliobezerra@gmail.com -2017/09/11, sachinjain024, Sachin Jain, sachinjain024@gmail.com -2017/09/25, kaedvann, Rostislav Listerenko, r.listerenko@gmail.com -2017/10/06, bramp, Andrew Brampton, brampton@gmail.com -2017/10/15, simkimsia, Sim Kim Sia, kimcity@gmail.com -2017/10/27, Griffon26, Maurice van der Pot, griffon26@kfk4ever.com -2017/05/29, rlfnb, Ralf Neeb, rlfnb@rlfnb.de -2017/10/29, gendalph, Максим Прохоренко, Maxim\dotProhorenko@gm@il.com -2017/11/02, jasonmoo, Jason Mooberry, jason.mooberry@gmail.com -2017/11/05, ajaypanyala, Ajay Panyala, ajay.panyala@gmail.com -2017/11/24, zqlu.cn, Zhiqiang Lu, zqlu.cn@gmail.com -2017/11/28, niccroad, Nicolas Croad, nic.croad@gmail.com -2017/12/01, DavidMoraisFerreira, David Morais Ferreira, david.moraisferreira@gmail.com -2017/12/01, 
SebastianLng, Sebastian Lang, sebastian.lang@outlook.com -2017/12/03, oranoran, Oran Epelbaum, oran / epelbaum me -2017/12/12, janlinde, Jan Lindemann, jan@janware.com -2017/12/13, enessoylu, Enes Soylu, enessoylutr@gmail.com -2017/12/20, kbsletten, Kyle Sletten, kbsletten@gmail.com -2017/12/27, jkmar, Jakub Marciniszyn, marciniszyn.jk@gmail.com -2018/03/08, dannoc, Daniel Clifford, danno@google.com -2018/03/10, uvguy, kangjoni76@gmail.com -2018/01/06, kasbah, Kaspar Emanuel, kaspar@monostable.co.uk -2018/01/15, xgcssch, Sönke Schau, xgcssch@users.noreply.github.com -2018/02/08, razfriman, Raz Friman, raz@razfriman.com -2018/02/11, io7m, Mark Raynsford, code@io7m.com -2018/04/24, solussd, Joe Smith, joe@uwcreations.com -2018/15/05, johnvanderholt, jan dillingh johnvanderholte@gmail.com -2018/06/14, scadgek, Sergey Chupov, scadgek@live.com -2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com -2018/06/27, wu-sheng, Wu Sheng, wu.sheng@foxmail.com -2018/02/25, chaseoxide, Marcus Ong, taccs97[at]gmail[dot]com -2018/05/15, johnvanderholt, jan dillingh johnvanderholte@gmail.com -2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com -2018/05/15, johnvanderholt, jan dillingh johnvanderholte@gmail.com -2018/05/17, sinopsysHK, Eric Bardes, sinofwd@gmail.com -2018/05/23, srvance, Stephen Vance, steve@vance.com -2018/06/14, alecont, Alessandro Contenti, alecontenti@hotmail.com -2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com -2018/07/03, jgoppert, James Goppert, james.goppert@gmail.com -2018/07/27, Maksim Novikov, mnovikov.work@gmail.com -2018/08/03, ENDOH takanao, djmchl@gmail.com -2018/10/18, edirgarcia, Edir García Lazo, edirgl@hotmail.com -2018/07/31, Lucas Henrqiue, lucashenrique580@gmail.com -2018/08/03, ENDOH takanao, djmchl@gmail.com -2018/10/29, chrisaycock, Christopher Aycock, chris[at]chrisaycock[dot]com -2018/11/12, vinoski, Steve Vinoski, vinoski@ieee.org -2018/11/14, nxtstep, Adriaan (Arjan) Duz, codewithadriaan[et]gmail[dot]com 
-2018/11/15, amykyta3, Alex Mykyta, amykyta3@users.noreply.github.com -2018/11/29, hannemann-tamas, Ralf Hannemann-Tamas, ralf.ht@gmail.com -2018/12/20, WalterCouto, Walter Couto, WalterCouto@users.noreply.github.com -2018/12/23, youkaichao, Kaichao You, youkaichao@gmail.com -2019/01/16, kuegi, Markus Zancolo, markus.zancolo@roomle.com -2019/02/06, ralucado, Cristina Raluca Vijulie, ralucris.v[at]gmail[dot]com -2019/02/23, gedimitr, Gerasimos Dimitriadis, gedimitr@gmail.com -2019/03/13, base698, Justin Thomas, justin.thomas1@gmail.com -2019/03/18, carlodri, Carlo Dri, carlo.dri@gmail.com -2019/05/02, askingalot, Andy Collins, askingalot@gmail.com -2019/07/11, olowo726, Olof Wolgast, olof@baah.se -2019/07/16, abhijithneilabraham, Abhijith Neil Abraham, abhijithneilabrahampk@gmail.com -2019/07/26, Braavos96, Eric Hettiaratchi, erichettiaratchi@gmail.com -2019/08/23, akaJes, Oleksandr Mamchyts, akaJes@gmail.com -2019/09/10, ImanHosseini, Iman Hosseini, hosseini.iman@yahoo.com -2019/09/03, João Henrique, johnnyonflame@hotmail.com -2019/09/10, neko1235, Ihar Mokharau, igor.mohorev@gmail.com -2019/09/10, yar3333, Yaroslav Sivakov, yar3333@gmail.com -2019/09/10, marcospassos, Marcos Passos, marcospassos.com@gmail.com -2019/09/10, amorimjuliana, Juliana Amorim, juu.amorim@gmail.com -2019/09/17, kaz, Kazuki Sawada, kazuki@6715.jp -2019/09/28, lmy269, Mingyang Liu, lmy040758@gmail.com -2019/10/29, tehbone, Tabari Alexander, tehbone@gmail.com -2019/10/31, a-square, Alexei Averchenko, lex.aver@gmail.com -2019/11/11, foxeverl, Liu Xinfeng, liuxf1986[at]gmail[dot]com -2019/11/17, felixn, Felix Nieuwenhuizhen, felix@tdlrali.com -2019/11/18, mlilback, Mark Lilback, mark@lilback.com -2020/02/02, carocad, Camilo Roca, carocad@unal.edu.co -2020/02/10, rrevenantt, Konstantin Anisimov, rrevenantt[at]gmail.com -2025/05/28, alexsnaps, Alex Snaps, alex@wcgw.dev -2025/09/12, torbensen, Torben Magne, torbenmagne@gmail.com diff --git a/developer-cert-of-origin.txt 
b/developer-cert-of-origin.txt new file mode 100644 index 0000000000..7274c56507 --- /dev/null +++ b/developer-cert-of-origin.txt @@ -0,0 +1,52 @@ +As of 4.10, ANTLR uses the Linux Foundation's Developer +Certificate of Origin, DCO, version 1.1. See either +https://developercertificate.org/ or the text below. + +Each commit requires a "signature", which is simple as +using `-s` (not `-S`) to the git commit command: + +git commit -s -m 'This is my commit message' + +Github's pull request process enforces the sig and gives +instructions on how to fix any commits that lack the sig. +See https://github.com/apps/dco for more info. + +No signature is required in this file (unlike the +previous ANTLR contributor's certificate of origin.) + +----- https://developercertificate.org/ ------ + +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/doc/IDEs.md b/doc/IDEs.md index 7c21e8af01..9e98503d3c 100644 --- a/doc/IDEs.md +++ b/doc/IDEs.md @@ -1,5 +1,5 @@ # Integrating ANTLR into Development Systems -The Java target is the reference implementation mirrored by other targets. The following pages help you integrate ANTLR into development environments and build systems appropriate for your target language. As of December 2016, we have Java, C#, Python 2, Python 3, JavaScript, Go, C++, and Swift targets. +The Java target is the reference implementation mirrored by other targets. The following pages help you integrate ANTLR into development environments and build systems appropriate for your target language. As of December 2016, we have Java, C#, Python 3, JavaScript, Go, C++, and Swift targets. The easiest thing is probably just to use an [ANTLR plug-in](http://www.antlr.org/tools.html) for your favorite development environment. diff --git a/doc/actions.md b/doc/actions.md index ef51c8f4c3..2eef7e4f6d 100644 --- a/doc/actions.md +++ b/doc/actions.md @@ -65,7 +65,7 @@ Most of the time you access the attributes of the token, but sometimes it is use |text|String|The text matched for the token; translates to a call to getText. Example: $ID.text.| |type|int|The token type (nonzero positive integer) of the token such as INT; translates to a call to getType. Example: $ID.type.| |line|int|The line number on which the token occurs, counting from 1; translates to a call to getLine. Example: $ID.line.| -|pos|int|The character position within the line at which the token’s first character occurs counting from zero; translates to a call togetCharPositionInLine. 
Example: $ID.pos.| +|pos|int|The character position within the line at which the token’s first character occurs counting from zero; translates to a call to getCharPositionInLine. Example: $ID.pos.| |index|int|The overall index of this token in the token stream, counting from zero; translates to a call to getTokenIndex. Example: $ID.index.| |channel|int|The token’s channel number. The parser tunes to only one channel, effectively ignoring off-channel tokens. The default channel is 0 (Token.DEFAULT_CHANNEL), and the default hidden channel is Token.HIDDEN_CHANNEL. Translates to a call to getChannel. Example: $ID.channel.| |int|int|The integer value of the text held by this token; it assumes that the text is a valid numeric string. Handy for building calculators and so on. Translates to Integer.valueOf(text-of-token). Example: $INT.int.| @@ -81,10 +81,10 @@ returnStat : 'return' expr {System.out.println("matched "+$expr.text);} ; Using a rule label looks like this: ``` -returnStat : 'return' e=expr {System.out.println("matched "+e.text);} ; +returnStat : 'return' e=expr {System.out.println("matched "+$e.text);} ; ``` -You can also use `$ followed by the name of the attribute to access the value associated with the currently executing rule. For example, `$start` is the starting token of the current rule. +You can also use `$` followed by the name of the attribute to access the value associated with the currently executing rule. For example, `$start` is the starting token of the current rule. ``` returnStat : 'return' expr {System.out.println("first token "+$start.getText());} ; @@ -98,6 +98,7 @@ returnStat : 'return' expr {System.out.println("first token "+$start.getText()); |start|Token|The first token to be potentially matched by the rule that is on the main token channel; in other words, this attribute is never a hidden token. For rules that end up matching no tokens, this attribute points at the first token that could have been matched by this rule. 
When referring to the current rule, this attribute is available to any action within the rule.| |stop|Token|The last nonhidden channel token to be matched by the rule. When referring to the current rule, this attribute is available only to the after and finally actions.| |ctx|ParserRuleContext|The rule context object associated with a rule invocation. All of the other attributes are available through this attribute. For example, `$ctx.start` accesses the start field within the current rules context object. It’s the same as `$start`.| +|parser|Parser|The parser itself. This attribute can be used, for example, to invoke a method defined in the parser's `@members` section from a semantic predicate.| ## Dynamically-Scoped Attributes diff --git a/doc/antlr-project-testing.md b/doc/antlr-project-testing.md index 1ff46aae9b..8968a4bdfb 100644 --- a/doc/antlr-project-testing.md +++ b/doc/antlr-project-testing.md @@ -2,266 +2,162 @@ ## Introduction -Because ANTLR supports multiple target languages, the unit tests are broken into two groups: the unit tests that test the tool itself (in `tool-testsuite`) and the unit tests that test the parser runtimes (in `antlr4/runtime-testsuite`). The tool tests are straightforward because they are Java code testing Java code; see the section at the bottom of this file. - -The runtime tests must be specified in a generic fashion to work across language targets. Furthermore, we must test the various targets from Java. This usually means Java launching processes to compile, say, C++ and run parsers. - -As of 4.6, we use [a Java descriptor object](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java) to describe each runtime test. 
Unit tests are grouped together into categories such as [ParserExecDescriptors](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java), which has multiple nested descriptor objects, one per test. For example, here is the start of that file: - -```java -public class ParserExecDescriptors { - public static class APlus extends BaseParserTestDescriptor { - public String input = "a b c"; - public String output = "abc\n"; - public String errors = ""; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ID+ { - - }; - ID : 'a'..'z'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - } -``` - -The mysterious `@CommentHasStringValue` annotation is a bit of a hack that allows multi-line strings in Java. This kung fu is required so that we can use Java classes rather than StringTemplate group files to specify runtime tests (the legacy system used those and it was hard to get them right). Here are all the [Runtime test descriptors](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors) organized into groups. +Because ANTLR supports multiple target languages, the unit tests are broken into two groups: +the unit tests that test the tool itself (in `tool-testsuite`) and the unit tests that test the parser runtimes (in `antlr4/runtime-testsuite`). +The tool tests are straightforward because they are Java code testing Java code; see the section at the bottom of this file. -The grammars are strings representing StringTemplates (`ST` objects) so `` will get replace when the unit test file is generated (`Test.java`, `Test.cs`, ...). The `writeln` template must be defined per target. Here are all of the -[Target templates for runtime tests](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates). 
+The runtime tests must be specified in a generic fashion to work across language targets. +Furthermore, the various targets from Java must be tested. -## Requirements +This usually means Java launching processes to compile, say, C++ and run parsers. -In order to perform the tests on all target languages, you need to have the following languages installed: +As of 4.10, a Java descriptor file held as an [RuntimeTestDescriptor.java](../runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java) +is used to represent each runtime test. -* `mono` (e.g., `brew install mono`) on non-Windows boxes (on Windows it uses the Microsoft .net stack). Also must [`xbuild` the runtime](https://github.com/antlr/antlr4/blob/master/doc/releasing-antlr.md) before tests will run; see below -* `nodejs` -* Python 2.7 -* Python 3.6 -* Go -* Swift 4 (via XCode 10.x) tested currently only osx -* clang (for C++ target) -* -To **install into local repository** `~/.m2/repository/org/antlr`, do this: +Each test is described with a text file with various sections and resides in a group directory; +see [directories under descriptors' dir](../runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors). +Here is a sample test descriptor: -```bash -$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux -$ mvn install -DskipTests=true # make sure all artifacts are visible on this machine ``` +[notes] +This is a regression test for blah blah blah... -Now, make sure C# runtime is built and installed locally. +[type] +Parser -```bash -cd ~/antlr/code/antlr4/runtime/CSharp/runtime/CSharp -# kill previous ones manually as "xbuild /t:Clean" didn't seem to do it -find . -name '*.dll' -exec rm {} \; -# build -xbuild /p:Configuration=Release Antlr4.Runtime/Antlr4.Runtime.mono.csproj -``` +[grammar] +grammar T; +a : ID* { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; -C++ test rig automatically builds C++ runtime during tests. Others don't need a prebuilt lib. 
+[start] +a +[input] +a b c -## Running the runtime tests +[output] +"""abc +""" +``` -A single test rig is sufficient to test all targets against all descriptors using the [junit parameterized tests](https://github.com/junit-team/junit4/wiki/parameterized-tests) mechanism. But, that is inconvenient because we often want to test just a single target or perhaps even just a single test within a single group of a single target. I have automatically generated a bunch of -[Target runtime test rigs](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime) that allow developers such flexibility. For example, here are the Python3 test rigs in intellij: +The grammars are strings representing StringTemplates (`ST` objects) so `` will get replace when the unit test file is generated (`Test.java`, `Test.cs`, ...). +The `writeln` template must be defined per target. +Here are all the +[Target templates for runtime tests](../runtime-testsuite/resources/org/antlr/v4/test/runtime/templates). +Use triple-quotes `"""` when whitespace matters (usually input/output sections). - +## Requirements -And the result of testing the entire subdirectory: +In order to perform the tests on all target languages, the following tools should be installed: - +* dotnet +* Node.js +* Python 3 +* Go +* Swift +* Clang (Linux, Mac) or MSBuild (Windows) for C++ +* Dart +* PHP -From `mvn`, on the commandline, you will see: +To **install into local repository** `~/.m2/repository/org/antlr`, do this: ```bash -$ cd antlr4 -$ mvn test -... 
-------------------------------------------------------- - T E S T S -------------------------------------------------------- -Running org.antlr.v4.test.runtime.csharp.TestCompositeLexers -dir /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068612451 -Starting build /usr/bin/xbuild /p:Configuration=Release /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068612451/Antlr4.Test.mono.csproj -dir /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068615081 -Starting build /usr/bin/xbuild /p:Configuration=Release /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068615081/Antlr4.Test.mono.csproj -Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.451 sec -Running org.antlr.v4.test.runtime.csharp.TestCompositeParsers -dir /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864 -antlr reports warnings from [-visitor, -Dlanguage=CSharp, -o, /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864, -lib, /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864, -encoding, UTF-8, /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864/M.g4] -... -[INFO] ------------------------------------------------------------------------ -[INFO] Reactor Summary: -[INFO] -[INFO] ANTLR 4 ............................................ SUCCESS [ 0.445 s] -[INFO] ANTLR 4 Runtime .................................... SUCCESS [ 3.392 s] -[INFO] ANTLR 4 Tool ....................................... SUCCESS [ 1.373 s] -[INFO] ANTLR 4 Maven plugin ............................... SUCCESS [ 1.519 s] -[INFO] ANTLR 4 Runtime Test Annotations ................... SUCCESS [ 0.086 s] -[INFO] ANTLR 4 Runtime Test Processors .................... SUCCESS [ 0.014 s] -[INFO] ANTLR 4 Runtime Tests (2nd generation) ............. 
SUCCESS [06:39 min] -[INFO] ANTLR 4 Tool Tests ................................. SUCCESS [ 6.922 s] -[INFO] ------------------------------------------------------------------------ -[INFO] BUILD SUCCESS -[INFO] ------------------------------------------------------------------------ -[INFO] Total time: 06:53 min -[INFO] Finished at: 2016-11-16T15:36:56-08:00 -[INFO] Final Memory: 44M/458M -[INFO] ------------------------------------------------------------------------ +$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux +$ mvn install -DskipTests # make sure all artifacts are visible on this machine ``` -Note: That is actually result of running the much faster: - -```bash -mvn -Dparallel=methods -DthreadCount=4 install -``` +## Running the runtime tests -## Running test subsets +A single test rig is sufficient to test all targets against all descriptors using the [junit dynamic tests](https://junit.org/junit5/docs/current/user-guide/#writing-tests-dynamic-tests) mechanism. +But it's often convenient to test just a single target or perhaps even just a single test within a single group of a single target. +IntelliJ automatically generates a bunch of +[Target runtime test rigs](../runtime-testsuite/test/org/antlr/v4/test/runtime) that allows developers such flexibility. +For example, here are the Python3 test rigs in IntelliJ: -*From the `runtime-testsuite` dir* +![testrigs](images/testrigs.png) -### Run one test group across targets +And the result of testing the entire subdirectory: -```bash -$ cd runtime-testsuite -$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux -$ mvn -Dtest=TestParserExec test -------------------------------------------------------- - T E S T S -------------------------------------------------------- -Running org.antlr.v4.test.runtime.cpp.TestParserExec -... -Tests run: 32, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 114.283 sec -Running org.antlr.v4.test.runtime.csharp.TestParserExec -... 
-``` +![python3-tests](images/python3-tests.png) -Or run all lexer related tests: +All test are run in parallel both via maven and via IDE. -``` -$ cd runtime-testsuite -$ mvn -Dtest=Test*Lexer* test -------------------------------------------------------- - T E S T S -------------------------------------------------------- -Running org.antlr.v4.test.runtime.cpp.TestCompositeLexers -... -``` +In IntelliJ, it's very easy to go to source by right-clicking on any test and pressing `Jump to source` (F4). -### Run all tests for a single target +## Running test subsets -```bash -$ cd runtime-testsuite -$ mvn -Dtest=java.* test -... -``` +From the `runtime-testsuite` dir -Or run all lexer related tests in Java target only: +### Run all tests for a single target ```bash $ cd runtime-testsuite -$ mvn -Dtest=java.*Lexer* test -... +$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux +$ mvn -Dtest='java.**' test ------------------------------------------------------- T E S T S ------------------------------------------------------- -Running org.antlr.v4.test.runtime.java.TestCompositeLexers -Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.277 sec -Running org.antlr.v4.test.runtime.java.TestLexerErrors -Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.376 sec -Running org.antlr.v4.test.runtime.java.TestLexerExec -Tests run: 38, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.07 sec -Running org.antlr.v4.test.runtime.java.TestSemPredEvalLexer -Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.255 sec - -Results : - -Tests run: 59, Failures: 0, Errors: 0, Skipped: 0 -``` - -## Testing in parallel - -Use this to run tests in parallel: - -```bash -$ export MAVEN_OPTS="-Xmx1G" -$ mvn -Dparallel=methods -DthreadCount=4 test +[INFO] Running org.antlr.v4.test.runtime.java.TestIntegerList +[INFO] Running org.antlr.v4.test.runtime.java.JavaRuntimeTests ... 
-------------------------------------------------------- - T E S T S -------------------------------------------------------- -Concurrency config is parallel='methods', perCoreThreadCount=true, threadCount=4, useUnlimitedThreads=false +[INFO] Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.023 s - in org.antlr.v4.test.runtime.java.TestIntegerList +[INFO] Tests run: 348, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.269 s - in org.antlr.v4.test.runtime.java.JavaRuntimeTests ... ``` -This can be combined with other `-D` above. - ## Adding a runtime test -To add a new runtime test, first determine which [group of tests](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors) it belongs to. Then, add a new [RuntimeTestDescriptor](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java) implementation by subclassing one of: +To add a new runtime test, first determine which [group (dir) of tests](../runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors) it belongs to. +Then, add a new descriptor file implementation by filling in one of these (omitting unused sections): -* [BaseParserTestDescriptor](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseParserTestDescriptor.java); see example [APlus](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java#L7). -* [BaseDiagnosticParserTestDescriptor](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseDiagnosticParserTestDescriptor) if you want to test parser diagnostic output; see [example output](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/FullContextParsingDescriptors.java#L16). 
-* [BaseCompositeParserTestDescriptor](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeParserTestDescriptor.java); see example [BringInLiteralsFromDelegate](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeParsersDescriptors.java#L11) -* [BaseLexerTestDescriptor](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseLexerTestDescriptor.java); see example [ActionPlacement](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java#L12). -* [BaseCompositeLexerTestDescriptor](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeLexerTestDescriptor.java); see example [LexerDelegatorInvokesDelegateRule](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeLexersDescriptors.java#L11) +``` +[notes] +[type] -Each descriptor object describes the following mandatory elements for the test: +[grammar] - * the test type - * the grammar - * the start rule - * the input text to parse or lex - * the expected output - * the expected errors +[slaveGrammar] -Your best bet is to find a similar test in the appropriate group and then copy and paste the descriptor object, creating a new nested class within the test group class. Modify the field definitions to suit your new problem. +[start] -If you need to create a whole new group of tests, it requires a new descriptor class; call it `XDescriptors`. 
Then, in each [target subdirectory](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime), you need to create a new test rig `TestX.java` file: +[input] -```java -package org.antlr.v4.test.runtime.java; +[output] -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +[errors] -@RunWith(Parameterized.class) -public class TestX extends BaseRuntimeTest { - public TestX(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseTest()); - } +[flags] - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(XDescriptors.class, ""); - } -} +[skip] ``` -where `` is replaced with Java, Cpp, CSharp, Python2, ... in the various subdirectories. +Your best bet is to find a similar test in the appropriate group and then copy and paste the descriptor file, creating a new file within the test group dir. +Modify the sections to suit your new problem. ### Ignoring tests -In order to turn off a test for a particular target, we need to use the `ignore` method. Given a target name, a descriptor object can decide whether to ignore the test. This is not always convenient but it is fully general and works well for the one case we have now where we have to ignore `Visitor` tests in all targets except JavaScript. +In order to turn off a test for a particular target, the `skip` section in the descriptor file should be used. +For example, the following skips PHP and Dart targets: + +``` +[skip] +PHP +Dart +``` ### Target API/library testing -Some parts of the runtime API need to be tested with code written specifically in the target language. 
For example, you can see all of the Java runtime API tests here: +Some parts of the runtime API need to be tested with code written specifically in the target language. +For example, all the Java runtime API tests are placed here: -[https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api) +[runtime-testsuite/test/org/antlr/v4/test/runtime/java/api](../runtime-testsuite/test/org/antlr/v4/test/runtime/java/api) -Notice that it is under an `api` dir. The directory above is where all of the `Test*` files go. +Notice that it is under an `api` dir. The directory above is where all of the `*Test*` files go. ### Cross-language actions embedded within grammars @@ -277,7 +173,7 @@ Use instead the language-neutral: ``` -Template file [runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg) has templates like: +Template file [Java.test.stg](../runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg) has templates like: ``` writeln(s) ::= <);>> @@ -287,6 +183,6 @@ that translate generic operations to target-specific language statements or expr ## Adding an ANTLR tool unit test -Just go into the appropriate Java test class in dir [antlr4/tool-testsuite/test/org/antlr/v4/test/tool](https://github.com/antlr/antlr4/tree/master/tool-testsuite/test/org/antlr/v4/test/tool) and add your unit test. +Just go into the appropriate Java test class in dir [antlr4/tool-testsuite/test/org/antlr/v4/test/tool](../tool-testsuite/test/org/antlr/v4/test/tool) and add your unit test. 
diff --git a/doc/building-antlr.md b/doc/building-antlr.md index 494bea81de..76bdbcd0b6 100644 --- a/doc/building-antlr.md +++ b/doc/building-antlr.md @@ -7,7 +7,7 @@ Most programmers do not need the information on this page because they will simp I will assume that the root directory is `/tmp` for the purposes of explaining how to build ANTLR in this document. -*As of 4.6, ANTLR tool and Java-target runtime requires Java 7.* +*As of 4.6, ANTLR tool and Java-target runtime requires Java 7. As of 4.10, we have verified that the tool itself builds with Java 8 and 11.* # Get the source @@ -25,43 +25,53 @@ Checking connectivity... done. Checking out files: 100% (1427/1427), done. ``` -# Compile +# Check your environment + +If you are starting from a clean, minimum Ubuntu OS, check your environment. + ```bash -$ cd /tmp/antlr4 -$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux -$ mvn clean # must be separate, not part of install/compile -$ mvn -DskipTests install +$ sudo apt-get update +$ # Get Java +$ java > /dev/null 2>&1 +$ if [[ "$?" != "0" ]]; then sudo apt install -y openjdk-11-jre-headless; fi +$ # Get Mvn +$ mvn > /dev/null 2>&1 +$ if [[ "$?" != "0" ]]; then sudo apt install -y maven; fi + +``` + +# Compile + +The current maven build seems complicated to me because there is a dependency of the project on itself. The runtime tests naturally depend on the current version being available but it won't compile without the current version. Once you have the generated/installed jar, mvn builds but otherwise there's a dependency on what you are going to build. You will get this error when you try to clean but you can ignore it: + +``` +[INFO] ANTLR 4 Runtime Tests (4th generation) ............. FAILURE [ 0.073 s] ... -[INFO] ------------------------------------------------------------------------ -[INFO] Reactor Summary: -[INFO] -[INFO] ANTLR 4 ............................................ 
SUCCESS [ 0.287 s] -[INFO] ANTLR 4 Runtime .................................... SUCCESS [ 4.915 s] -[INFO] ANTLR 4 Tool ....................................... SUCCESS [ 1.315 s] -[INFO] ANTLR 4 Maven plugin ............................... SUCCESS [ 2.393 s] -[INFO] ANTLR 4 Runtime Test Annotations ................... SUCCESS [ 0.078 s] -[INFO] ANTLR 4 Runtime Test Processors .................... SUCCESS [ 0.019 s] -[INFO] ANTLR 4 Runtime Tests (2nd generation) ............. SUCCESS [ 1.986 s] -[INFO] ANTLR 4 Tool Tests ................................. SUCCESS [ 0.513 s] -[INFO] ------------------------------------------------------------------------ -[INFO] BUILD SUCCESS -[INFO] ------------------------------------------------------------------------ -[INFO] Total time: 12.005 s -[INFO] Finished at: 2016-11-21T11:42:42-08:00 -[INFO] Final Memory: 52M/434M -[INFO] ------------------------------------------------------------------------ +[ERROR] Plugin org.antlr:antlr4-maven-plugin:4.10-SNAPSHOT or one of its dependencies could not be resolved: Could not find artifact org.antlr:antlr4-maven-plugin:jar:4.10-SNAPSHOT -> [Help 1] +``` + +To be super squeaky clean, you can wipe out the repository cache, then do the build: + +``` +$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux +cd /tmp/antlr4 # or wherever you have the software +rm -rf ~/.m2/repository/org/antlr* +mvn clean +mvn -DskipTests install ``` **NOTE:** We do `install` not `compile` as tool tests and such refer to modules that must be pulled from the maven install local cache. +Once you have completed this process once and there is a jar hanging around in the repository cache. 
+ # Installing libs to mvn cache locally To skip the tests (which require all the target languages be installed) and **install into local repository** `~/.m2/repository/org/antlr`, do this: ```bash $ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux -$ mvn install -DskipTests=true # make sure all artifacts are visible on this machine +$ mvn install -DskipTests # make sure all artifacts are visible on this machine ``` You should see these jars (when building 4.6-SNAPSHOT): @@ -69,8 +79,6 @@ You should see these jars (when building 4.6-SNAPSHOT): ```bash /Users/parrt/.m2/repository/org/antlr $ find antlr4* -name '*.jar' antlr4-maven-plugin/4.6-SNAPSHOT/antlr4-maven-plugin-4.6-SNAPSHOT.jar -antlr4-runtime-test-annotation-processors/4.6-SNAPSHOT/antlr4-runtime-test-annotation-processors-4.6-SNAPSHOT.jar -antlr4-runtime-test-annotations/4.6-SNAPSHOT/antlr4-runtime-test-annotations-4.6-SNAPSHOT.jar antlr4-runtime-testsuite/4.6-SNAPSHOT/antlr4-runtime-testsuite-4.6-SNAPSHOT-tests.jar antlr4-runtime-testsuite/4.6-SNAPSHOT/antlr4-runtime-testsuite-4.6-SNAPSHOT.jar antlr4-runtime/4.6-SNAPSHOT/antlr4-runtime-4.6-SNAPSHOT.jar diff --git a/doc/case-insensitive-lexing.md b/doc/case-insensitive-lexing.md deleted file mode 100644 index 4c42484383..0000000000 --- a/doc/case-insensitive-lexing.md +++ /dev/null @@ -1,79 +0,0 @@ -# Case-Insensitive Lexing - -In some languages, keywords are case insensitive meaning that `BeGiN` means the same thing as `begin` or `BEGIN`. ANTLR has two mechanisms to support building grammars for such languages: - -1. Build lexical rules that match either upper or lower case. - * **Advantage**: no changes required to ANTLR, makes it clear in the grammar that the language in this case insensitive. - * **Disadvantage**: might have a small efficiency cost and grammar is a more verbose and more of a hassle to write. - -2. 
Build lexical rules that match keywords in all uppercase and then parse with a custom [character stream](https://github.com/antlr/antlr4/blob/master/runtime/Java/src/org/antlr/v4/runtime/CharStream.java) that converts all characters to uppercase before sending them to the lexer (via the `LA()` method). Care must be taken not to convert all characters in the stream to uppercase because characters within strings and comments should be unaffected. All we really want is to trick the lexer into thinking the input is all uppercase. - * **Advantage**: Could have a speed advantage depending on implementation, no change required to the grammar. - * **Disadvantage**: Requires that the case-insensitive stream and grammar are used in correctly in conjunction with each other, makes all characters appear as uppercase/lowercase to the lexer but some grammars are case sensitive outside of keywords, errors new case insensitive streams and language output targets (java, C#, C++, ...). - -For the 4.7.1 release, we discussed both approaches in [detail](https://github.com/antlr/antlr4/pull/2046) and even possibly altering the ANTLR metalanguage to directly support case-insensitive lexing. We discussed including the case insensitive streams into the runtime but not all would be immediately supported. I decided to simply make documentation that clearly states how to handle this and include the appropriate snippets that people can cut-and-paste into their grammars. - -## Case-insensitive grammars - -As a prime example of a grammar that specifically describes case insensitive keywords, see the -[SQLite grammar](https://github.com/antlr/grammars-v4/blob/master/sqlite/SQLite.g4). To match a case insensitive keyword, there are rules such as - -``` -K_UPDATE : U P D A T E; -``` - -that will match `UpdaTE` and `upDATE` etc... as the `update` keyword. 
This rule makes use of some generically useful fragment rules that you can cut-and-paste into your grammars: - -``` -fragment A : [aA]; // match either an 'a' or 'A' -fragment B : [bB]; -fragment C : [cC]; -fragment D : [dD]; -fragment E : [eE]; -fragment F : [fF]; -fragment G : [gG]; -fragment H : [hH]; -fragment I : [iI]; -fragment J : [jJ]; -fragment K : [kK]; -fragment L : [lL]; -fragment M : [mM]; -fragment N : [nN]; -fragment O : [oO]; -fragment P : [pP]; -fragment Q : [qQ]; -fragment R : [rR]; -fragment S : [sS]; -fragment T : [tT]; -fragment U : [uU]; -fragment V : [vV]; -fragment W : [wW]; -fragment X : [xX]; -fragment Y : [yY]; -fragment Z : [zZ]; -``` - -No special streams are required to use this mechanism for case insensitivity. - -## Custom character streams approach - -The other approach is to use lexical rules that match either all uppercase or all lowercase, such as: - -``` -K_UPDATE : 'UPDATE'; -``` - -Then, when creating the character stream to parse from, we need a custom class that overrides methods used by the lexer. 
Below you will find custom character streams for a number of the targets that you can copy into your projects, but here is how to use the streams in Java as an example: - -```java -CharStream s = CharStreams.fromPath(Paths.get('test.sql')); -CaseChangingCharStream upper = new CaseChangingCharStream(s, true); -Lexer lexer = new SomeSQLLexer(upper); -``` - -Here are implementations of `CaseChangingCharStream` in various target languages: - -* [C#](https://github.com/antlr/antlr4/blob/master/doc/resources/CaseChangingCharStream.cs) -* [Go](https://github.com/antlr/antlr4/blob/master/doc/resources/case_changing_stream.go) -* [Java](https://github.com/antlr/antlr4/blob/master/doc/resources/CaseChangingCharStream.java) -* [JavaScript](https://github.com/antlr/antlr4/blob/master/doc/resources/CaseChangingStream.js) -* [Python2/3](https://github.com/antlr/antlr4/blob/master/doc/resources/CaseChangingStream.py) diff --git a/doc/cpp-target.md b/doc/cpp-target.md index eec7cf88bd..ed0dcd2f8f 100644 --- a/doc/cpp-target.md +++ b/doc/cpp-target.md @@ -1,6 +1,6 @@ # C++ -The C++ target supports all platforms that can either run MS Visual Studio 2013 (or newer), XCode 7 (or newer) or CMake (C++11 required). All build tools can either create static or dynamic libraries, both as 64bit or 32bit arch. Additionally, XCode can create an iOS library. Also see [Antlr4 for C++ with CMake: A practical example](http://blorente.me//Antlr-,-C++-and-CMake-Wait-what.html). +The C++ target supports all platforms that can either run MS Visual Studio 2017 (or newer), XCode 7 (or newer) or CMake (C++17 required). All build tools can either create static or dynamic libraries, both as 64bit or 32bit arch. Additionally, XCode can create an iOS library. Also see [Antlr4 for C++ with CMake: A practical example](http://blorente.me/beyond-the-loop/Antlr-cpp-cmake/). ## How to create a C++ lexer or parser? 
This is pretty much the same as creating a Java lexer or parser, except you need to specify the language target, for example: @@ -65,20 +65,20 @@ int main(int argc, const char* argv[]) { tree::ParseTree *tree = parser.key(); TreeShapeListener listener; - tree::ParseTreeWalker::DEFAULT->walk(&listener, tree); + tree::ParseTreeWalker::DEFAULT.walk(&listener, tree); return 0; } ``` -This example assumes your grammar contains a parser rule named `key` for which the enterKey function was generated. +This example assumes your grammar contains a parser rule named `key` for which the `enterKey` function was generated. -## Specialities of this ANTLR target +## Special cases for this ANTLR target There are a couple of things that only the C++ ANTLR target has to deal with. They are described here. -### Build Aspects +### Code Generation Aspects The code generation (by running the ANTLR4 jar) allows to specify 2 values you might find useful for better integration of the generated files into your application (both are optional): * A **namespace**: use the **`-package`** parameter to specify the namespace you want. @@ -102,8 +102,20 @@ In order to create a static lib in Visual Studio define the `ANTLR4CPP_STATIC` m For gcc and clang it is possible to use the `-fvisibility=hidden` setting to hide all symbols except those that are made default-visible (which has been defined for all public classes in the runtime). +### Compile Aspects + +When compiling generated files, you can configure a compile option according to your needs (also optional): + +* A **thread local DFA macro**: Add `-DANTLR4_USE_THREAD_LOCAL_CACHE=1` to the compilation options +will enable using thread local DFA cache (disabled by default), after that, each thread uses its own DFA. +This will increase memory usage to store thread local DFAs and redundant computation to build thread local DFAs (not too much). +The benefit is that it can improve the concurrent performance running with multiple threads. 
+In other words, when you find your concurrent throughput is not high enough, you should consider turning on this option. + ### Memory Management -Since C++ has no built-in memory management we need to take extra care. For that we rely mostly on smart pointers, which however might cause time penalties or memory side effects (like cyclic references) if not used with care. Currently however the memory household looks very stable. Generally, when you see a raw pointer in code consider this as being managed elsewehere. You should never try to manage such a pointer (delete, assign to smart pointer etc.). +Since C++ has no built-in memory management we need to take extra care. For that we rely mostly on smart pointers, which however might cause time penalties or memory side effects (like cyclic references) if not used with care. Currently however the memory household looks very stable. Generally, when you see a raw pointer in code consider this as being managed elsewhere. You should never try to manage such a pointer (delete, assign to smart pointer etc.). + +Accordingly a parse tree is only valid for the lifetime of its parser. The parser, in turn, is only valid for the lifetime of its token stream, and so on back to the original `ANTLRInputStream` (or equivalent). To retain a tree across function calls you'll need to create and store all of these and `delete` all but the tree when you no longer need it. ### Unicode Support Encoding is mostly an input issue, i.e. when the lexer converts text input into lexer tokens. The parser is completely encoding unaware. @@ -111,7 +123,7 @@ Encoding is mostly an input issue, i.e. when the lexer converts text input into The C++ target always expects UTF-8 input (either in a string or stream) which is then converted to UTF-32 (a char32_t array) and fed to the lexer. ### Named Actions -In order to help customizing the generated files there are a number of additional socalled **named actions**.
These actions are tight to specific areas in the generated code and allow to add custom (target specific) code. All targets support these actions +In order to help customizing the generated files there are a number of additional so-called **named actions**. These actions are tied to specific areas in the generated code and allow you to add custom (target specific) code. All targets support these actions * @parser::header * @parser::members @@ -127,7 +139,7 @@ In addition to that the C++ target supports many more such named actions. Unfort * **@lexer::preinclude** - Placed right before the first #include (e.g. good for headers that must appear first, for system headers etc.). Appears in both lexer h and cpp file. * **@lexer::postinclude** - Placed right after the last #include, but before any class code (e.g. for additional namespaces). Appears in both lexer h and cpp file. * **@lexer::context** - Placed right before the lexer class declaration. Use for e.g. additional types, aliases, forward declarations and the like. Appears in the lexer h file. -* **@lexer::declarations** - Placed in the private section of the lexer declaration (generated sections in all classes strictly follow the pattern: public, protected, privat, from top to bottom). Use this for private vars etc. +* **@lexer::declarations** - Placed in the private section of the lexer declaration (generated sections in all classes strictly follow the pattern: public, protected, private, from top to bottom). Use this for private vars etc. * **@lexer::definitions** - Placed before other implementations in the cpp file (but after *@postinclude*). Use this to implement e.g. private types. For the parser there are the same actions as shown above for the lexer.
In addition to that there are even more actions for visitor and listener classes: diff --git a/doc/creating-a-language-target.md b/doc/creating-a-language-target.md index ff7db290ea..723b865f2c 100644 --- a/doc/creating-a-language-target.md +++ b/doc/creating-a-language-target.md @@ -6,17 +6,76 @@ This document describes how to make ANTLR generate parsers in a new language, *X Creating a new target involves the following key elements: -1. For the tool, create class *X*Target as a subclass of class `Target` in package `org.antlr.v4.codegen.target`. This class describes language specific details about escape characters and strings and so on. There is very little to do here typically. -1. Create *X*.stg in directory tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg. This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express all of the parsing elements needed to generate code. You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... Each of these must be described how to build the indicated chunk of code. Your best bet is to find the closest existing target, copy that template file, and tweak to suit. -1. Create a runtime library to support the parsers generated by ANTLR. Under directory runtime/*X*, you are in complete control of the directory structure as dictated by common usage of that target language. For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below. -1. Create a template file for runtime tests. All you have to do is provide a few templates that indicate how to print values and declare variables. Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code using these templates for each target and check the test results. It needs to know how to define various class fields, compare members and so on. 
You must create a *X*.test.stg file underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime). Again, your best bet is to copy the templates from the closest language to your target and tweak it to suit. +1. For the tool, create class *X*Target as a subclass of class `Target` in package `org.antlr.v4.codegen.target`. + This class describes language specific details about escape characters and strings and so on. + There is very little to do here typically. +2. Create `*X*.stg` in directory `tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg`. + This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express + all the parsing elements needed to generate code. + You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... + For each of these, you must describe how to build the indicated chunk of code. + Your best bet is to find the closest existing target, copy that template file, and tweak to suit. +3. Create a runtime library to support the parsers generated by ANTLR. + Under directory `runtime/*X*`, you are in complete control of the directory structure as dictated by common usage of that target language. + For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. + Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below. +4. Create a template file for runtime tests. + All you have to do is provide a few templates that indicate how to print values and declare variables. + Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code using these templates for each target and check the test results. + It needs to know how to define various class fields, compare members and so on.
+ You must create a `*X*.test.stg` file underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime](../runtime-testsuite/resources/org/antlr/v4/test/runtime) + and `Test.*x*.stg` underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers](../runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers). + Again, your best bet is to copy the templates from the closest language to your target and tweak it to suit. +6. Create test files under [/runtime-testsuite/test/org/antlr/v4/test/runtime](../runtime-testsuite/test/org/antlr/v4/test/runtime). + They will load defined test cases in each test descriptor. + Also add the `/runtime-testsuite/test/org/antlr/v4/test/runtime/X/BaseXTest.java` which defines how test cases will execute and output. +7. Create/edit shell scripts in [/.github](../.github) to run tests in CI pipelines. ## Getting started -1. Fork the `antlr/antlr4` repository at github to your own user so that you have repository `username/antlr4`. -2. Clone `username/antlr4`, the forked repository, to your local disk. Your remote `origin` will be the forked repository on GitHub. Add a remote `upstream` to the original `antlr/antlr4` repository (URL `https://github.com/antlr/antlr4.git`). Changes that you would like to contribute back to the project are done with [pull requests](https://help.github.com/articles/using-pull-requests/). +1. Fork the `antlr/antlr4` repository at GitHub to your own user so that you have repository `username/antlr4`. +2. Clone `username/antlr4`, the forked repository, to your local disk. + Your remote `origin` will be the forked repository on GitHub. + Add a remote `upstream` to the original `antlr/antlr4` repository (URL `https://github.com/antlr/antlr4.git`). + Changes that you would like to contribute back to the project are done with [pull requests](https://help.github.com/articles/using-pull-requests/). 3. Try to build it before doing anything + ```bash $ mvn compile ``` + That should proceed with success. 
See [Building ANTLR](building-antlr.md) for more details. + +## Comparing your target's parsing decision-making with Java's + +ANTLR's power comes from its dynamic parsing strategy, but that means each target +must implement that complicated algorithm. You should compare your target's debug +output for ParserATNSimulator with Java's. + +Run this so we get the right jars before trying this script: + +``` +cd ANTLR-ROOT-DIR +mvn install -DskipTests=true +cd runtime-tests +mvn install -DskipTests=true # yes do it again +``` + +Run the script from `runtime-tests` dir with + +``` +../scripts/traceatn.sh /tmp/JSON.g4 json -target Go /tmp/foo.json +``` + +or whatever your test grammar, start rule, target, test input are. + +### Debugging the PHP target + +Because the PHP target is hosted in a separate repository, you will need to clone the [antlr/antlr-php-runtime](https://github.com/antlr/antlr-php-runtime) +repository into the `runtime/PHP` directory and install the dependencies with `composer install` before you can run the tests. + +``` +git clone -b dev https://github.com/antlr/antlr-php-runtime.git runtime/PHP +cd runtime/PHP +composer install +``` diff --git a/doc/csharp-target.md b/doc/csharp-target.md index a869a82f60..1fcce06de7 100644 --- a/doc/csharp-target.md +++ b/doc/csharp-target.md @@ -36,11 +36,10 @@ using Antlr4.Runtime.Tree; public void MyParseMethod() { String input = "your text to parse here"; - ICharStream stream = CharStreams.fromstring(input); + ICharStream stream = CharStreams.fromString(input); ITokenSource lexer = new MyGrammarLexer(stream); ITokenStream tokens = new CommonTokenStream(lexer); MyGrammarParser parser = new MyGrammarParser(tokens); - parser.BuildParseTree = true; IParseTree tree = parser.StartRule(); } ``` @@ -86,7 +85,7 @@ In order to execute this listener, you would simply add the following lines to t ...
IParseTree tree = parser.StartRule() - only repeated here for reference KeyPrinter printer = new KeyPrinter(); -ParseTreeWalker.DEFAULT.walk(printer, tree); +ParseTreeWalker.Default.Walk(printer, tree); ``` Further information can be found from The Definitive ANTLR Reference book. diff --git a/doc/dart-target.md b/doc/dart-target.md new file mode 100644 index 0000000000..270766fd81 --- /dev/null +++ b/doc/dart-target.md @@ -0,0 +1,129 @@ +# ANTLR4 Runtime for Dart + +From version 4.9 onwards antlr's dart generated code is null sound safety compatible and sets the minimum dart sdk version to 2.12.0. + +### First steps + +#### 1. Install ANTLR4 + +[The getting started guide](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md) +should get you started. + +#### 2. Install the Dart ANTLR runtime + +Each target language for ANTLR has a runtime package for running parser +generated by ANTLR4. The runtime provides a common set of tools for using your parser. + +Install the runtime with the same version as the main ANTLR tool: + +Add this to your package's pubspec.yaml file: +```yaml +... +dependencies: + antlr4: +... +``` + +#### 3. Generate your parser + +You use the ANTLR4 "tool" to generate a parser. These will reference the ANTLR +runtime, installed above. + +Suppose you're using a UNIX system and have set up an alias for the ANTLR4 tool +as described in [the getting started guide](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md). +To generate your Dart parser, run the following command: + +```shell script +antlr4 -Dlanguage=Dart MyGrammar.g4 +``` + +For a full list of antlr4 tool options, please visit the +[tool documentation page](https://github.com/antlr/antlr4/blob/master/doc/tool-options.md). + +### Complete example + +Suppose you're using the JSON grammar from https://github.com/antlr/grammars-v4/tree/master/json. + +Then, invoke `antlr4 -Dlanguage=Dart JSON.g4`. 
The result of this is a +collection of `.dart` including: + +* JsonLexer.dart +* JsonParser.dart +* JsonBaseListener.dart +* JsonListener.dart (if you have not activated the -no-listener option) +* JsonVisitor.dart (if you have activated the -visitor option) + +We'll write a small main func to call the generated parser/lexer +(assuming they are separate). This one writes out the encountered +`ParseTreeContext`'s: + +```dart +import 'package:antlr4/antlr4.dart'; +import 'package:my_project/JSONParser.dart'; +import 'package:my_project/JSONLexer.dart'; + +class TreeShapeListener implements ParseTreeListener { + @override + void enterEveryRule(ParserRuleContext ctx) { + print(ctx.text); + } + + @override + void exitEveryRule(ParserRuleContext node) { + } + + @override + void visitErrorNode(ErrorNode node) { + } + + @override + void visitTerminal(TerminalNode node) { + } +} + +void main(List args) async { + JSONLexer.checkVersion(); + JSONParser.checkVersion(); + final input = await InputStream.fromPath(args[0]); + final lexer = JSONLexer(input); + final tokens = CommonTokenStream(lexer); + final parser = JSONParser(tokens); + parser.addErrorListener(DiagnosticErrorListener()); + final tree = parser.json(); + ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree); +} +``` + +Create a `example.json` file: +```json +{"a":1} +``` + +Parse the input file: + +```shell script +dart bin/main.dart example.json +``` + +The expected output is: + +``` +{"a":1} +{"a":1} +{"a":1} +"a":1 +1 +``` + +### Debug + +We have some logs in place that can ease the debugging process, in order to turn these logs on you can enable the following environment declarations: + +- ANTLR_LEXER_DEBUG +- ANTLR_LEXER_DFA_DEBUG +- ANTLR_PARSER_DEBUG +- ANTLR_PARSER_LIST_ATN_DECISIONS_DEBUG +- ANTLR_PARSER_DFA_DEBUG +- ANTLR_PARSER_RETRY_DEBUG + +If you're using flutter, you can define these variables by adding an `--dart-define` arguments, eg. 
`flutter run --dart-define ANTLR_LEXER_DEBUG=false` diff --git a/doc/faq/index.md b/doc/faq/index.md index 734fc6c13a..9dd72165ff 100644 --- a/doc/faq/index.md +++ b/doc/faq/index.md @@ -38,8 +38,8 @@ This is the main landing page for the ANTLR 4 FAQ. The links below will take you ## Translation -* [ASTs vs parse trees](parse-trees.md) -* [Decoupling input walking from output generation](parse-trees.md) +* [ASTs vs parse trees](translation.md) +* [Decoupling input walking from output generation](translation.md) ## Actions and semantic predicates diff --git a/doc/faq/parse-trees.md b/doc/faq/parse-trees.md index 5a243cedb6..48ce56315d 100644 --- a/doc/faq/parse-trees.md +++ b/doc/faq/parse-trees.md @@ -50,7 +50,7 @@ For writing a compiler, either generate [LLVM-type static-single-assignment](htt ### XPath -XPath works great when you need to find specific nodes, possibly in certain contexts. The context is limited to the parents on the way to the root of the tree. For example, if you want to find all ID nodes, use path `//ID`. If you want all variable declarations, you might use path `//vardecl`. If you only want fields declarations, then you can use some context information via path `/classdef/vardecl`, which would only find vardecls that our children of class definitions. You can merge the results of multiple XPath `findAll()`s simulating a set union for XPath. The only caveat is that the order from the original tree is not preserved when you union multiple `findAll()` sets. +XPath works great when you need to find specific nodes, possibly in certain contexts. The context is limited to the parents on the way to the root of the tree. For example, if you want to find all ID nodes, use path `//ID`. If you want all variable declarations, you might use path `//vardecl`. If you only want field declarations, then you can use some context information via path `/classdef/vardecl`, which would only find vardecls that are children of class definitions.
You can merge the results of multiple XPath `findAll()`s simulating a set union for XPath. The only caveat is that the order from the original tree is not preserved when you union multiple `findAll()` sets. ### Tree pattern matching @@ -70,4 +70,4 @@ scopeStack.peek().define(new VariableSymbol("foo")) That way each listener function does not have to compute its appropriate scope. -Examples: [DefScopesAndSymbols.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/DefScopesAndSymbols.java) and [SetScopeListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/SetScopeListener.java) and [VerifyListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/VerifyListener.java) \ No newline at end of file +Examples: [DefScopesAndSymbols.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/DefScopesAndSymbols.java) and [SetScopeListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/SetScopeListener.java) and [VerifyListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/VerifyListener.java) diff --git a/doc/getting-started.md b/doc/getting-started.md index 4614c67f6b..a4296dfcf5 100644 --- a/doc/getting-started.md +++ b/doc/getting-started.md @@ -1,51 +1,177 @@ # Getting Started with ANTLR v4 -Hi and welcome to the version 4 release of ANTLR! It's named after the fearless hero of the [Crazy Nasty-Ass Honey Badger](http://www.youtube.com/watch?v=4r7wHMg5Yjg) since ANTLR v4 takes whatever you give it--it just doesn't give a crap! See [Why do we need ANTLR v4?](faq/general.md) and the [preface of the ANTLR v4 book](http://media.pragprog.com/titles/tpantlr2/preface.pdf). +Hi and welcome to the version 4 release of ANTLR! See [Why do we need ANTLR v4?](faq/general.md) and the [preface of the ANTLR v4 book](http://media.pragprog.com/titles/tpantlr2/preface.pdf). 
+ +## Getting started the easy way using antlr4-tools + +To play around with ANTLR without having to worry about installing it and the Java needed to execute it, use [antlr4-tools](https://github.com/antlr/antlr4-tools). The only requirement is Python3, which is typically installed on all developer machines on all operating systems. (See below for Windows issue.) + +```bash +$ pip install antlr4-tools +``` + +That command creates `antlr4` and `antlr4-parse` executables that, if necessary, will download and install Java 11 plus the latest ANTLR jar: + +```bash +$ antlr4 +Downloading antlr4-4.13.2-complete.jar +ANTLR tool needs Java to run; install Java JRE 11 yes/no (default yes)? y +Installed Java in /Users/parrt/.jre/jdk-11.0.15+10-jre; remove that dir to uninstall +ANTLR Parser Generator Version 4.13.2 + -o ___ specify output directory where all output is generated + -lib ___ specify location of grammars, tokens files +... +``` + +Let's play with a simple grammar: + +``` +grammar Expr; +prog: expr EOF ; +expr: expr ('*'|'/') expr + | expr ('+'|'-') expr + | INT + | '(' expr ')' + ; +NEWLINE : [\r\n]+ -> skip; +INT : [0-9]+ ; +``` + +### Windows-specific issues + +On Windows, the `pip` command doesn't just work---you need to add the `...\local-packages\python38\scripts` dir to your `PATH`, which itself might require a fun reboot. If you use WSL on Windows, then the pip install will also properly at the scripts directly (if you run from bash shell). + + +1. Go to the Microsoft Store +2. Search in Microsoft Store for Python +3. Select the newest version of Python (3.11). +4. Click the "Get" button. Store installs python and pip at "c:\Users...\AppData\Local\Microsoft\WindowsApps\python.exe" and "c:\Users...\AppData\Local\Microsoft\WindowsApps\pip.exe", respectively. And, it updates the search path immediately with the install. +5. Open a "cmd" terminal. +6. You can now type "python" and "pip", and "pip install antlr4-tools". 7. 
Unfortunately, it does not add that to the search path. +7. Update the search path to contain `c:\Users...\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p8\LocalCache\local-packages\Python310\Scripts`. You may need to install MSYS2, then do a `find /c/ -name antlr4.exe 2> /dev/null` and enter that path. +8. Or, you can set up an alias to antlr4.exe on that path. + +The good news is that the ANTLR4 Python tool downloads the ANTLR jar in a standard location, and you don't need to do that manually. It's also possible to go in a browser, go to python.org, and download the python package. But, it's likely you will need to update the path for antlr4.exe as before. + +### Try parsing with a sample grammar + +To parse and get the parse tree in text form, use: + +```bash +$ antlr4-parse Expr.g4 prog -tree +10+20*30 +^D +(prog:1 (expr:2 (expr:3 10) + (expr:1 (expr:3 20) * (expr:3 30))) ) +``` +(Note: `^D` means control-D and indicates "end of input" on Unix; use `^Z` on Windows.) 
+ +Here's how to get the tokens and trace through the parse: + +```bash +$ antlr4-parse Expr.g4 prog -tokens -trace +10+20*30 +^D +[@0,0:1='10',,1:0] +[@1,2:2='+',<'+'>,1:2] +[@2,3:4='20',,1:3] +[@3,5:5='*',<'*'>,1:5] +[@4,6:7='30',,1:6] +[@5,9:8='',,2:0] +enter prog, LT(1)=10 +enter expr, LT(1)=10 +consume [@0,0:1='10',<8>,1:0] rule expr +enter expr, LT(1)=+ +consume [@1,2:2='+',<3>,1:2] rule expr +enter expr, LT(1)=20 +consume [@2,3:4='20',<8>,1:3] rule expr +enter expr, LT(1)=* +consume [@3,5:5='*',<1>,1:5] rule expr +enter expr, LT(1)=30 +consume [@4,6:7='30',<8>,1:6] rule expr +exit expr, LT(1)= +exit expr, LT(1)= +exit expr, LT(1)= +consume [@5,9:8='',<-1>,2:0] rule prog +exit prog, LT(1)= +``` + +Here's how to get a visual tree view: + +```bash +$ antlr4-parse Expr.g4 prog -gui +10+20*30 +^D +``` + +The following will pop up in a Java-based GUI window: + + + +### Generating parser code + +The previous section used a built-in ANTLR interpreter but typically you will ask ANTLR to generate code in the language used by your project (there are about 10 languages to choose from as of 4.11). Here's how to generate Java code from a grammar: + +```bash +$ antlr4 Expr.g4 +$ ls Expr*.java +ExprBaseListener.java ExprLexer.java ExprListener.java ExprParser.java +``` + +And, here's how to generate C++ code from the same grammar: + +```bash +$ antlr4 -Dlanguage=Cpp Expr.g4 +$ ls Expr*.cpp Expr*.h +ExprBaseListener.cpp ExprLexer.cpp ExprListener.cpp ExprParser.cpp +ExprBaseListener.h ExprLexer.h ExprListener.h ExprParser.h +``` ## Installation -ANTLR is really two things: a tool that translates your grammar to a parser/lexer in Java (or other target language) and the runtime needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library. 
+ANTLR is really two things: a tool written in Java that translates your grammar to a parser/lexer in Java (or other target language) and the runtime library needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library. -The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.7.1-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). +The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.13.2-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). -If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see Integrating ANTLR into Development Systems. +If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see [Integrating ANTLR into Development Systems](https://github.com/antlr/antlr4/blob/master/doc/IDEs.md). ### UNIX -0. Install Java (version 1.6 or higher) +0. Install Java (version 11 or higher) 1. 
Download ``` $ cd /usr/local/lib -$ curl -O https://www.antlr.org/download/antlr-4.7.1-complete.jar +$ curl -O https://www.antlr.org/download/antlr-4.13.2-complete.jar ``` Or just download in browser from website: [https://www.antlr.org/download.html](https://www.antlr.org/download.html) and put it somewhere rational like `/usr/local/lib`. -2. Add `antlr-4.7.1-complete.jar` to your `CLASSPATH`: +if you are using lower version jdk, just download from [website download](https://github.com/antlr/website-antlr4/tree/gh-pages/download) for previous version, and antlr version before 4.13.2 support jdk 1.8 + +2. Add `antlr-4.13.2-complete.jar` to your `CLASSPATH`: ``` -$ export CLASSPATH=".:/usr/local/lib/antlr-4.7.1-complete.jar:$CLASSPATH" +$ export CLASSPATH=".:/usr/local/lib/antlr-4.13.2-complete.jar:$CLASSPATH" ``` It's also a good idea to put this in your `.bash_profile` or whatever your startup script is. 3. Create aliases for the ANTLR Tool, and `TestRig`. ``` -$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.7.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' -$ alias grun='java -Xmx500M -cp "/usr/local/lib/antlr-4.7.1-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' +$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.13.2-complete.jar:$CLASSPATH" org.antlr.v4.Tool' +$ alias grun='java -Xmx500M -cp "/usr/local/lib/antlr-4.13.2-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' ``` ### WINDOWS (*Thanks to Graham Wideman*) -0. Install Java (version 1.6 or higher) -1. Download antlr-4.7.1-complete.jar (or whatever version) from [https://www.antlr.org/download/](https://www.antlr.org/download/) +0. Install Java (version 1.7 or higher) +1. Download antlr-4.13.2-complete.jar (or whatever version) from [https://www.antlr.org/download.html](https://www.antlr.org/download.html) Save to your directory for 3rd party Java libraries, say `C:\Javalib` -2. Add `antlr-4.7.1-complete.jar` to CLASSPATH, either: +2. 
Add `antlr-4.13.2-complete.jar` to CLASSPATH, either: * Permanently: Using System Properties dialog > Environment variables > Create or append to `CLASSPATH` variable * Temporarily, at command line: ``` -SET CLASSPATH=.;C:\Javalib\antlr-4.7.1-complete.jar;%CLASSPATH% +SET CLASSPATH=.;C:\Javalib\antlr-4.13.2-complete.jar;%CLASSPATH% ``` 3. Create short convenient commands for the ANTLR Tool, and TestRig, using batch files or doskey commands: * Batch files (in directory in system PATH) antlr4.bat and grun.bat @@ -71,7 +197,7 @@ Either launch org.antlr.v4.Tool directly: ``` $ java org.antlr.v4.Tool -ANTLR Parser Generator Version 4.7.1 +ANTLR Parser Generator Version 4.13.2 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... @@ -80,8 +206,8 @@ ANTLR Parser Generator Version 4.7.1 or use -jar option on java: ``` -$ java -jar /usr/local/lib/antlr-4.7.1-complete.jar -ANTLR Parser Generator Version 4.7.1 +$ java -jar /usr/local/lib/antlr-4.13.2-complete.jar +ANTLR Parser Generator Version 4.13.2 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... @@ -133,7 +259,12 @@ That pops up a dialog box showing that rule `r` matched keyword `hello` followed The book has lots and lots of examples that should be useful too. You can download them here for free: -[http://pragprog.com/titles/tpantlr2/source_code](http://pragprog.com/titles/tpantlr2/source_code) +[ANTLR reference book examples in Java](https://media.pragprog.com/titles/tpantlr2/code/tpantlr2-code.zip)
+[ANTLR reference book examples in C#](https://github.com/Philippe-Laval/tpantlr2) + + +[Language implementation patterns book examples in Java](https://media.pragprog.com/titles/tpdsl/code/tpdsl-code.zip)
+[Language implementation patterns book examples in C#](https://github.com/Philippe-Laval/tpdsl) Also, there is a large collection of grammars for v4 at github: diff --git a/doc/go-changes.md b/doc/go-changes.md new file mode 100644 index 0000000000..fb6dce814f --- /dev/null +++ b/doc/go-changes.md @@ -0,0 +1,179 @@ +# Changes to the Go Runtime over time + +## v4.12.0 to v4.13.0 + +Strictly speaking, if ANTLR was a go only project following [SemVer](https://semver.org/) release v4.13.0 would be +at least a minor version change and arguably a bump to v5. However, we must follow the ANTLR conventions here or the +release numbers would quickly become confusing. I apologize for being unable to follow the Go release rules absolutely +to the letter. + +There are a lot of changes and improvements in this release, but only the change of repo holding the runtime code, +and possibly the removal of interfaces will cause any code changes. There are no breaking changes to the runtime +interfaces. + +ANTLR Go Maintainer: [Jim Idle](https://github.com/jimidle) - Email: [jimi@idle.ws](mailto:jimi@idle.ws) + +### Code Relocation + +For complicated reasons, including not breaking the builds of some users who use a monorepo and eschew modules, as well +as not making substantial changes to the internal test suite, the Go runtime code will continue to be maintained in +the main ANTLR4 repo `antlr/antlr4`. If you wish to contribute changes to the Go runtime code, please continue to submit +PRs to this main repo, against the `dev` branch. + +The code located in the main repo at about the depth of the Mariana Trench, means that the go tools cannot reconcile +the module correctly. After some debate, it was decided that we would create a dedicated release repo for the Go runtime +so that it will behave exactly as the Go tooling expects. This repo is auto-maintained and keeps both the dev and master +branches up to date. 
+ +Henceforth, all future projects using the ANTLR Go runtime, should import as follows: + +```go +import ( + "github.com/antlr4-go/antlr/v4" + ) +``` + +And use the command: + +```shell +go get github.com/antlr4-go/antlr +``` + +To get the module - `go mod tidy` is probably the best way once imports have been changed. + +Please note that there is no longer any source code kept in the ANTLR repo under `github.com/antlr/antlr4/runtime/Go/antlr`. +If you are using the code without modules, then sync the code from the new release repo. + +### Documentation + +Prior to this release, the godocs were essentially unusable as the go doc code was essentially copied without +change, from teh Java runtime. The godocs are now properly formatted for Go and pkg.dev. + +Please feel free to raise an issue if you find any remaining mistakes. Or submit a PR (remember - not to the new repo). +It is expected that it might take a few iterations to get the docs 100% squeaky clean. + +### Removal of Unnecessary Interfaces + +The Go runtime was originally produced as almost a copy of the Java runtime but with go syntax. This meant that everything +had an interface. There is no need to use interfaces in Go if there is only ever going to be one implementation of +some struct and its methods. Interfaces cause an extra deference at runtime and are detrimental to performance if you +are trying to squeeze out every last nanosecond, which some users will be trying to do. + +This is 99% an internal refactoring of the runtime with no outside effects to the user. + +### Generated Recognizers Return *struct and not Interfaces + +The generated recognizer code generated an interface for the parsers and lexers. As they can only be implemented by the +generated code, the interfaces were removed. This is possibly the only place you may need to make a code change to +your driver code. 
+
+If your code looked like this:
+
+```go
+var lexer = parser.NewMySqlLexer(nil)
+var p = parser.NewMySqlParser(nil)
+```
+
+Or this:
+
+```go
+lexer := parser.NewMySqlLexer(nil)
+p := parser.NewMySqlParser(nil)
+```
+
+Then no changes need to be made. However, if you predeclared the parser and lexer variables with their type, such as
+this:
+
+```go
+var lexer parser.MySqlLexer
+var p parser.MySqlParser
+// ...
+lexer = parser.NewMySqlLexer(nil)
+p = parser.NewMySqlParser(nil)
+```
+
+You will need to change your variable declarations to pointers (note the introduction of the `*` below).
+
+```go
+var lexer *parser.MySqlLexer
+var p *parser.MySqlParser
+// ...
+lexer = parser.NewMySqlLexer(nil)
+p = parser.NewMySqlParser(nil)
+```
+
+This is the only user facing change that I can see. This change though has a very beneficial side effect in that you
+no longer need to cast the interface into a struct so that you can access methods and data within it. Any code you
+had that needed to do that, will be cleaner and faster.
+
+The performance improvement is worth the change and there was no tidy way for me to avoid it.
+
+### Parser Error Recovery Does Not Use Panic
+
+The generated parser code was again essentially trying to be Java code in disguise. This meant that every parser rule
+executed a `defer {}` and a `recover()`, even if there were no outstanding parser errors. Parser errors were issued by
+issuing a `panic()`!
+
+While some major work has been performed in the go compiler and runtime to make `defer {}` as fast as possible,
+`recover()` is (relatively) slow as it is not meant to be used as a general error mechanism, but to recover from say
+an internal library problem if that problem can be recovered to a known state.
+
+The generated code now stores a recognition error and a flag in the main parser struct and uses `goto` to exit the
+rule instead of a `panic()`. As might be imagined, this is significantly faster through the happy path.
It is also
+faster at generating errors.
+
+The ANTLR runtime tests do check error raising and recovery, but if you find any differences in the error handling
+behavior of your parsers, please raise an issue.
+
+### Reduction in use of Pointers
+
+Certain internal structs, such as interval sets, are small and immutable, but were being passed around as pointers
+anyway. These have been changed to use copies, and resulted in significant performance increases in some cases.
+There is more work to come in this regard.
+
+### ATN Deserialization
+
+When the ATN and associated structures are deserialized for the first time, there was a bug that caused a needed
+optimization to fail to be executed. This could have a significant performance effect on recognizers that were written
+in a suboptimal way (as in poorly formed grammars). This is now fixed.
+
+### Prediction Context Caching was not Working
+
+This has a massive effect when reusing a parser for a second and subsequent run. The PredictionContextCache merely
+used memory but did not speed up subsequent executions. This is now fixed, and you should see a big difference in
+performance when reusing a parser. This single paragraph does not do this fix justice ;)
+
+### Cumulative Performance Improvements
+
+Though too numerous to mention, there are a lot of small performance improvements that add up in accumulation. Everything
+from improvements in collection performance to slightly better algorithms or specific non-generic algorithms.
+
+### Cumulative Memory Improvements
+
+The real improvements in memory usage, allocation and garbage collection are saved for the next major release. However,
+if your grammar is well-formed and does not require almost infinite passes using ALL(*), then both memory and performance
+will be improved with this release.
+
+### Bug Fixes
+
+Other small bug fixes have been addressed, such as potential panics in funcs that did not check input parameters.
There
+are a lot of bug fixes in this release that most people were probably not aware of. All known bugs are fixed at the
+time of release preparation.
+
+### A Note on Poorly Constructed Grammars
+
+Though I have made some significant strides on improving the performance of poorly formed grammars, those that are
+particularly bad will see much less of an incremental improvement compared to those that are fairly well-formed.
+
+This is deliberately so in this release as I felt that those people who have put in effort to optimize the form of their
+grammar are looking for performance, where those that have grammars that parse in seconds, tens of seconds or even
+minutes, are presumed to not care about performance.
+
+A particularly good (or bad) example is the MySQL grammar in the ANTLR grammar repository (apologies to the author
+if you read this note - this isn't an attack). Although I have improved its runtime performance
+drastically in the Go runtime, it still takes about a minute to parse complex select statements. As it is constructed,
+there are no magic answers. I will look in more detail at improvements for such parsers, such as not freeing any
+memory until the parse is finished (improved 100x in experiments).
+
+The best advice I can give is to put some effort into the actual grammar itself. Well-formed grammars will potentially
+see some huge improvements with this release. Badly formed grammars, not so much.
diff --git a/doc/go-target.md b/doc/go-target.md index 4f7e64e059..f40709949a 100644 --- a/doc/go-target.md +++ b/doc/go-target.md @@ -1,57 +1,220 @@ # ANTLR4 Language Target, Runtime for Go +### Changes from ANTLR 4.12.0 + +Please see [Changes in ANTLR Go runtimes](go-changes.md), but in summary: + - The Go runtime is now stored in the repo `antlr4-go/antlr` - change your import, remove the old location from + `go.mod` and use `go get github.com/antlr4-go/antlr` + - There are some new `@actions` for adding to the generated import statements and recognizer structure + - The recognizer rules are no longer called via an interface, for performance reasons + - Memory usage improvements + - Performance improvements + - Documentation in true Go format + - Git tags now work correctly with go tools + +### Removal of non v4 code + +Prior to the release of the v4 tagged runtime, the source code for the Go runtime module existed at +`runtime/Go/antlr`, which is the pre-v4 version of the code, and also under `runtime/Go/antlr/v4`. If your project +was not using modules, you could merely sync to the latest hash in the master branch and use the code. This has changed. + +As of the current release, the source code for the Go runtime module has been moved to its own repo in its own +GitHub organization. As of now, you can still use the code without modules, but you must use the code +in the repo at https://github.com/antlr4-go/antlr instead of the code in the main ANTLR repo. + +This is for historic reasons as the code was originally written before modules were a +thing, and the go runtime source was - and the maintainer's version still is - a part of the monorepo +that is `antlr/antlr4/...`. + +Note that I am unable to properly deprecate the go.mod in the non-V4 directory, for hte same reason that I +cannot use tag the v4 module at this depth in the source tree. + +We strongly advise you to use modules, though it is not required. See below for more information. 
+ +ANTLR Go Maintainer: [Jim Idle](https://github.com/jimidle) - Email: [jimi@idle.ws](mailto:jimi@idle.ws) + ### First steps #### 1. Install ANTLR4 -[The getting started guide](getting-started.md) should get you started. +See: [The getting started guide](getting-started.md). #### 2. Get the Go ANTLR runtime -Each target language for ANTLR has a runtime package for running parser generated by ANTLR4. The runtime provides a common set of tools for using your parser. +Each target language for ANTLR has a runtime package for running a recognizer generated by ANTLR4. +The runtime provides a common set of tools for using your parser/lexer. Note that if you have existing projects and have +yet to replace the `v1.x.x` modules with the `v4` modules, then you can skip ahead to the section *Upgrading to v4 +from earlier versions* + +The Go runtime uses modules and has a version path of `/v4` to stay in sync with the runtime versions of all the other +runtimes and the tool itself. -Get the runtime and install it on your GOPATH: +Setup is the same as any other module based project: ```bash -go get github.com/antlr/antlr4/runtime/Go/antlr +$ cd mymodproject +$ go mod init mymodproject ``` -#### 3. Set the release tag (optional) +After which, you can use go get, to get the latest release version of the ANTLR v4 runtime using: -`go get` has no native way to specify a branch or commit. So, when you run it, you'll download the latest commits. This may or may not be your preference. +```bash +go get github.com/antlr4-go/antlr +``` -You'll need to use git to set the release. For example, to set the release tag for release 4.6.0: +If your project was already using the v4 runtime from the main ANTLR repo, then you can upgrade to the latest release +by removing the `github.com/antlr/antlr4/runtime/Go/antlr/v4` reference in your module, and changing the associated +import in your project code. 
The following script may be useful in changing your imports: -```bash -cd $GOPATH/src/github.com/antlr/antlr4 # enter the antlr4 source directory -git checkout tags/4.6.0 # the go runtime was added in release 4.6.0 +```shell +find . -type f \ + -name '*.go' \ + -exec sed -i -e 's,github.com/antlr/antlr4/runtime/Go/antlr/v4,github.com/antlr4-go/antlr/v4,g' {} \; +``` +Note that the import package still imports with the final path as `antlr`, so only the import statement itself needs to +change. + +If you are already using the repo and import `github.com/antlr4-go/antlr/v4` then you can upgrade to the latest version +using the standard. + +```shell +go get -u github.com/antlr4-go/antlr +``` + +If you have not yet upgraded existing projects to the `/v4` module path, consult the section *Upgrading to v4 +from earlier versions* + +The ANTLR runtime has only one external transient dependency, and that is part of the go system itself: + +``` +golang.org/x/exp +``` + +A complete list of releases can be found on [the release page](https://github.com/antlr/antlr4/releases). The Go +runtime will be tagged using standard Go tags, so release 4.13.2 in the `antlr4-go/antlr` repo, will be tagged with +`v4.13.2` and go get will pick that up from the ANTLR repo. + +#### 3. Configuring `go generate` in your project + +In order to promote the use of repeatable builds, it is often useful to add the latest tool jar to your project's +repo and configure a `generate.sh` and `generate.go` file. You can of course globally alias the java command required to run the +tool. Your own CI and dev environment will guide you. + +Here is how you can configure `go generate` for your project, assuming that you follow the general recommendation to +place the ANTLR grammar files in their own package in your project structure. Here is a general template as a starting point: + +``` + . 
+ ├── myproject + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.13.2-complete.jar + │ ├── generate.go + │ └── generate.sh + ├── parsing # Generated code goes here + │ └── error_listeners.go + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go +``` + +Make sure that the package statement in your grammar file(s) reflects the go package the go code will be generated in. +The `generate.go` file then looks like this: + +```golang + package parser + + //go:generate ./generate.sh +``` + +And the `generate.sh` file will look similar to this: + +```shell + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr-4.13.2-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 ``` -A complete list of releases can be found on [the release page](https://github.com/antlr/antlr4/releases). +From the command line at the root of your package - the location of the `go.mod` file - you can then simply issue the command: + +```shell + go generate ./... +``` -#### 4. Generate your parser +If you have not yet run a `go get`, you can now run `go mod tidy` and update your + +#### 4. Generate your parser manually You use the ANTLR4 "tool" to generate a parser. These will reference the ANTLR runtime, installed above. -Suppose you're using a UNIX system and have set up an alias for the ANTLR4 tool as described in [the getting started guide](getting-started.md). To generate your go parser, you'll need to invoke: +Suppose you're using a UNIX system and have set up an alias for the ANTLR4 tool as described in +[the getting started guide](getting-started.md). -```bash -antlr4 -Dlanguage=Go MyGrammar.g4 +To generate your go parser, you'll need to invoke: + +```shell + antlr4 -Dlanguage=Go MyGrammar.g4 ``` For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md). 
+### Upgrading to `/v4` from the default path + +*NB: While switching to new module path would normally imply that the public interface for the runtime has changed, this is +not actually the case - you will not need to change your existing code to upgrade. The main point of the repo change is so +that git tagging works with the ANTLR Go runtime and the go tools.* + +Prior to release v4.11.0 the Go runtime shipped with a module but the module had no version path. This meant that +the tags in the ANTLR repo did not work, as any tag above `v1` must refer to a matching module path. +So the command `go get github.com/antlr/antlr4/runtime/Go/antlr` would just bring in +whatever was the `HEAD` of the master branch. While this *kind of* worked, it is obviously subject to problems and does +not fit properly with the idiomatic ways of Go. + +As of v4.13.0 the runtime code exists in its own repo, `github.com/antlr4-go/antlr`, and is correctly tagged. +However, this means you need to perform a few simple actions in order to upgrade to the `/v4` path. + + - Firstly, make sure that you are using an ANTLR tool jar with a version number of 4.13.0 or greater. + - Next you replace any mention of the old (default) path to ANTLR in your go source files. + - If using modules, remove any existing reference to the ANTLR Go runtime + - Now regenerate your grammar files either manually or using `go generate ./...` (see above) + - Consider whether you can move to using modules in your project + +A quick way to replace the original module path references is to use this script from your module's base directory: + +```shell +find . 
-type f \ + -name '*.go' \ + -exec sed -i -e 's,github.com/antlr/antlr4/runtime/Go/antlr,github.com/antlr4-go/antlr/v4,g' {} \; +``` + +After performing the steps above, and you are using modules issuing: + +```shell +go mod tidy +``` +Should fix up your `go.mod` file to reference only the `v4` version of the ANTLR Go runtime: + +```shell +require github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0 +``` + +From this point on, your go mod commands will work correctly with the ANTLR repo and upgrades and downgrades will work +as you expect. As will branch version such as @dev + ### Referencing the Go ANTLR runtime You can reference the go ANTLR runtime package like this: -```go -import "github.com/antlr/antlr4/runtime/Go/antlr" +```golang +import "github.com/antlr4-go/antlr/v4" ``` ### Complete example -Suppose you're using the JSON grammar from https://github.com/antlr/grammars-v4/tree/master/json. +Suppose you're using the JSON grammar from https://github.com/antlr/grammars-v4/tree/master/json placed in the parser +directory and have initialized your `go mod` file. Then, invoke `antlr4 -Dlanguage=Go JSON.g4`. The result of this is a collection of .go files in the `parser` directory including: ``` @@ -61,16 +224,18 @@ json_lexer.go json_listener.go ``` -Another common option to the ANTLR tool is `-visitor`, which generates a parse tree visitor, but we won't be doing that here. For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md). +Another common option to the ANTLR tool is `-visitor`, which generates a parse tree visitor, but we won't be doing that here. +For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md). -We'll write a small main func to call the generated parser/lexer (assuming they are separate). This one writes out the encountered `ParseTreeContext`'s. 
Suppose the gen'ed parser code is in the `parser` directory relative to this code: +We'll write a small main func to call the generated parser/lexer (assuming they are separate). This one writes out the +encountered `ParseTreeContext`'s. Assuming the generated parser code is in the `parser` directory relative to this code: -``` +```golang package main import ( - "github.com/antlr/antlr4/runtime/Go/antlr" - "./parser" + "github.com/antlr4-go/antlr/v4" + "./parser" // Note that with modules you may not be able to use a relative immport path "os" "fmt" ) @@ -93,12 +258,17 @@ func main() { stream := antlr.NewCommonTokenStream(lexer,0) p := parser.NewJSONParser(stream) p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) - p.BuildParseTrees = true tree := p.Json() antlr.ParseTreeWalkerDefault.Walk(NewTreeShapeListener(), tree) } ``` +Fix up your `go.mod` file: + +```shell +go mod tidy +``` + This one expects the input to be passed on the command line: ``` diff --git a/doc/grammars.md b/doc/grammars.md index 5f88bf20ac..97ad2659ec 100644 --- a/doc/grammars.md +++ b/doc/grammars.md @@ -98,7 +98,7 @@ Not every kind of grammar can import every other kind of grammar: * Parsers can import parsers. * Combined grammars can import parsers or lexers without modes. -ANTLR adds imported rules to the end of the rule list in a main lexer grammar. That means lexer rules in the main grammar get precedence over imported rules. For example, if a main grammar defines rule `IF : ’if’ ;` and an imported grammar defines rule `ID : [a-z]+ ;` (which also recognizes `if`), the imported `ID` won’t hide the main grammar’s `IF` token definition. +ANTLR adds imported rules to the end of the rule list in a main lexer grammar. That means lexer rules in the main grammar get precedence over imported rules. 
For example, if a main grammar defines rule `IF : 'if' ;` and an imported grammar defines rule `ID : [a-z]+ ;` (which also recognizes `if`), the imported `ID` won’t hide the main grammar’s `IF` token definition. ## Tokens Section diff --git a/doc/images/PR-on-dev.png b/doc/images/PR-on-dev.png new file mode 100644 index 0000000000..f0890f8e24 Binary files /dev/null and b/doc/images/PR-on-dev.png differ diff --git a/doc/images/new-antlr-branches.png b/doc/images/new-antlr-branches.png new file mode 100644 index 0000000000..85fd4706d5 Binary files /dev/null and b/doc/images/new-antlr-branches.png differ diff --git a/doc/images/python3-tests.png b/doc/images/python3-tests.png index 3f278e30e4..9d8a9c079e 100644 Binary files a/doc/images/python3-tests.png and b/doc/images/python3-tests.png differ diff --git a/doc/images/testrigs.png b/doc/images/testrigs.png index 00e05b7cef..a9fc04a822 100644 Binary files a/doc/images/testrigs.png and b/doc/images/testrigs.png differ diff --git a/doc/index.md b/doc/index.md index 2ecb5099f4..9dfc2e2766 100644 --- a/doc/index.md +++ b/doc/index.md @@ -57,10 +57,10 @@ For those using Java, here's a great [set of ANTLR in Intellij notes](https://do * [Parsing binary streams](parsing-binary-files.md) -* [Case-Insensitive Lexing](case-insensitive-lexing.md) - * [Parser and lexer interpreters](interpreters.md) +* [Writing target-agnostic grammars](target-agnostic-grammars.md) + * [Resources](resources.md) # Building / releasing ANTLR itself diff --git a/doc/java-target.md b/doc/java-target.md index 60612a9fd5..1760dccbcb 100644 --- a/doc/java-target.md +++ b/doc/java-target.md @@ -134,12 +134,12 @@ Edit the pom.xml file. Now we need to extensively modify the pom.xml file. The f org.antlr antlr4-runtime - 4.5 + 4.9.3 junit junit - 3.8.1 + 4.13.1 @@ -150,8 +150,8 @@ Edit the pom.xml file. Now we need to extensively modify the pom.xml file. 
The f maven-compiler-plugin 3.1 - 1.7 - 1.7 + 1.8 + 1.8 ' ; -CDATA : '' ;OPEN : '<' -> pushMode(INSIDE) ; +CDATA : '' ; +OPEN : '<' -> pushMode(INSIDE) ; ... XMLDeclOpen : ' pushMode(INSIDE) ; SPECIAL_OPEN: ' more, pushMode(PROC_INSTR) ; @@ -305,3 +308,16 @@ As of 4.5, you can also define channel names like enumerations with the followin ``` channels { WSCHANNEL, MYHIDDEN } ``` + +## Lexer Rule Options + +### caseInsensitive + +Defines if the current lexer rule is case-insensitive. +The argument can be `true` or `false`. +The option rewrites `caseInsensitive` grammar option value if it's defined. + +```g4 +options { caseInsensitive=true; } +STRING options { caseInsensitive=false; } : 'N'? '\'' (~'\'' | '\'\'')* '\''; // lower n is not allowed +``` diff --git a/doc/lexicon.md b/doc/lexicon.md index 078dc3e7ae..a8f0427606 100644 --- a/doc/lexicon.md +++ b/doc/lexicon.md @@ -26,8 +26,8 @@ The Javadoc comments are hidden from the parser and are ignored at the moment. Token names always start with a capital letter and so do lexer rules as defined by Java’s `Character.isUpperCase` method. Parser rule names always start with a lowercase letter (those that fail `Character.isUpperCase`). The initial character can be followed by uppercase and lowercase letters, digits, and underscores. Here are some sample names: ``` -ID, LPAREN, RIGHT_CURLY // token names/rules -expr, simpleDeclarator, d2, header_file // rule names +ID, LPAREN, RIGHT_CURLY // token names/lexer rules +expr, simpleDeclarator, d2, header_file // parser rule names ``` Like Java, ANTLR accepts Unicode characters in ANTLR names: @@ -79,13 +79,13 @@ These more or less correspond to `isJavaIdentifierPart` and `isJavaIdentifierSta ## Literals -ANTLR does not distinguish between character and string literals as most languages do. 
All literal strings one or more characters in length are enclosed in single quotes such as `’;’`, `’if’`, `’>=’`, and `’\’` (refers to the one-character string containing the single quote character). Literals never contain regular expressions. +ANTLR does not distinguish between character and string literals as most languages do. All literal strings one or more characters in length are enclosed in single quotes such as `';'`, `'if'`, `'>='`, and `'\''` (refers to the one-character string containing the single quote character). Literals never contain regular expressions. -Literals can contain Unicode escape sequences of the form `’\uXXXX’` (for Unicode code points up to `’U+FFFF’`) or `’\u{XXXXXX}’` (for all Unicode code points), where `’XXXX’` is the hexadecimal Unicode code point value. +Literals can contain Unicode escape sequences of the form `'\uXXXX'` (for Unicode code points up to `'U+FFFF'`) or `'\u{XXXXXX}'` (for all Unicode code points), where `'XXXX'` is the hexadecimal Unicode code point value. -For example, `’\u00E8’` is the French letter with a grave accent: `’è’`, and `’\u{1F4A9}’` is the famous emoji: `’💩’`. +For example, `'\u00E8'` is the French letter with a grave accent: `'è'`, and `'\u{1F4A9}'` is the famous emoji: `'💩'`. -ANTLR also understands the usual special escape sequences: `’\n’` (newline), `’\r’` (carriage return), `’\t’` (tab), `’\b’` (backspace), and `’\f’` (form feed). You can use Unicode code points directly within literals or use the Unicode escape sequences: +ANTLR also understands the usual special escape sequences: `'\n'` (newline), `'\r'` (carriage return), `'\t'` (tab), `'\b'` (backspace), and `'\f'` (form feed). You can use Unicode code points directly within literals or use the Unicode escape sequences: ``` grammar Foreign; @@ -96,7 +96,7 @@ The recognizers that ANTLR generates assume a character vocabulary containing al ## Actions -Actions are code blocks written in the target language. 
You can use actions in a number of places within a grammar, but the syntax is always the same: arbitrary text surrounded by curly braces. You don’t need to escape a closing curly character if it’s in a string or comment: `"}"` or `/*}*/`. If the curlies are balanced, you also don’t need to escape }: `{...}`. Otherwise, escape extra curlies with a backslash: `\{` or `\}`. The action text should conform to the target language as specified with thelanguage option. +Actions are code blocks written in the target language. You can use actions in a number of places within a grammar, but the syntax is always the same: arbitrary text surrounded by curly braces. You don’t need to escape a closing curly character if it’s in a string or comment: `"}"` or `/*}*/`. If the curlies are balanced, you also don’t need to escape }: `{...}`. Otherwise, escape extra curlies with a backslash: `\{` or `\}`. The action text should conform to the target language as specified with the language option. Embedded code can appear in: `@header` and `@members` named actions, parser and lexer rules, exception catching specifications, attribute sections for parser rules (return values, arguments, and locals), and some rule element options (currently predicates). diff --git a/doc/listeners.md b/doc/listeners.md index c3bcad9c1c..834d2ed906 100644 --- a/doc/listeners.md +++ b/doc/listeners.md @@ -19,7 +19,7 @@ public interface JavaListener extends ParseTreeListener { } ``` -where there is an enter and exit method for each rule in the parser grammar. ANTLR also generates a base listener with the fall empty implementations of all listener interface methods, in this case called JavaBaseListener. You can build your listener by subclassing this base and overriding the methods of interest. +where there is an enter and exit method for each rule in the parser grammar. ANTLR also generates a base listener with empty implementations of all listener interface methods, in this case called JavaBaseListener. 
You can build your listener by subclassing this base and overriding the methods of interest. Assuming you've created a listener object called `MyListener`, here is how to call the Java parser and walk the parse tree: @@ -36,3 +36,131 @@ ParseTreeWalker.DEFAULT.walk(extractor, tree); // initiate walk of tree with lis Listeners and visitors are great because they keep application-specific code out of grammars, making grammars easier to read and preventing them from getting entangled with a particular application. See the book for more information on listeners and to learn how to use visitors. (The biggest difference between the listener and visitor mechanisms is that listener methods are called independently by an ANTLR-provided walker object, whereas visitor methods must walk their children with explicit visit calls. Forgetting to invoke visitor methods on a node’s children, means those subtrees don’t get visited.) + +## Listening during the parse + +We can also use listeners to execute code during the parse instead of waiting for a tree walker walks the resulting parse tree. Let's say we have the following simple expression grammar. 
+ +``` +grammar CalcNoLR; + +s : expr EOF ; + +expr: add ((MUL | DIV) add)* ; + +add : atom ((ADD | SUB) atom)* ; + +atom : INT ; + +INT : [0-9]+; +MUL : '*'; +DIV : '/'; +ADD : '+'; +SUB : '-'; +WS : [ \t]+ -> channel(HIDDEN); +``` + +We can create a listener that executes during the parse by implementing the listener interface as before: + + +```java +class CountListener extends CalcNoLRBaseListener { + public int nums = 0; + public boolean execExitS = false; + + @Override + public void exitS(CalcNoLRParser.SContext ctx) { + execExitS = true; + } + + @Override + public void exitAtom(CalcNoLRParser.AtomContext ctx) { + nums++; + } +} +``` + +And then passing it to `addParseListener()`: + +```java +String input = "2 + 8 / 2"; +CalcNoLRLexer lexer = new CalcNoLRLexer(new ANTLRInputStream(input)); +CalcNoLRParser parser = new CalcNoLRParser(new CommonTokenStream(lexer)); +CountListener counter = new CountListener(); +parser.addParseListener(counter); + +// Check that the purses valid first +CalcNoLRParser.SContext context = parser.s(); +String parseTreeS = context.toStringTree(parser); +assertEquals("(s (expr (add (atom 2) + (atom 8)) / (add (atom 2))) )", parseTreeS); +assertEquals(3, counter.nums); +assertEquals(true, counter.execExitS); +``` + +One should not do very complicated work during the parse because the parser is throwing exception to handle syntax errors. If you're complicated code throws different kind of exception it will screw up the parsing and things will go nuts. If you want to catch and properly handle exceptions in your listener code during the parse, you should override this method from `Parser`: + +```java +protected boolean listenerExceptionOccurred = false; + +/** + * Notify any parse listeners of an exit rule event. 
+ * + * @see #addParseListener + */ +@override +protected void triggerExitRuleEvent() { + if ( listenerExceptionOccurred ) return; + try { + // reverse order walk of listeners + for (int i = _parseListeners.size() - 1; i >= 0; i--) { + ParseTreeListener listener = _parseListeners.get(i); + _ctx.exitRule(listener); + listener.exitEveryRule(_ctx); + } + } + catch (Throwable e) { + // If an exception is thrown in the user's listener code, we need to bail out + // completely out of the parser, without executing anymore user code. We + // must also stop the parse otherwise other listener actions will attempt to execute + // almost certainly with invalid results. So, record the fact an exception occurred + listenerExceptionOccurred = true; + throw e; + } +} +``` + +Now, if you throw an exception inside one of the listener methods: + +```java +// Now throw an exception in the listener +class ErrorListener extends CalcNoLRBaseListener { + public boolean execExitS = false; + public boolean execExitAtom = false; + + @Override + public void exitS(CalcNoLRParser.SContext ctx) { + execExitS = true; + } + + @Override + public void exitAtom(CalcNoLRParser.AtomContext ctx) { + execExitAtom = true; + throw new NullPointerException("bail out"); + } +} +``` + +then the exception will properly cause the parser to bailout and the exception will not be thrown out: + +``` +java.lang.NullPointerException: bail out + + at org.antlr.v4.test.runtime.java.api.TestParseListener$2ErrorListener.exitAtom(TestParseListener.java:102) + at org.antlr.v4.test.runtime.java.api.CalcNoLRParser$AtomContext.exitRule(CalcNoLRParser.java:311) + at org.antlr.v4.runtime.Parser.triggerExitRuleEvent(Parser.java:412) + at org.antlr.v4.runtime.Parser.exitRule(Parser.java:654) + at org.antlr.v4.test.runtime.java.api.CalcNoLRParser.atom(CalcNoLRParser.java:336) + at org.antlr.v4.test.runtime.java.api.CalcNoLRParser.add(CalcNoLRParser.java:261) + at 
org.antlr.v4.test.runtime.java.api.CalcNoLRParser.expr(CalcNoLRParser.java:181) + at org.antlr.v4.test.runtime.java.api.CalcNoLRParser.s(CalcNoLRParser.java:123) +``` diff --git a/doc/options.md b/doc/options.md index 7ce277551e..f5faa7ec37 100644 --- a/doc/options.md +++ b/doc/options.md @@ -12,7 +12,10 @@ where a value can be an identifier, a qualified identifier (for example, a.b.c), All grammars can use the following options. In combined grammars, all options except language pertain only to the generated parser. Options may be set either within the grammar file using the options syntax (described above) or when invoking ANTLR on the command line, using the `-D` option. (see Section 15.9, [ANTLR Tool Command Line Options](tool-options.md).) The following examples demonstrate both mechanisms; note that `-D` overrides options within the grammar. -* `superClass`. Set the superclass of the generated parser or lexer. For combined grammars, it sets the superclass of the parser. +### `superClass` + +Set the superclass of the generated parser or lexer. For combined grammars, it sets the superclass of the parser. + ``` $ cat Hi.g4 grammar Hi; @@ -23,12 +26,20 @@ public class HiParser extends XX { $ grep 'public class' HiLexer.java public class HiLexer extends Lexer { ``` -* `language` Generate code in the indicated language, if ANTLR is able to do so. Otherwise, you will see an error message like this: + +### `language` + +Generate code in the indicated language, if ANTLR is able to do so. Otherwise, you will see an error message like this: + ``` $ antlr4 -Dlanguage=C MyGrammar.g4 error(31): ANTLR cannot generate C code as of version 4.0 ``` -* `tokenVocab` ANTLR assigns token type numbers to the tokens as it encounters them in a file. To use different token type values, such as with a separate lexer, use this option to have ANTLR pull in the tokens file. ANTLR generates a tokens file from each grammar. 
+ +### `tokenVocab` + +ANTLR assigns token type numbers to the tokens as it encounters them in a file. To use different token type values, such as with a separate lexer, use this option to have ANTLR pull in the tokens file. ANTLR generates a tokens file from each grammar. + ``` $ cat SomeLexer.g4 lexer grammar SomeLexer; @@ -48,7 +59,11 @@ B=3 C=4 ID=1 ``` -* `TokenLabelType` ANTLR normally uses type Token when it generates variables referencing tokens. If you have passed a TokenFactory to your parser and lexer so that they create custom tokens, you should set this option to your specific type. This ensures that the context objects know your type for fields and method return values. + +### `TokenLabelType` + +ANTLR normally uses type Token when it generates variables referencing tokens. If you have passed a TokenFactory to your parser and lexer so that they create custom tokens, you should set this option to your specific type. This ensures that the context objects know your type for fields and method return values. + ``` $ cat T2.g4 grammar T2; @@ -58,20 +73,46 @@ $ antlr4 T2.g4 $ grep MyToken T2Parser.java public MyToken x; ``` -* `contextSuperClass`. Specify the super class of parse tree internal nodes. Default is `ParserRuleContext`. Should derive from ultimately `RuleContext` at minimum. -Java target can use `contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum` for convenience. It adds a backing field for `altNumber`, the alt matched for the associated rule node. -## Rule Options +### `contextSuperClass` -There are currently no valid rule-level options, but the tool still supports the following syntax for future use: +Specify the super class of parse tree internal nodes. Default is `ParserRuleContext`. Should derive from ultimately `RuleContext` at minimum. +Java target can use `contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum` for convenience. It adds a backing field for `altNumber`, the alt matched for the associated rule node. 
+### `caseInsensitive` + +As of 4.10, ANTLR supports case-insensitive lexers using a grammar option. For example, the parser from the following grammar: + +```g4 +lexer grammar L; +options { caseInsensitive = true; } +ENGLISH_TOKEN: [a-z]+; +GERMAN_TOKEN: [äéöüß]+; +FRENCH_TOKEN: [àâæ-ëîïôœùûüÿ]+; +CROATIAN_TOKEN: [ćčđšž]+; +ITALIAN_TOKEN: [àèéìòù]+; +SPANISH_TOKEN: [áéíñóúü¡¿]+; +GREEK_TOKEN: [α-ω]+; +RUSSIAN_TOKEN: [а-я]+; +WS: [ ]+ -> skip; ``` -rulename -options {...} - : ... - ; + +matches words such as the following: + +``` +abcXYZ äéöüßÄÉÖÜß àâæçÙÛÜŸ ćčđĐŠŽ àèéÌÒÙ áéÚÜ¡¿ αβγΧΨΩ абвЭЮЯ ``` +ANTLR considers only one-length chars in all cases. For instance, german lower `ß` is not treated as upper `ss` and vice versa. + +The mechanism works by automatically transforming grammar references to characters to there upper/lower case equivalent; e.g., `a` to `[aA]`. This means that you do not need to convert your input characters to uppercase--token text will be as it appears in the input stream. + +## Rule Options + +### caseInsensitive + +The tool support `caseInsensitive` lexer rule option that is described in [lexer-rules.md](lexer-rules.md#caseinsensitive). + ## Rule Element Options Token options have the form `T` as we saw in Section 5.4, [Dealing with Precedence, Left Recursion, and Associativity](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). The only token option is `assoc`, and it accepts values `left` and `right`. Here’s a sample grammar with a left-recursive expression rule that specifies a token option on the `^` exponent operator token: diff --git a/doc/parser-rules.md b/doc/parser-rules.md index 73c363b990..e1a32fe736 100644 --- a/doc/parser-rules.md +++ b/doc/parser-rules.md @@ -196,7 +196,7 @@ ANTLR generates a field holding the list of context objects: ## Rule Elements -Rule elements specify what the parser should do at a given moment just like statements in a programming language. 
The elements can be rule, token, string literal like expression, ID, and ’return’. Here’s a complete list of the rule elements (we’ll look at actions and predicates in more detail later): +Rule elements specify what the parser should do at a given moment just like statements in a programming language. The elements can be rule, token, string literal like expression, ID, and 'return'. Here’s a complete list of the rule elements (we’ll look at actions and predicates in more detail later): @@ -207,7 +207,7 @@ Rule elements specify what the parser should do at a given moment just like stat Match token T at the current input position. Tokens always begin with a capital letter. - @@ -232,7 +232,7 @@ Match any single token except for the end of file token. The “dot” operator
’literal’ +'literal' Match the string literal at the current input position. A string literal is simply a token with a fixed string.
-When you want to match everything but a particular token or set of tokens, use the `~` “not” operator. This operator is rarely used in the parser but is available. `~INT` matches any token except the `INT` token. `~’,’` matches any token except the comma. `~(INT|ID)` matches any token except an INT or an ID. +When you want to match everything but a particular token or set of tokens, use the `~` “not” operator. This operator is rarely used in the parser but is available. `~INT` matches any token except the `INT` token. `~','` matches any token except the comma. `~(INT|ID)` matches any token except an INT or an ID. Token, string literal, and semantic predicate rule elements can take options. See Rule Element Options. @@ -486,4 +486,4 @@ Invalid input would cause `config` to return immediately without matching any in ``` file : element* EOF; // don't stop early. must match all input -``` \ No newline at end of file +``` diff --git a/doc/php-target.md b/doc/php-target.md index 75eae465ce..a87410fff9 100644 --- a/doc/php-target.md +++ b/doc/php-target.md @@ -15,7 +15,7 @@ generated by ANTLR4. The runtime provides a common set of tools for using your p Install the runtime with Composer: ```bash -composer install antlr/antlr4 +composer require antlr/antlr4-php-runtime ``` #### 3. 
Generate your parser @@ -84,7 +84,6 @@ $lexer = new JSONLexer($input); $tokens = new CommonTokenStream($lexer); $parser = new JSONParser($tokens); $parser->addErrorListener(new DiagnosticErrorListener()); -$parser->setBuildParseTree(true); $tree = $parser->json(); ParseTreeWalker::default()->walk(new TreeShapeListener(), $tree); @@ -108,4 +107,4 @@ The expected output is: {"a":1} "a":1 1 -``` \ No newline at end of file +``` diff --git a/doc/predicates.md b/doc/predicates.md index 09998c4257..e04d2464b0 100644 --- a/doc/predicates.md +++ b/doc/predicates.md @@ -136,7 +136,7 @@ If, on the other hand, the next character after input `enum` is a letter, then o Predicates come into play by pruning the set of viable lexer rules. When the lexer encounters a false predicate, it deactivates that rule just like parsers deactivate alternatives with false predicates. -Like parser predicates, lexer predicates can't depend on side effects from lexer actions. That's because actions can only execute after the lexer positively identifies the rule to match. Since predicates are part of the rule selection process, they can't rely on action side effects. Lexer actions must appear after predicates in lexer rules. As an example, here's another way to match enum as a keyword in the lexer: +Like parser predicates, lexer predicates can't depend on side effects from lexer actions. That said, the predicate can depend on a side effect of an action that occured during the recognition of the previous token. That's because actions can only execute after the lexer positively identifies the rule to match. Since predicates are part of the rule selection process, they can't rely on action side effects created by actions in currently-prospective rules. Lexer actions must appear after predicates in lexer rules. As an example, here's another way to match enum as a keyword in the lexer: ``` ENUM: [a-z]+ {getText().equals("enum")}? 
@@ -162,3 +162,17 @@ That works great, but it's really just for instructional purposes. It's easier t ``` ENUM : 'enum' ; ``` + +Here's another example of a predicate. It's important to note that the predicate is evaluated before the action because actions are only executed if the lexer rule matches. The actions are not executed in line; they are collected and executed en masse later. + +``` +INDENT : [ \t]+ {System.out.println("INDENT");} {this.getCharPositionInLine()==0}? ; +``` + +For more information on how actions and predicates operate in the lexer, see [Lexer actions and semantic predicates are executed out of order](https://github.com/antlr/antlr4/issues/3611) and [Lexer.getCharIndex() return value not behaving as expected](https://github.com/antlr/antlr4/issues/3606). The lexer rule that will not work as expected is: + +``` +Stuff : ( 'a'+ {count++;} | 'b') 'c' 'd' {count == 3}? ; +``` + +The `count++` code will not execute until after `Stuff` has been recognized (assuming count!=3). \ No newline at end of file diff --git a/doc/python-target.md b/doc/python-target.md index 7ed73c281e..1ef34e74d5 100644 --- a/doc/python-target.md +++ b/doc/python-target.md @@ -1,129 +1,262 @@ -# Python (2 and 3) +# Python 3 -The examples from the ANTLR 4 book converted to Python are [here](https://github.com/jszheng/py3antlr4book). +## Requirements -There are 2 Python targets: `Python2` and `Python3`. This is because there is only limited compatibility between those 2 versions of the language. Please refer to the [Python documentation](https://wiki.python.org/moin/Python2orPython3) for full details. - -How to create a Python lexer or parser? -This is pretty much the same as creating a Java lexer or parser, except you need to specify the language target, for example: - -``` -$ antlr4 -Dlanguage=Python2 MyGrammar.g4 -``` - -or - -``` -$ antlr4 -Dlanguage=Python3 MyGrammar.g4 -``` - -For a full list of antlr4 tool options, please visit the tool documentation page.
- -## Where can I get the runtime? - -Once you've generated the lexer and/or parser code, you need to download the runtime. The Python runtimes are available from PyPI: - -* https://pypi.python.org/pypi/antlr4-python2-runtime/ -* https://pypi.python.org/pypi/antlr4-python3-runtime/ - -The runtimes are provided in the form of source code, so no additional installation is required. - -We will not document here how to refer to the runtime from your Python project, since this would differ a lot depending on your project type and IDE. - -## How do I run the generated lexer and/or parser? - -Let's suppose that your grammar is named, as above, "MyGrammar". Let's suppose this parser comprises a rule named "startRule". The tool will have generated for you the following files: - -* MyGrammarLexer.py -* MyGrammarParser.py -* MyGrammarListener.py (if you have not activated the -no-listener option) -* MyGrammarVisitor.py (if you have activated the -visitor option) - -(Developers used to Java/C# AntLR will notice that there is no base listener or visitor generated, this is because Python having no support for interfaces, the generated listener and visitor are fully fledged classes) - -Now a fully functioning script might look like the following: - -```python -import sys -from antlr4 import * -from MyGrammarLexer import MyGrammarLexer -from MyGrammarParser import MyGrammarParser - -def main(argv): - input_stream = FileStream(argv[1]) - lexer = MyGrammarLexer(input_stream) - stream = CommonTokenStream(lexer) - parser = MyGrammarParser(stream) - tree = parser.startRule() - -if __name__ == '__main__': - main(sys.argv) -``` - -This program will work. 
But it won't be useful unless you do one of the following: - -* you visit the parse tree using a custom listener -* you visit the parse tree using a custom visitor -* your grammar comprises production code (like ANTLR3) - -(please note that production code is target specific, so you can't have multi target grammars that include production code, except for very limited use cases, see below) - -## How do I create and run a custom listener? - -Let's suppose your MyGrammar grammar comprises 2 rules: "key" and "value". The antlr4 tool will have generated the following listener: - -```python -class MyGrammarListener(ParseTreeListener): - def enterKey(self, ctx): - pass - def exitKey(self, ctx): - pass - def enterValue(self, ctx): - pass - def exitValue(self, ctx): - pass -``` - -In order to provide custom behavior, you might want to create the following class: - -```python -class KeyPrinter(MyGrammarListener): - def exitKey(self, ctx): - print("Oh, a key!") -``` - -In order to execute this listener, you would simply add the following lines to the above code: - -``` - ... - tree = parser.startRule() - only repeated here for reference - printer = KeyPrinter() - walker = ParseTreeWalker() - walker.walk(printer, tree) -``` - -Further information can be found from the ANTLR 4 definitive guide. +You will need to install Python and Pip, version 3.6 or better. +See https://www.python.org/downloads/ +and https://www.geeksforgeeks.org/how-to-install-pip-on-windows/. + +## A simple example targeting Python3 + +An example of a parser for the Python3 target consists of the following files. +* An Antlr4 grammar, e.g., Expr.g4: + ```antlr + grammar Expr; + start_ : expr (';' expr)* EOF; + expr : atom | ('+' | '-') expr | expr '**' expr | expr ('*' | '/') expr | expr ('+' | '-') expr | '(' expr ')' | atom ; + atom : INT ; + INT : [0-9]+ ; + WS : [ \t\n\r]+ -> skip ; + ``` +* Driver.py: +The driver code opens a file, creates a lexer, token stream, +and parser, then calls the parser. 
+ ```python + import sys + from antlr4 import * + from ExprLexer import ExprLexer + from ExprParser import ExprParser + from VisitorInterp import VisitorInterp + + def main(argv): + input_stream = FileStream(argv[1]) + lexer = ExprLexer(input_stream) + stream = CommonTokenStream(lexer) + parser = ExprParser(stream) + tree = parser.start_() + + if __name__ == '__main__': + main(sys.argv) + ``` +* requirements.txt: +This file contains a list of the +required packages for the program. Required +packages are downloaded by `pip`. The file +must include a reference to the Antlr Python3 runtime. + ``` + antlr4-python3-runtime==4.13.0 + ``` +* A build script, e.g., build.sh: +You should provide a script that builds the program. + ``` + pip install -r requirements.txt + antlr4 -Dlanguage=Python3 Expr.g4 + ``` +_It is vital that the versions for the +Antlr tool used to generate the parser +and the Antlr Python3 runtime match. +E.g., 4.13.0. Using build files will help +eliminate common errors from happening._ + +_For a list of antlr4 tool options, please visit [ANTLR Tool Command Line Options](https://github.com/antlr/antlr4/blob/master/doc/tool-options.md)._ +* Input, e.g., input.txt: + ``` + -(1 + 2)/3; + 1; + 2+3; + 8*9 + ``` +* A run script, which runs your program. + ``` + python Driver.py input.txt + ``` + +## Parse tree traversal + +Tree traversal is used to implement +[static](https://en.wikipedia.org/wiki/Static_program_analysis) or [dynamic](https://en.wikipedia.org/wiki/Dynamic_program_analysis) +program analysis. +Antlr generates two types of tree traversals: visitors and listeners. + +Understanding when to choose a visitor versus a listener is a good idea. +For further information, see https://tomassetti.me/listeners-and-visitors/. + +A visitor is the best choice when computing only a single [synthesized attribute](https://en.wikipedia.org/wiki/Attribute_grammar#Synthesized_attributes) +or when you want to control the order of parse tree nodes visited. 
+Alternatively, a listener is the best choice when computing both synthesized +and [inherited attributes](https://en.wikipedia.org/wiki/Attribute_grammar#Inherited_attributes). + +In many situations, they are interchangeable. + +### Visitors + +Antlr visitors generally implement a post-order tree walk. If you write +`visit...` methods, the method must contain code to visit the children +in the order you want. For a post-order tree walk, visit the children first. + +To implement a visitor, add the `-visitor` option to the `antlr4` command. +Create a class that inherits from the generated visitor, +then add `visit` methods that implement the analysis. Your driver code +should call the `visit()` method for the root of the parse tree. + +For example, the following code implements an expression evaluator for the Expr.g4 grammar using a visitor. -The Python implementation of ANTLR is as close as possible to the Java one, so you shouldn't find it difficult to adapt the examples for Python. +* Driver.py: + ```python + import sys + from antlr4 import * + from ExprLexer import ExprLexer + from ExprParser import ExprParser + from VisitorInterp import VisitorInterp -## Target agnostic grammars + def main(argv): + input_stream = FileStream(argv[1]) + lexer = ExprLexer(input_stream) + stream = CommonTokenStream(lexer) + parser = ExprParser(stream) + tree = parser.start_() + if parser.getNumberOfSyntaxErrors() > 0: + print("syntax errors") + else: + vinterp = VisitorInterp() + vinterp.visit(tree) -If your grammar is targeted to Python only, you may ignore the following. But if your goal is to get your Java parser to also run in Python, then you might find it useful. + if __name__ == '__main__': + main(sys.argv) + ``` +* VisitorInterp.py: + ```python + import sys + from antlr4 import * + from ExprParser import ExprParser + from ExprVisitor import ExprVisitor -1. Do not embed production code inside your grammar. This is not portable and will not be. 
Move all your code to listeners or visitors. -1. The only production code absolutely required to sit with the grammar should be semantic predicates, like: -``` -ID {$text.equals("test")}? -``` + class VisitorInterp(ExprVisitor): + def visitAtom(self, ctx:ExprParser.AtomContext): + return int(ctx.getText()) -Unfortunately, this is not portable, but you can work around it. The trick involves: + def visitExpr(self, ctx:ExprParser.ExprContext): + if ctx.getChildCount() == 3: + if ctx.getChild(0).getText() == "(": + return self.visit(ctx.getChild(1)) + op = ctx.getChild(1).getText() + v1 = self.visit(ctx.getChild(0)) + v2 = self.visit(ctx.getChild(2)) + if op == "+": + return v1 + v2 + if op == "-": + return v1 - v2 + if op == "*": + return v1 * v2 + if op == "/": + return v1 / v2 + return 0 + if ctx.getChildCount() == 2: + opc = ctx.getChild(0).getText() + if opc == "+": + return self.visit(ctx.getChild(1)) + if opc == "-": + return - self.visit(ctx.getChild(1)) + return 0 + if ctx.getChildCount() == 1: + return self.visit(ctx.getChild(0)) + return 0 -* deriving your parser from a parser you provide, such as BaseParser -* implementing utility methods in this BaseParser, such as "isEqualText" -* adding a "self" field to the Java/C# BaseParser, and initialize it with "this" + def visitStart_(self, ctx:ExprParser.Start_Context): + for i in range(0, ctx.getChildCount(), 2): + print(self.visit(ctx.getChild(i))) + return 0 + ``` -Thanks to the above, you should be able to rewrite the above semantic predicate as follows: +### Listeners + +Antlr listeners perform an LR tree traversal. `enter` and `exit` methods are +called during the traversal. A parse tree node is visited twice, first for +the `enter` method, then the `exit` method after all children have been walked. + +To implement a listener, add the `-listener` option to the `antlr4` command. +Add a class that inherits from the generated listener +with code that implements the analysis.
+ +The following example implements an expression evaluator using a listener. + +* Driver.py: + ```python + import sys + from antlr4 import * + from ExprLexer import ExprLexer + from ExprParser import ExprParser + from ListenerInterp import ListenerInterp + + def main(argv): + input_stream = FileStream(argv[1]) + lexer = ExprLexer(input_stream) + stream = CommonTokenStream(lexer) + parser = ExprParser(stream) + tree = parser.start_() + if parser.getNumberOfSyntaxErrors() > 0: + print("syntax errors") + else: + linterp = ListenerInterp() + walker = ParseTreeWalker() + walker.walk(linterp, tree) + + if __name__ == '__main__': + main(sys.argv) + ``` + * ListenerInterp.py: + ```python + import sys + from antlr4 import * + from ExprParser import ExprParser + from ExprListener import ExprListener + + class ListenerInterp(ExprListener): + def __init__(self): + self.result = {} + + def exitAtom(self, ctx:ExprParser.AtomContext): + self.result[ctx] = int(ctx.getText()) + + def exitExpr(self, ctx:ExprParser.ExprContext): + if ctx.getChildCount() == 3: + if ctx.getChild(0).getText() == "(": + self.result[ctx] = self.result[ctx.getChild(1)] + else: + opc = ctx.getChild(1).getText() + v1 = self.result[ctx.getChild(0)] + v2 = self.result[ctx.getChild(2)] + if opc == "+": + self.result[ctx] = v1 + v2 + elif opc == "-": + self.result[ctx] = v1 - v2 + elif opc == "*": + self.result[ctx] = v1 * v2 + elif opc == "/": + self.result[ctx] = v1 / v2 + else: + self.result[ctx] = 0 + elif ctx.getChildCount() == 2: + opc = ctx.getChild(0).getText() + if opc == "+": + v = self.result[ctx.getChild(1)] + self.result[ctx] = v + elif opc == "-": + v = self.result[ctx.getChild(1)] + self.result[ctx] = - v + elif ctx.getChildCount() == 1: + self.result[ctx] = self.result[ctx.getChild(0)] + + def exitStart_(self, ctx:ExprParser.Start_Context): + for i in range(0, ctx.getChildCount(), 2): + print(self.result[ctx.getChild(i)]) + ``` + +Further information can be found from the ANTLR 4 definitive
guide. + +## Examples + +The examples from the ANTLR 4 book converted to Python are [here](https://github.com/jszheng/py3antlr4book). -``` -ID {$self.isEqualText($text,"test")}? -``` +There are many examples of grammars that target the Python3 target in the +[grammars-v4 Github repository](https://github.com/antlr/grammars-v4). diff --git a/doc/releasing-antlr.md b/doc/releasing-antlr.md index cd1d0b9b47..6c1ffc516d 100644 --- a/doc/releasing-antlr.md +++ b/doc/releasing-antlr.md @@ -2,90 +2,112 @@ ## Github -Create a pre-release or full release at github; [Example 4.5-rc-1](https://github.com/antlr/antlr4/releases/tag/4.5-rc-1). +### Get dev merged into master + +Do this or make a PR: + +```bash +cd ~/antlr/code/antlr4 +git checkout master +git merge dev +``` + +### Turn on DCO Enforcement + +As of 4.10.1, we will be using the Linux DCO not the previous contributors license agreement that required signing the file. Now, we use the DCO and contributors must use `-s` on each commit to the branch associated with a pull request. + +See [GitHub App DCO](https://github.com/apps/dco). + +Make sure this feature is turned on for the `antlr4` repo upon release. ### Delete existing release tag Wack any existing tag as mvn will create one and it fails if already there. ``` -$ git tag -d 4.8 -$ git push origin :refs/tags/4.8 -$ git push upstream :refs/tags/4.8 +$ git tag -d 4.13.2 +$ git push origin :refs/tags/4.13.2 +$ git push upstream :refs/tags/4.13.2 ``` -### Create release candidate tag +### Go release tags -```bash -$ git tag -a 4.8-rc1 -m 'heading towards 4.8' -$ git push origin 4.8-rc1 -$ git push upstream 4.8-rc1 +*I don't think this is necessary anymore as we have moved it release branch to https://github.com/antlr4-go/antlr* + +It seems that [Go needs a `v` in the release git tag](https://go.dev/ref/mod#glos-version) so make sure that we double up with 4.13.2 and v4.13.2. 
+ +``` +$ git tag -a runtime/Go/antlr/v4/v4.13.2 -m "Go runtime module only" +$ git push upstream runtime/Go/antlr/v4/v4.13.2 +$ git push origin runtime/Go/antlr/v4/v4.13.2 ``` -## Update submodules -Make sure you tell git to pull in the submodule (for every clone you do of antlr4): +## Bump version in code and other files -```bash -git submodule init -``` +There are a number of files that require the version number to be updated. -Also bump version to 4.8 in `runtime/PHP/src/RuntimeMetaData.php`. -Update the runtime submodules by running the following command: +Here is a simple script that updates the version number (say, from `4.13.1` to `4.13.2`) in the critical files. Here's an example run of the script: ```bash -git submodule update --recursive -git submodule update --remote --merge # might only need this last one but do both +~/antlr/code/antlr4 $ python scripts/update_antlr_version.py 4.13.1 4.13.2 +Updating ANTLR version from 4.13.1 to 4.13.2 +Set ANTLR repo root (default ~/antlr/code/antlr4): +Perform antlr4 `mvn clean` and wipe build dirs Y/N? (default no): +Ok, not cleaning antlr4 dir +4.13.1 appears on 2 lines so _not_ updating /tmp/antlr4/runtime/JavaScript/package-lock.json +4.13.1 not in /tmp/antlr4/doc/releasing-antlr.md ``` -Make sure these changes go back to antlr4 repo: +Make sure this file doesn't have `-SNAPSHOT` when releasing! -```bash -git add runtime/PHP -git commit -m "Update PHP Runtime to latest version" -``` - -## Bump version - -Edit the repository looking for 4.5 or whatever and update it.
Bump version in the following files: - - * runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java - * runtime/Python2/setup.py - * runtime/Python2/src/antlr4/Recognizer.py - * runtime/Python3/setup.py - * runtime/Python3/src/antlr4/Recognizer.py - * runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs - * runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj - * runtime/JavaScript/package.json - * runtime/JavaScript/src/antlr4/Recognizer.js - * runtime/Cpp/VERSION - * runtime/Cpp/runtime/src/RuntimeMetaData.cpp - * runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake - * runtime/Cpp/demo/generate.cmd - * runtime/Go/antlr/recognizer.go - * runtime/Swift/Antlr4/org/antlr/v4/runtime/RuntimeMetaData.swift - * tool/src/org/antlr/v4/codegen/target/GoTarget.java - * tool/src/org/antlr/v4/codegen/target/CppTarget.java - * tool/src/org/antlr/v4/codegen/target/CSharpTarget.java - * tool/src/org/antlr/v4/codegen/target/JavaScriptTarget.java - * tool/src/org/antlr/v4/codegen/target/Python2Target.java - * tool/src/org/antlr/v4/codegen/target/Python3Target.java - * tool/src/org/antlr/v4/codegen/target/SwiftTarget.java - * tool/src/org/antlr/v4/codegen/Target.java - * tool/resources/org/antlr/v4/tool/templates/codegen/Swift/Swift.stg - -Here is a simple script to display any line from the critical files with, say, `4.5` in it: +``` +runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java +``` + +It's also worth doing a quick check to see if you find any other references to a version: ```bash -find tool runtime -type f -exec grep -l '4\.6' {} \; +mvn clean +find . -type f -exec grep -l '4\.12.0' {} \; | grep -v -E '\.o|\.a|\.jar|\.dylib|node_modules/|\.class|tests/|CHANGELOG|\.zip|\.gz|.iml|.svg' ``` Commit to repository. -## Building +### PHP runtime + +We only have to copy the PHP runtime into the ANTLR repository to run the unittests. 
But, we still need to bump the version to 4.13.2 in `~/antlr/code/antlr-php-runtime/src/RuntimeMetaData.php` in the separate repository, commit, and push. -ugh. apparently you have to `mvn install` and then `mvn compile` or some such or subdir pom.xml's won't see the latest runtime build. +``` +cd ~/antlr/code/antlr-php-runtime/src +git checkout dev # Should be the default +git pull origin dev +... vi RuntimeMetaData.php ... +git commit -a -m "Update PHP Runtime to latest version" +git push origin dev +git checkout master +git pull origin master +git merge dev +git push origin master +``` + +## Build XPath parsers + +This section addresses a [circular dependency regarding XPath](https://github.com/antlr/antlr4/issues/3600). In the java target I avoided a circular dependency (gen 4.13.2 parser for XPath using 4.13.2 which needs it to build) by hand building the parser: runtime/Java/src/org/antlr/v4/runtime/tree/xpath/XPath.java. Probably we won't have to rerun this for the patch releases, just major ones that alter the ATN serialization. 
+ +```bash +cd ~/antlr/code/antlr4/runtime/Cpp/runtime/src/tree/xpath +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.13.2-SNAPSHOT/antlr4-4.13.2-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Cpp XPathLexer.g4 + +cd ~/antlr/code/antlr4/runtime/CSharp/src/Tree/Xpath +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.13.2-SNAPSHOT/antlr4-4.13.2-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=CSharp XPathLexer.g4 + +cd ~/antlr/code/antlr4/runtime/Python3/tests/expr +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.13.2-SNAPSHOT/antlr4-4.13.2-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python3 Expr.g4 +cd ~/antlr/code/antlr4/runtime/Python3/src/antlr4/xpath +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.13.2-SNAPSHOT/antlr4-4.13.2-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python3 XPathLexer.g4 +``` ## Maven Repository Settings @@ -134,23 +156,14 @@ Here is the file template ## Maven deploy snapshot -The goal is to get a snapshot, such as `4.8-SNAPSHOT`, to the staging server: [antlr4 tool](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4) and [antlr4 java runtime](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-runtime). +The goal is to get a snapshot, such as `4.13.2-SNAPSHOT`, to the staging server: [antlr4 tool](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4/4.13.2-SNAPSHOT/) and [antlr4 java runtime](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-runtime/4.13.2-SNAPSHOT/). Do this: ```bash +$ mvn install -DskipTests # seems required to get the jar files visible to maven $ mvn deploy -DskipTests ... 
-[INFO] --- maven-deploy-plugin:2.7:deploy (default-deploy) @ antlr4-tool-testsuite --- -Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/maven-metadata.xml -Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.jar -Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.jar (3 KB at 3.4 KB/sec) -Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.pom -Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.pom (3 KB at 6.5 KB/sec) -Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml -Downloaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml (371 B at 1.4 KB/sec) -Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/maven-metadata.xml -Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/maven-metadata.xml (774 B at 1.8 KB/sec) Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml (388 B at 0.9 KB/sec) [INFO] ------------------------------------------------------------------------ @@ -162,7 +175,7 @@ Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antl [INFO] ANTLR 4 Maven plugin ............................... 
SUCCESS [ 6.547 s] [INFO] ANTLR 4 Runtime Test Annotations ................... SUCCESS [ 2.519 s] [INFO] ANTLR 4 Runtime Test Processors .................... SUCCESS [ 2.385 s] -[INFO] ANTLR 4 Runtime Tests (2nd generation) ............. SUCCESS [ 15.276 s] +[INFO] ANTLR 4 Runtime Tests (4th generation) ............. SUCCESS [ 15.276 s] [INFO] ANTLR 4 Tool Tests ................................. SUCCESS [ 2.233 s] [INFO] ------------------------------------------------------------------------ [INFO] BUILD SUCCESS @@ -178,60 +191,54 @@ Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antl The maven deploy lifecycle phased deploys the artifacts and the poms for the ANTLR project to the [sonatype remote staging server](https://oss.sonatype.org/content/repositories/snapshots/). ```bash -export JAVA_HOME=`/usr/libexec/java_home -v 1.7`; mvn deploy -DskipTests +mvn deploy -DskipTests ``` -With JDK 1.7 (not 6 or 8), do this: +Make sure `gpg` is installed (`brew install gpg` on mac). Also must [create a key and publish it](https://blog.sonatype.com/2010/01/how-to-generate-pgp-signatures-with-maven/) then update `.m2/settings` to use that public key. 
+ +Then: ```bash -export JAVA_HOME=`/usr/libexec/java_home -v 1.7`; mvn release:prepare -Darguments="-DskipTests" +mvn release:prepare -Darguments="-DskipTests" ``` -Hm...per https://github.com/keybase/keybase-issues/issues/1712 we need this to make gpg work: +Hmm...per https://github.com/keybase/keybase-issues/issues/1712 we need this to make gpg work: ```bash export GPG_TTY=$(tty) ``` -Side note to set jdk 1.7 on os x: +You should see 0x37 in generated .class files after 0xCAFEBABE; see [Java SE 11 = 55 (0x37 hex)](https://en.wikipedia.org/wiki/Java_class_file): ```bash -alias java='/Library/Java/JavaVirtualMachines/jdk1.7.0_21.jdk/Contents/Home/bin/java' -alias javac='/Library/Java/JavaVirtualMachines/jdk1.7.0_21.jdk/Contents/Home/bin/javac' -alias javadoc='/Library/Java/JavaVirtualMachines/jdk1.7.0_21.jdk/Contents/Home/bin/javadoc' -alias jar='/Library/Java/JavaVirtualMachines/jdk1.7.0_21.jdk/Contents/Home/bin/jar' -export JAVA_HOME=`/usr/libexec/java_home -v 1.7` -``` - -But I think just this on front of mvn works: - -``` -export JAVA_HOME=`/usr/libexec/java_home -v 1.7`; mvn ... +~/antlr/code/antlr4 $ od -h tool/target/classes/org/antlr/v4/Tool.class |head -1 +0000000 feca beba 0000 3700 ed04 0207 0a9d 0100 + ^^ ``` -You should see 0x33 in generated .class files after 0xCAFEBABE; see [Java SE 7 = 51 (0x33 hex)](https://en.wikipedia.org/wiki/Java_class_file): +Also verify run time is 1.8: ```bash -beast:/tmp/org/antlr/v4 $ od -h Tool.class |head -1 -0000000 feca beba 0000 3300 fa04 0207 0ab8 0100 +od -h runtime/Java/target/classes/org/antlr/v4/runtime/Token.class | head -1 +0000000 feca beba 0000 3400 2500 0007 0722 2300 ``` It will start out by asking you the version number: ``` ... -What is the release version for "ANTLR 4"? (org.antlr:antlr4-master) 4.8: : 4.8 -What is the release version for "ANTLR 4 Runtime"? (org.antlr:antlr4-runtime) 4.8: : -What is the release version for "ANTLR 4 Tool"? 
(org.antlr:antlr4) 4.8: : -What is the release version for "ANTLR 4 Maven plugin"? (org.antlr:antlr4-maven-plugin) 4.8: : -What is the release version for "ANTLR 4 Runtime Test Generator"? (org.antlr:antlr4-runtime-testsuite) 4.8: : -What is the release version for "ANTLR 4 Tool Tests"? (org.antlr:antlr4-tool-testsuite) 4.8: : -What is SCM release tag or label for "ANTLR 4"? (org.antlr:antlr4-master) antlr4-master-4.8: : 4.8 -What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.8.1-SNAPSHOT: +What is the release version for "ANTLR 4"? (org.antlr:antlr4-master) 4.13.2: : 4.13.2 +What is the release version for "ANTLR 4 Runtime"? (org.antlr:antlr4-runtime) 4.13.2: : +What is the release version for "ANTLR 4 Tool"? (org.antlr:antlr4) 4.13.2: : +What is the release version for "ANTLR 4 Maven plugin"? (org.antlr:antlr4-maven-plugin) 4.13.2: : +What is the release version for "ANTLR 4 Runtime Test Generator"? (org.antlr:antlr4-runtime-testsuite) 4.13.2: : +What is the release version for "ANTLR 4 Tool Tests"? (org.antlr:antlr4-tool-testsuite) 4.13.2: : +What is SCM release tag or label for "ANTLR 4"? (org.antlr:antlr4-master) antlr4-master-4.13.2: : 4.13.2 +What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.13.3-SNAPSHOT: ... ``` -Maven will go through your pom.xml files to update versions from 4.8-SNAPSHOT to 4.8 for release and then to 4.8.1-SNAPSHOT after release, which is done with: +Maven will go through your pom.xml files to update versions from 4.13.2-SNAPSHOT to 4.13.2 for release and then to 4.13.3-SNAPSHOT after release, which is done with: ```bash mvn release:perform -Darguments="-DskipTests" @@ -245,97 +252,55 @@ Now, go here: and on the left click "Staging Repositories". You click the staging repo and close it, then you refresh, click it and release it.
It's done when you see it here: -    [https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.8-1/antlr4-runtime-4.8-1.jar](https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.8-1/antlr4-runtime-4.8-1.jar) - -All releases should be here: https://repo1.maven.org/maven2/org/antlr/antlr4-runtime/ - -Copy the jars to antlr.org site and update download/index.html - -```bash -cp ~/.m2/repository/org/antlr/antlr4-runtime/4.8/antlr4-runtime-4.8.jar ~/antlr/sites/website-antlr4/download/antlr-runtime-4.8.jar -cp ~/.m2/repository/org/antlr/antlr4/4.8/antlr4-4.8-complete.jar ~/antlr/sites/website-antlr4/download/antlr-4.8-complete.jar -cd ~/antlr/sites/website-antlr4/download -git add antlr-4.8-complete.jar -git add antlr-runtime-4.8.jar -``` - -Update on site: +    [https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.13.2/antlr4-runtime-4.13.2.jar](https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.13.2/antlr4-runtime-4.13.2.jar) -* download.html -* index.html -* api/index.html -* download/index.html -* scripts/topnav.js - -``` -git commit -a -m 'add 4.8 jars' -git push origin gh-pages -``` +All releases should be here: [https://repo1.maven.org/maven2/org/antlr/antlr4-runtime](https://repo1.maven.org/maven2/org/antlr/antlr4-runtime). 
## Deploying Targets ### JavaScript -```bash -cd runtime/JavaScript -# git add, commit, push -``` - **Push to npm** +(I think this has to be run before the unit test can run locally as it installs the global lib) + ```bash -cd runtime/JavaScript -npm login -npm publish antlr4 +cd ~/antlr/code/antlr4/runtime/JavaScript +rm -rf node_modules # seems we might need this later but try it here +npm update +npm install +npm run build +npm login # asks for username/password/2FA (npmjs.com) +npm publish # don't put antlr4 on there or it will try to push the old version for some reason ``` -Move target to website +Move (and zip) target to website: ```bash -npm run build -cp /dist/antlr4.js ~/antlr/sites/website-antlr4/download +cd src +zip -r ~/antlr/sites/website-antlr4/download/antlr-javascript-runtime-4.13.2.zip . ``` ### CSharp -Now we have [appveyor create artifact](https://ci.appveyor.com/project/parrt/antlr4/build/artifacts). Go to [nuget](https://www.nuget.org/packages/manage/upload) to upload the `.nupkg`. - -### Publishing to Nuget from Windows +As of writing, you can only release from a Windows box, because Visual Studio for Mac can only build the netstandard2.0 version **Install the pre-requisites** -Of course you need Mono and `nuget` to be installed. On mac: +You need 'msbuild' and `nuget` to be installed. -- .NET build tools - can be loaded from [here](https://www.visualstudio.com/downloads/) -- nuget - download [nuget.exe](https://www.nuget.org/downloads) -- dotnet - follow [the instructions here](https://www.microsoft.com/net/core) +**Creating the signed assembly** -Alternatively, you can install Visual Studio 2017 and make sure to check boxes with .NET Core SDK. +cd ~/antlr/code/antlr4/runtime/CSharp/src +dotnet build -c Release Antlr4.csproj -You also need to enable .NET Framework 3.5 support in Windows "Programs and Features". 
+check that the bin/Release folder contains both the netstandard2.0 and the net45 builds +the binaries are already signed, but it's worth double checking -If everything is ok, the following command will restore nuget packages, build Antlr for .NET Standard and .NET 3.5 and create nuget package: +sn -v bin/Release/netstandard2.0/Antlr4.Runtime.Standard.dll +sn -v bin/Release/net45/Antlr4.Runtime.Standard.dll -```PS -msbuild /target:restore /target:rebuild /target:pack /property:Configuration=Release .\Antlr4.dotnet.sln /verbosity:minimal -``` - -This should display something like this: - -**Creating and packaging the assembly** - -``` -Microsoft (R) Build Engine version 15.4.8.50001 for .NET Framework -Copyright (C) Microsoft Corporation. All rights reserved. - - Restoring packages for C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\Antlr4.Runtime.dotnet.csproj... - Generating MSBuild file C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\obj\Antlr4.Runtime.dotnet.csproj.nuget.g.props. - Generating MSBuild file C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\obj\Antlr4.Runtime.dotnet.csproj.nuget.g.targets. - Restore completed in 427.62 ms for C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\Antlr4.Runtime.dotnet.csproj. - Antlr4.Runtime.dotnet -> C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\lib\Release\netstandard1.3\Antlr4.Runtime.Standard.dll - Antlr4.Runtime.dotnet -> C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\lib\Release\net35\Antlr4.Runtime.Standard.dll - Successfully created package 'C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\lib\Release\Antlr4.Runtime.Standard.4.8.2.nupkg'. -``` +both should say the dll is valid **Publishing to NuGet** @@ -345,14 +310,15 @@ As a registered NuGet user, you can then manually upload the package here: [http Alternately, you can publish from the cmd line. 
You need to get your NuGet key from [https://www.nuget.org/account#](https://www.nuget.org/account#) and then from the cmd line, you can then type: ```cmd +cd bin/Release nuget push Antlr4.Runtime.Standard..nupkg -Source https://www.nuget.org/api/v2/package ``` -Nuget packages are also accessible as artifacts of [AppVeyor builds](https://ci.appveyor.com/project/parrt/antlr4/build/artifacts). - ### Python -The Python targets get deployed with `setup.py`. First, set up `~/.pypirc` with tight privileges: +The Python target gets deployed with `twine` for Python 3. + +First, set up `~/.pypirc` with tight privileges: ```bash beast:~ $ ls -l ~/.pypirc @@ -374,20 +340,13 @@ username: parrt password: xxx ``` -Then run the usual python set up stuff: - -```bash -cd ~/antlr/code/antlr4/runtime/Python2 -# assume you have ~/.pypirc set up -python2 setup.py sdist upload -``` - -and do again for Python 3 target +Then run the python build and upload: ```bash cd ~/antlr/code/antlr4/runtime/Python3 +python -m build # assume you have ~/.pypirc set up -python3 setup.py sdist upload +twine upload dist/antlr4_python3_runtime-4.13.2.tar.gz dist/antlr4_python3_runtime-4.13.2-py3-none-any.whl ``` There are links to the artifacts in [download.html](http://www.antlr.org/download.html) already. 
@@ -408,49 +367,112 @@ For each platform there's a deployment script which generates zip archives and c On a Mac (with XCode 7+ installed): ```bash -cd runtime/Cpp +cd ~/antlr/code/antlr4/runtime/Cpp +rm CMakeCache.txt # otherwise can't find some include files ./deploy-macos.sh -cp antlr4-cpp-runtime-macos.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.8-macos.zip +cp antlr4-cpp-runtime-macos.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.13.2-macos.zip ``` On any Mac or Linux machine: ```bash -cd runtime/Cpp +cd ~/antlr/code/antlr4/runtime/Cpp ./deploy-source.sh -cp antlr4-cpp-runtime-source.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.8-source.zip +cp antlr4-cpp-runtime-source.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.13.2-source.zip ``` On a Windows machine the build scripts checks if VS 2017 and/or VS 2019 are installed and builds binaries for each, if found. This script requires 7z to be installed (http://7-zip.org then do `set PATH=%PATH%;C:\Program Files\7-Zip\` from DOS not powershell). 
```bash -cd runtime/Cpp +cd ~/antlr/code/antlr4/runtime/Cpp deploy-windows.cmd Community -cp antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.8-vs2019.zip +cp antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.13.2-vs2019.zip ``` Move target to website (**_rename to a specific ANTLR version first if needed_**): ```bash pushd ~/antlr/sites/website-antlr4/download -# vi index.html -git add antlr4cpp-runtime-4.8-macos.zip -git add antlr4cpp-runtime-4.8-windows.zip -git add antlr4cpp-runtime-4.8-source.zip +git add antlr4-cpp-runtime-4.13.2-macos.zip +git add antlr4-cpp-runtime-4.13.2-windows.zip +git add antlr4-cpp-runtime-4.13.2-source.zip git commit -a -m 'update C++ runtime' git push origin gh-pages popd ``` -## Update javadoc for runtime and tool +### Dart + +Install Dart SDK from https://dart.dev/get-dart -First, gen javadoc: +Push to pub.dev ```bash -$ cd antlr4 -$ mvn -DskipTests javadoc:jar install +cd ~/antlr/code/antlr4/runtime/Dart +dart pub publish +``` + +It will warn that no change log found for the new version. +Otherwise enter `N` to ignore the warning. + +## Update website + +### javadoc for runtime and tool + +Jars are in: + +``` +~/.m2/repository/org/antlr/antlr4-runtime/4.13.2/antlr4-runtime-4.13.2 ``` +### Update version and copy jars / api + +Copy javadoc and java jars to website using this script: + +```bash +cd ~/antlr/code/antlr4 +python scripts/deploy_to_website.py 4.13.1 4.13.2 +``` + +Output: + +```bash +Updating ANTLR version from 4.13.1 to 4.13.2 +Set ANTLR website root (default /Users/parrt/antlr/sites/website-antlr4): +Version string updated. Please commit/push: +Javadoc copied: + api/Java updated from antlr4-runtime-4.13.2-javadoc.jar + api/JavaTool updated from antlr4-4.13.2-javadoc.jar +Jars copied: + antlr-4.13.2-complete.jar + antlr-runtime-4.13.2.jar + +Please look for and add new api files!! 
+Then MANUALLY commit/push: + +git commit -a -m 'Update website, javadoc, jars to 4.13.2' +git push origin gh-pages +``` + + + +Once it's done, you must do the following manually: + +``` +cd ~/antlr/sites/website-antlr4 +git commit -a -m 'Update website, javadoc, jars to 4.13.2' +git push origin gh-pages +``` + + + +## Get fresh dev branch + +```bash +git checkout master +git pull upstream master +git checkout dev +git pull upstream dev +git merge master +git push origin dev +git push upstream dev +``` -## Update Intellij plug-in +## Other updates -Rebuild antlr plugin with new antlr jar. +* Rebuild antlr Intellij plug-in with new antlr jar. +* Cut release notes in github +* Update lab.antlr.org diff --git a/doc/resources/CaseChangingCharStream.cs b/doc/resources/CaseChangingCharStream.cs deleted file mode 100644 index 9f73a03826..0000000000 --- a/doc/resources/CaseChangingCharStream.cs +++ /dev/null @@ -1,105 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -using System; -using Antlr4.Runtime.Misc; - -namespace Antlr4.Runtime -{ - /// - /// This class supports case-insensitive lexing by wrapping an existing - /// and forcing the lexer to see either upper or - /// lowercase characters. Grammar literals should then be either upper or - /// lower case such as 'BEGIN' or 'begin'. The text of the character - /// stream is unaffected. Example: input 'BeGiN' would match lexer rule - /// 'BEGIN' if constructor parameter upper=true but getText() would return - /// 'BeGiN'. - /// - public class CaseChangingCharStream : ICharStream - { - private ICharStream stream; - private bool upper; - - /// - /// Constructs a new CaseChangingCharStream wrapping the given forcing - /// all characters to upper case or lower case. - /// - /// The stream to wrap. 
- /// If true force each symbol to upper case, otherwise force to lower. - public CaseChangingCharStream(ICharStream stream, bool upper) - { - this.stream = stream; - this.upper = upper; - } - - public int Index - { - get - { - return stream.Index; - } - } - - public int Size - { - get - { - return stream.Size; - } - } - - public string SourceName - { - get - { - return stream.SourceName; - } - } - - public void Consume() - { - stream.Consume(); - } - - [return: NotNull] - public string GetText(Interval interval) - { - return stream.GetText(interval); - } - - public int LA(int i) - { - int c = stream.LA(i); - - if (c <= 0) - { - return c; - } - - char o = (char)c; - - if (upper) - { - return (int)char.ToUpperInvariant(o); - } - - return (int)char.ToLowerInvariant(o); - } - - public int Mark() - { - return stream.Mark(); - } - - public void Release(int marker) - { - stream.Release(marker); - } - - public void Seek(int index) - { - stream.Seek(index); - } - } -} diff --git a/doc/resources/CaseChangingCharStream.java b/doc/resources/CaseChangingCharStream.java deleted file mode 100644 index d069d0188a..0000000000 --- a/doc/resources/CaseChangingCharStream.java +++ /dev/null @@ -1,81 +0,0 @@ -package org.antlr.v4.runtime; - -import org.antlr.v4.runtime.misc.Interval; - -/** - * This class supports case-insensitive lexing by wrapping an existing - * {@link CharStream} and forcing the lexer to see either upper or - * lowercase characters. Grammar literals should then be either upper or - * lower case such as 'BEGIN' or 'begin'. The text of the character - * stream is unaffected. Example: input 'BeGiN' would match lexer rule - * 'BEGIN' if constructor parameter upper=true but getText() would return - * 'BeGiN'. - */ -public class CaseChangingCharStream implements CharStream { - - final CharStream stream; - final boolean upper; - - /** - * Constructs a new CaseChangingCharStream wrapping the given {@link CharStream} forcing - * all characters to upper case or lower case. 
- * @param stream The stream to wrap. - * @param upper If true force each symbol to upper case, otherwise force to lower. - */ - public CaseChangingCharStream(CharStream stream, boolean upper) { - this.stream = stream; - this.upper = upper; - } - - @Override - public String getText(Interval interval) { - return stream.getText(interval); - } - - @Override - public void consume() { - stream.consume(); - } - - @Override - public int LA(int i) { - int c = stream.LA(i); - if (c <= 0) { - return c; - } - if (upper) { - return Character.toUpperCase(c); - } - return Character.toLowerCase(c); - } - - @Override - public int mark() { - return stream.mark(); - } - - @Override - public void release(int marker) { - stream.release(marker); - } - - @Override - public int index() { - return stream.index(); - } - - @Override - public void seek(int index) { - stream.seek(index); - } - - @Override - public int size() { - return stream.size(); - } - - @Override - public String getSourceName() { - return stream.getSourceName(); - } -} diff --git a/doc/resources/CaseChangingStream.js b/doc/resources/CaseChangingStream.js deleted file mode 100644 index 3af1ad6127..0000000000 --- a/doc/resources/CaseChangingStream.js +++ /dev/null @@ -1,65 +0,0 @@ -// -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -// - -function CaseChangingStream(stream, upper) { - this._stream = stream; - this._upper = upper; -} - -CaseChangingStream.prototype.LA = function(offset) { - var c = this._stream.LA(offset); - if (c <= 0) { - return c; - } - return String.fromCodePoint(c)[this._upper ? 
"toUpperCase" : "toLowerCase"]().codePointAt(0); -}; - -CaseChangingStream.prototype.reset = function() { - return this._stream.reset(); -}; - -CaseChangingStream.prototype.consume = function() { - return this._stream.consume(); -}; - -CaseChangingStream.prototype.LT = function(offset) { - return this._stream.LT(offset); -}; - -CaseChangingStream.prototype.mark = function() { - return this._stream.mark(); -}; - -CaseChangingStream.prototype.release = function(marker) { - return this._stream.release(marker); -}; - -CaseChangingStream.prototype.seek = function(_index) { - return this._stream.seek(_index); -}; - -CaseChangingStream.prototype.getText = function(start, stop) { - return this._stream.getText(start, stop); -}; - -CaseChangingStream.prototype.toString = function() { - return this._stream.toString(); -}; - -Object.defineProperty(CaseChangingStream.prototype, "index", { - get: function() { - return this._stream.index; - } -}); - -Object.defineProperty(CaseChangingStream.prototype, "size", { - get: function() { - return this._stream.size; - } -}); - -exports.CaseChangingStream = CaseChangingStream; diff --git a/doc/resources/CaseChangingStream.py b/doc/resources/CaseChangingStream.py deleted file mode 100644 index 6d2815de41..0000000000 --- a/doc/resources/CaseChangingStream.py +++ /dev/null @@ -1,13 +0,0 @@ -class CaseChangingStream(): - def __init__(self, stream, upper): - self._stream = stream - self._upper = upper - - def __getattr__(self, name): - return self._stream.__getattribute__(name) - - def LA(self, offset): - c = self._stream.LA(offset) - if c <= 0: - return c - return ord(chr(c).upper() if self._upper else chr(c).lower()) diff --git a/doc/resources/case_changing_stream.go b/doc/resources/case_changing_stream.go deleted file mode 100644 index 5b510fa321..0000000000 --- a/doc/resources/case_changing_stream.go +++ /dev/null @@ -1,37 +0,0 @@ -package antlr_resource - -import ( - "unicode" - - "github.com/antlr/antlr4/runtime/Go/antlr" -) - -// 
CaseChangingStream wraps an existing CharStream, but upper cases, or -// lower cases the input before it is tokenized. -type CaseChangingStream struct { - antlr.CharStream - - upper bool -} - -// NewCaseChangingStream returns a new CaseChangingStream that forces -// all tokens read from the underlying stream to be either upper case -// or lower case based on the upper argument. -func NewCaseChangingStream(in antlr.CharStream, upper bool) *CaseChangingStream { - return &CaseChangingStream{in, upper} -} - -// LA gets the value of the symbol at offset from the current position -// from the underlying CharStream and converts it to either upper case -// or lower case. -func (is *CaseChangingStream) LA(offset int) int { - in := is.CharStream.LA(offset) - if in < 0 { - // Such as antlr.TokenEOF which is -1 - return in - } - if is.upper { - return int(unicode.ToUpper(rune(in))) - } - return int(unicode.ToLower(rune(in))) -} diff --git a/doc/resources/worker-base.js b/doc/resources/worker-base.js deleted file mode 100644 index 3494b39169..0000000000 --- a/doc/resources/worker-base.js +++ /dev/null @@ -1,1079 +0,0 @@ -"no use strict"; -(function(e) { - if (typeof e.window != "undefined" && e.document) return; - e.console = function() { - var e = Array.prototype.slice.call(arguments, 0); - postMessage({ - type: "log", - data: e - }) - }, e.console.error = e.console.warn = e.console.log = e.console.trace = e.console, e.window = e, e.ace = e, e.onerror = function(e, t, n, r, i) { - postMessage({ - type: "error", - data: { - message: e, - file: t, - line: n, - col: r, - stack: i.stack - } - }) - }, e.normalizeModule = function(t, n) { - if (n.indexOf("!") !== -1) { - var r = n.split("!"); - return e.normalizeModule(t, r[0]) + "!" + e.normalizeModule(t, r[1]) - } - if (n.charAt(0) == ".") { - var i = t.split("/") - .slice(0, -1) - .join("/"); - n = (i ? 
i + "/" : "") + n; - while (n.indexOf(".") !== -1 && s != n) { - var s = n; - n = n.replace(/^\.\//, "") - .replace(/\/\.\//, "/") - .replace(/[^\/]+\/\.\.\//, "") - } - } - return n - }, e.require = function(t, n) { - n || (n = t, t = null); - if (!n.charAt) throw new Error("worker.js require() accepts only (parentId, id) as arguments"); - n = e.normalizeModule(t, n); - var r = e.require.modules[n]; - if (r) return r.initialized || (r.initialized = !0, r.exports = r.factory() - .exports), r.exports; - var i = n.split("/"); - if (!e.require.tlns) return console.log("unable to load " + n); - i[0] = e.require.tlns[i[0]] || i[0]; - var s = i.join("/") + ".js"; - return e.require.id = n, importScripts(s), e.require(t, n) - }, e.require.modules = {}, e.require.tlns = {}, e.define = function(t, n, r) { - arguments.length == 2 ? (r = n, typeof t != "string" && (n = t, t = e.require.id)) : arguments.length == 1 && (r = t, n = [], t = e.require.id); - if (typeof r != "function") { - e.require.modules[t] = { - exports: r, - initialized: !0 - }; - return - } - n.length || (n = ["require", "exports", "module"]); - var i = function(n) { - return e.require(t, n) - }; - e.require.modules[t] = { - exports: {}, - factory: function() { - var e = this, - t = r.apply(this, n.map(function(t) { - switch (t) { - case "require": - return i; - case "exports": - return e.exports; - case "module": - return e; - default: - return i(t) - } - })); - return t && (e.exports = t), e - } - } - }, e.define.amd = {}, e.initBaseUrls = function(t) { - require.tlns = t - }, e.initSender = function() { - var n = e.require("ace/lib/event_emitter") - .EventEmitter, - r = e.require("ace/lib/oop"), - i = function() {}; - return function() { - r.implement(this, n), this.callback = function(e, t) { - postMessage({ - type: "call", - id: t, - data: e - }) - }, this.emit = function(e, t) { - postMessage({ - type: "event", - name: e, - data: t - }) - } - }.call(i.prototype), new i - }; - var t = e.main = null, - n 
= e.sender = null; - e.onmessage = function(r) { - var i = r.data; - if (i.command) { - if (!t[i.command]) throw new Error("Unknown command:" + i.command); - t[i.command].apply(t, i.args) - } else if (i.init) { - initBaseUrls(i.tlns), require("ace/lib/es5-shim"), n = e.sender = initSender(); - var s = require(i.module)[i.classname]; - t = e.main = new s(n) - } else i.event && n && n._signal(i.event, i.data) - } -})(this), ace.define("ace/lib/oop", ["require", "exports", "module"], function(e, t, n) { - "use strict"; - t.inherits = function(e, t) { - e.super_ = t, e.prototype = Object.create(t.prototype, { - constructor: { - value: e, - enumerable: !1, - writable: !0, - configurable: !0 - } - }) - }, t.mixin = function(e, t) { - for (var n in t) e[n] = t[n]; - return e - }, t.implement = function(e, n) { - t.mixin(e, n) - } -}), ace.define("ace/lib/event_emitter", ["require", "exports", "module"], function(e, t, n) { - "use strict"; - var r = {}, - i = function() { - this.propagationStopped = !0 - }, - s = function() { - this.defaultPrevented = !0 - }; - r._emit = r._dispatchEvent = function(e, t) { - this._eventRegistry || (this._eventRegistry = {}), this._defaultHandlers || (this._defaultHandlers = {}); - var n = this._eventRegistry[e] || [], - r = this._defaultHandlers[e]; - if (!n.length && !r) return; - if (typeof t != "object" || !t) t = {}; - t.type || (t.type = e), t.stopPropagation || (t.stopPropagation = i), t.preventDefault || (t.preventDefault = s), n = n.slice(); - for (var o = 0; o < n.length; o++) { - n[o](t, this); - if (t.propagationStopped) break - } - if (r && !t.defaultPrevented) return r(t, this) - }, r._signal = function(e, t) { - var n = (this._eventRegistry || {})[e]; - if (!n) return; - n = n.slice(); - for (var r = 0; r < n.length; r++) n[r](t, this) - }, r.once = function(e, t) { - var n = this; - t && this.addEventListener(e, function r() { - n.removeEventListener(e, r), t.apply(null, arguments) - }) - }, r.setDefaultHandler = function(e, 
t) { - var n = this._defaultHandlers; - n || (n = this._defaultHandlers = { - _disabled_: {} - }); - if (n[e]) { - var r = n[e], - i = n._disabled_[e]; - i || (n._disabled_[e] = i = []), i.push(r); - var s = i.indexOf(t); - s != -1 && i.splice(s, 1) - } - n[e] = t - }, r.removeDefaultHandler = function(e, t) { - var n = this._defaultHandlers; - if (!n) return; - var r = n._disabled_[e]; - if (n[e] == t) { - var i = n[e]; - r && this.setDefaultHandler(e, r.pop()) - } else if (r) { - var s = r.indexOf(t); - s != -1 && r.splice(s, 1) - } - }, r.on = r.addEventListener = function(e, t, n) { - this._eventRegistry = this._eventRegistry || {}; - var r = this._eventRegistry[e]; - return r || (r = this._eventRegistry[e] = []), r.indexOf(t) == -1 && r[n ? "unshift" : "push"](t), t - }, r.off = r.removeListener = r.removeEventListener = function(e, t) { - this._eventRegistry = this._eventRegistry || {}; - var n = this._eventRegistry[e]; - if (!n) return; - var r = n.indexOf(t); - r !== -1 && n.splice(r, 1) - }, r.removeAllListeners = function(e) { - this._eventRegistry && (this._eventRegistry[e] = []) - }, t.EventEmitter = r -}), ace.define("ace/range", ["require", "exports", "module"], function(e, t, n) { - "use strict"; - var r = function(e, t) { - return e.row - t.row || e.column - t.column - }, - i = function(e, t, n, r) { - this.start = { - row: e, - column: t - }, this.end = { - row: n, - column: r - } - }; - (function() { - this.isEqual = function(e) { - return this.start.row === e.start.row && this.end.row === e.end.row && this.start.column === e.start.column && this.end.column === e.end.column - }, this.toString = function() { - return "Range: [" + this.start.row + "/" + this.start.column + "] -> [" + this.end.row + "/" + this.end.column + "]" - }, this.contains = function(e, t) { - return this.compare(e, t) == 0 - }, this.compareRange = function(e) { - var t, n = e.end, - r = e.start; - return t = this.compare(n.row, n.column), t == 1 ? 
(t = this.compare(r.row, r.column), t == 1 ? 2 : t == 0 ? 1 : 0) : t == -1 ? -2 : (t = this.compare(r.row, r.column), t == -1 ? -1 : t == 1 ? 42 : 0) - }, this.comparePoint = function(e) { - return this.compare(e.row, e.column) - }, this.containsRange = function(e) { - return this.comparePoint(e.start) == 0 && this.comparePoint(e.end) == 0 - }, this.intersects = function(e) { - var t = this.compareRange(e); - return t == -1 || t == 0 || t == 1 - }, this.isEnd = function(e, t) { - return this.end.row == e && this.end.column == t - }, this.isStart = function(e, t) { - return this.start.row == e && this.start.column == t - }, this.setStart = function(e, t) { - typeof e == "object" ? (this.start.column = e.column, this.start.row = e.row) : (this.start.row = e, this.start.column = t) - }, this.setEnd = function(e, t) { - typeof e == "object" ? (this.end.column = e.column, this.end.row = e.row) : (this.end.row = e, this.end.column = t) - }, this.inside = function(e, t) { - return this.compare(e, t) == 0 ? this.isEnd(e, t) || this.isStart(e, t) ? !1 : !0 : !1 - }, this.insideStart = function(e, t) { - return this.compare(e, t) == 0 ? this.isEnd(e, t) ? !1 : !0 : !1 - }, this.insideEnd = function(e, t) { - return this.compare(e, t) == 0 ? this.isStart(e, t) ? !1 : !0 : !1 - }, this.compare = function(e, t) { - return !this.isMultiLine() && e === this.start.row ? t < this.start.column ? -1 : t > this.end.column ? 1 : 0 : e < this.start.row ? -1 : e > this.end.row ? 1 : this.start.row === e ? t >= this.start.column ? 0 : -1 : this.end.row === e ? t <= this.end.column ? 0 : 1 : 0 - }, this.compareStart = function(e, t) { - return this.start.row == e && this.start.column == t ? -1 : this.compare(e, t) - }, this.compareEnd = function(e, t) { - return this.end.row == e && this.end.column == t ? 1 : this.compare(e, t) - }, this.compareInside = function(e, t) { - return this.end.row == e && this.end.column == t ? 1 : this.start.row == e && this.start.column == t ? 
-1 : this.compare(e, t) - }, this.clipRows = function(e, t) { - if (this.end.row > t) var n = { - row: t + 1, - column: 0 - }; - else if (this.end.row < e) var n = { - row: e, - column: 0 - }; - if (this.start.row > t) var r = { - row: t + 1, - column: 0 - }; - else if (this.start.row < e) var r = { - row: e, - column: 0 - }; - return i.fromPoints(r || this.start, n || this.end) - }, this.extend = function(e, t) { - var n = this.compare(e, t); - if (n == 0) return this; - if (n == -1) var r = { - row: e, - column: t - }; - else var s = { - row: e, - column: t - }; - return i.fromPoints(r || this.start, s || this.end) - }, this.isEmpty = function() { - return this.start.row === this.end.row && this.start.column === this.end.column - }, this.isMultiLine = function() { - return this.start.row !== this.end.row - }, this.clone = function() { - return i.fromPoints(this.start, this.end) - }, this.collapseRows = function() { - return this.end.column == 0 ? new i(this.start.row, 0, Math.max(this.start.row, this.end.row - 1), 0) : new i(this.start.row, 0, this.end.row, 0) - }, this.toScreenRange = function(e) { - var t = e.documentToScreenPosition(this.start), - n = e.documentToScreenPosition(this.end); - return new i(t.row, t.column, n.row, n.column) - }, this.moveBy = function(e, t) { - this.start.row += e, this.start.column += t, this.end.row += e, this.end.column += t - } - }) - .call(i.prototype), i.fromPoints = function(e, t) { - return new i(e.row, e.column, t.row, t.column) - }, i.comparePoints = r, i.comparePoints = function(e, t) { - return e.row - t.row || e.column - t.column - }, t.Range = i -}), ace.define("ace/anchor", ["require", "exports", "module", "ace/lib/oop", "ace/lib/event_emitter"], function(e, t, n) { - "use strict"; - var r = e("./lib/oop"), - i = e("./lib/event_emitter") - .EventEmitter, - s = t.Anchor = function(e, t, n) { - this.$onChange = this.onChange.bind(this), this.attach(e), typeof n == "undefined" ? 
this.setPosition(t.row, t.column) : this.setPosition(t, n) - }; - (function() { - r.implement(this, i), this.getPosition = function() { - return this.$clipPositionToDocument(this.row, this.column) - }, this.getDocument = function() { - return this.document - }, this.$insertRight = !1, this.onChange = function(e) { - var t = e.data, - n = t.range; - if (n.start.row == n.end.row && n.start.row != this.row) return; - if (n.start.row > this.row) return; - if (n.start.row == this.row && n.start.column > this.column) return; - var r = this.row, - i = this.column, - s = n.start, - o = n.end; - if (t.action === "insertText") - if (s.row === r && s.column <= i) { - if (s.column !== i || !this.$insertRight) s.row === o.row ? i += o.column - s.column : (i -= s.column, r += o.row - s.row) - } else s.row !== o.row && s.row < r && (r += o.row - s.row); - else t.action === "insertLines" ? (s.row !== r || i !== 0 || !this.$insertRight) && s.row <= r && (r += o.row - s.row) : t.action === "removeText" ? s.row === r && s.column < i ? o.column >= i ? i = s.column : i = Math.max(0, i - (o.column - s.column)) : s.row !== o.row && s.row < r ? (o.row === r && (i = Math.max(0, i - o.column) + s.column), r -= o.row - s.row) : o.row === r && (r -= o.row - s.row, i = Math.max(0, i - o.column) + s.column) : t.action == "removeLines" && s.row <= r && (o.row <= r ? r -= o.row - s.row : (r = s.row, i = 0)); - this.setPosition(r, i, !0) - }, this.setPosition = function(e, t, n) { - var r; - n ? 
r = { - row: e, - column: t - } : r = this.$clipPositionToDocument(e, t); - if (this.row == r.row && this.column == r.column) return; - var i = { - row: this.row, - column: this.column - }; - this.row = r.row, this.column = r.column, this._signal("change", { - old: i, - value: r - }) - }, this.detach = function() { - this.document.removeEventListener("change", this.$onChange) - }, this.attach = function(e) { - this.document = e || this.document, this.document.on("change", this.$onChange) - }, this.$clipPositionToDocument = function(e, t) { - var n = {}; - return e >= this.document.getLength() ? (n.row = Math.max(0, this.document.getLength() - 1), n.column = this.document.getLine(n.row) - .length) : e < 0 ? (n.row = 0, n.column = 0) : (n.row = e, n.column = Math.min(this.document.getLine(n.row) - .length, Math.max(0, t))), t < 0 && (n.column = 0), n - } - }) - .call(s.prototype) -}), ace.define("ace/document", ["require", "exports", "module", "ace/lib/oop", "ace/lib/event_emitter", "ace/range", "ace/anchor"], function(e, t, n) { - "use strict"; - var r = e("./lib/oop"), - i = e("./lib/event_emitter") - .EventEmitter, - s = e("./range") - .Range, - o = e("./anchor") - .Anchor, - u = function(e) { - this.$lines = [], e.length === 0 ? this.$lines = [""] : Array.isArray(e) ? this._insertLines(0, e) : this.insert({ - row: 0, - column: 0 - }, e) - }; - (function() { - r.implement(this, i), this.setValue = function(e) { - var t = this.getLength(); - this.remove(new s(0, 0, t, this.getLine(t - 1) - .length)), this.insert({ - row: 0, - column: 0 - }, e) - }, this.getValue = function() { - return this.getAllLines() - .join(this.getNewLineCharacter()) - }, this.createAnchor = function(e, t) { - return new o(this, e, t) - }, "aaa".split(/a/) - .length === 0 ? 
this.$split = function(e) { - return e.replace(/\r\n|\r/g, "\n") - .split("\n") - } : this.$split = function(e) { - return e.split(/\r\n|\r|\n/) - }, this.$detectNewLine = function(e) { - var t = e.match(/^.*?(\r\n|\r|\n)/m); - this.$autoNewLine = t ? t[1] : "\n", this._signal("changeNewLineMode") - }, this.getNewLineCharacter = function() { - switch (this.$newLineMode) { - case "windows": - return "\r\n"; - case "unix": - return "\n"; - default: - return this.$autoNewLine || "\n" - } - }, this.$autoNewLine = "", this.$newLineMode = "auto", this.setNewLineMode = function(e) { - if (this.$newLineMode === e) return; - this.$newLineMode = e, this._signal("changeNewLineMode") - }, this.getNewLineMode = function() { - return this.$newLineMode - }, this.isNewLine = function(e) { - return e == "\r\n" || e == "\r" || e == "\n" - }, this.getLine = function(e) { - return this.$lines[e] || "" - }, this.getLines = function(e, t) { - return this.$lines.slice(e, t + 1) - }, this.getAllLines = function() { - return this.getLines(0, this.getLength()) - }, this.getLength = function() { - return this.$lines.length - }, this.getTextRange = function(e) { - if (e.start.row == e.end.row) return this.getLine(e.start.row) - .substring(e.start.column, e.end.column); - var t = this.getLines(e.start.row, e.end.row); - t[0] = (t[0] || "") - .substring(e.start.column); - var n = t.length - 1; - return e.end.row - e.start.row == n && (t[n] = t[n].substring(0, e.end.column)), t.join(this.getNewLineCharacter()) - }, this.$clipPosition = function(e) { - var t = this.getLength(); - return e.row >= t ? (e.row = Math.max(0, t - 1), e.column = this.getLine(t - 1) - .length) : e.row < 0 && (e.row = 0), e - }, this.insert = function(e, t) { - if (!t || t.length === 0) return e; - e = this.$clipPosition(e), this.getLength() <= 1 && this.$detectNewLine(t); - var n = this.$split(t), - r = n.splice(0, 1)[0], - i = n.length == 0 ? 
null : n.splice(n.length - 1, 1)[0]; - return e = this.insertInLine(e, r), i !== null && (e = this.insertNewLine(e), e = this._insertLines(e.row, n), e = this.insertInLine(e, i || "")), e - }, this.insertLines = function(e, t) { - return e >= this.getLength() ? this.insert({ - row: e, - column: 0 - }, "\n" + t.join("\n")) : this._insertLines(Math.max(e, 0), t) - }, this._insertLines = function(e, t) { - if (t.length == 0) return { - row: e, - column: 0 - }; - while (t.length > 2e4) { - var n = this._insertLines(e, t.slice(0, 2e4)); - t = t.slice(2e4), e = n.row - } - var r = [e, 0]; - r.push.apply(r, t), this.$lines.splice.apply(this.$lines, r); - var i = new s(e, 0, e + t.length, 0), - o = { - action: "insertLines", - range: i, - lines: t - }; - return this._signal("change", { - data: o - }), i.end - }, this.insertNewLine = function(e) { - e = this.$clipPosition(e); - var t = this.$lines[e.row] || ""; - this.$lines[e.row] = t.substring(0, e.column), this.$lines.splice(e.row + 1, 0, t.substring(e.column, t.length)); - var n = { - row: e.row + 1, - column: 0 - }, - r = { - action: "insertText", - range: s.fromPoints(e, n), - text: this.getNewLineCharacter() - }; - return this._signal("change", { - data: r - }), n - }, this.insertInLine = function(e, t) { - if (t.length == 0) return e; - var n = this.$lines[e.row] || ""; - this.$lines[e.row] = n.substring(0, e.column) + t + n.substring(e.column); - var r = { - row: e.row, - column: e.column + t.length - }, - i = { - action: "insertText", - range: s.fromPoints(e, r), - text: t - }; - return this._signal("change", { - data: i - }), r - }, this.remove = function(e) { - e instanceof s || (e = s.fromPoints(e.start, e.end)), e.start = this.$clipPosition(e.start), e.end = this.$clipPosition(e.end); - if (e.isEmpty()) return e.start; - var t = e.start.row, - n = e.end.row; - if (e.isMultiLine()) { - var r = e.start.column == 0 ? 
t : t + 1, - i = n - 1; - e.end.column > 0 && this.removeInLine(n, 0, e.end.column), i >= r && this._removeLines(r, i), r != t && (this.removeInLine(t, e.start.column, this.getLine(t) - .length), this.removeNewLine(e.start.row)) - } else this.removeInLine(t, e.start.column, e.end.column); - return e.start - }, this.removeInLine = function(e, t, n) { - if (t == n) return; - var r = new s(e, t, e, n), - i = this.getLine(e), - o = i.substring(t, n), - u = i.substring(0, t) + i.substring(n, i.length); - this.$lines.splice(e, 1, u); - var a = { - action: "removeText", - range: r, - text: o - }; - return this._signal("change", { - data: a - }), r.start - }, this.removeLines = function(e, t) { - return e < 0 || t >= this.getLength() ? this.remove(new s(e, 0, t + 1, 0)) : this._removeLines(e, t) - }, this._removeLines = function(e, t) { - var n = new s(e, 0, t + 1, 0), - r = this.$lines.splice(e, t - e + 1), - i = { - action: "removeLines", - range: n, - nl: this.getNewLineCharacter(), - lines: r - }; - return this._signal("change", { - data: i - }), r - }, this.removeNewLine = function(e) { - var t = this.getLine(e), - n = this.getLine(e + 1), - r = new s(e, t.length, e + 1, 0), - i = t + n; - this.$lines.splice(e, 2, i); - var o = { - action: "removeText", - range: r, - text: this.getNewLineCharacter() - }; - this._signal("change", { - data: o - }) - }, this.replace = function(e, t) { - e instanceof s || (e = s.fromPoints(e.start, e.end)); - if (t.length == 0 && e.isEmpty()) return e.start; - if (t == this.getTextRange(e)) return e.end; - this.remove(e); - if (t) var n = this.insert(e.start, t); - else n = e.start; - return n - }, this.applyDeltas = function(e) { - for (var t = 0; t < e.length; t++) { - var n = e[t], - r = s.fromPoints(n.range.start, n.range.end); - n.action == "insertLines" ? this.insertLines(r.start.row, n.lines) : n.action == "insertText" ? this.insert(r.start, n.text) : n.action == "removeLines" ? 
this._removeLines(r.start.row, r.end.row - 1) : n.action == "removeText" && this.remove(r) - } - }, this.revertDeltas = function(e) { - for (var t = e.length - 1; t >= 0; t--) { - var n = e[t], - r = s.fromPoints(n.range.start, n.range.end); - n.action == "insertLines" ? this._removeLines(r.start.row, r.end.row - 1) : n.action == "insertText" ? this.remove(r) : n.action == "removeLines" ? this._insertLines(r.start.row, n.lines) : n.action == "removeText" && this.insert(r.start, n.text) - } - }, this.indexToPosition = function(e, t) { - var n = this.$lines || this.getAllLines(), - r = this.getNewLineCharacter() - .length; - for (var i = t || 0, s = n.length; i < s; i++) { - e -= n[i].length + r; - if (e < 0) return { - row: i, - column: e + n[i].length + r - } - } - return { - row: s - 1, - column: n[s - 1].length - } - }, this.positionToIndex = function(e, t) { - var n = this.$lines || this.getAllLines(), - r = this.getNewLineCharacter() - .length, - i = 0, - s = Math.min(e.row, n.length); - for (var o = t || 0; o < s; ++o) i += n[o].length + r; - return i + e.column - } - }) - .call(u.prototype), t.Document = u -}), ace.define("ace/lib/lang", ["require", "exports", "module"], function(e, t, n) { - "use strict"; - t.last = function(e) { - return e[e.length - 1] - }, t.stringReverse = function(e) { - return e.split("") - .reverse() - .join("") - }, t.stringRepeat = function(e, t) { - var n = ""; - while (t > 0) { - t & 1 && (n += e); - if (t >>= 1) e += e - } - return n - }; - var r = /^\s\s*/, - i = /\s\s*$/; - t.stringTrimLeft = function(e) { - return e.replace(r, "") - }, t.stringTrimRight = function(e) { - return e.replace(i, "") - }, t.copyObject = function(e) { - var t = {}; - for (var n in e) t[n] = e[n]; - return t - }, t.copyArray = function(e) { - var t = []; - for (var n = 0, r = e.length; n < r; n++) e[n] && typeof e[n] == "object" ? 
t[n] = this.copyObject(e[n]) : t[n] = e[n]; - return t - }, t.deepCopy = function s(e) { - if (typeof e != "object" || !e) return e; - var t; - if (Array.isArray(e)) { - t = []; - for (var n = 0; n < e.length; n++) t[n] = s(e[n]); - return t - } - var r = e.constructor; - if (r === RegExp) return e; - t = r(); - for (var n in e) t[n] = s(e[n]); - return t - }, t.arrayToMap = function(e) { - var t = {}; - for (var n = 0; n < e.length; n++) t[e[n]] = 1; - return t - }, t.createMap = function(e) { - var t = Object.create(null); - for (var n in e) t[n] = e[n]; - return t - }, t.arrayRemove = function(e, t) { - for (var n = 0; n <= e.length; n++) t === e[n] && e.splice(n, 1) - }, t.escapeRegExp = function(e) { - return e.replace(/([.*+?^${}()|[\]\/\\])/g, "\\$1") - }, t.escapeHTML = function(e) { - return e.replace(/&/g, "&") - .replace(/"/g, """) - .replace(/'/g, "'") - .replace(/ 0 || -1) * Math.floor(Math.abs(e))), e - } - - function B(e) { - var t = typeof e; - return e === null || t === "undefined" || t === "boolean" || t === "number" || t === "string" - } - - function j(e) { - var t, n, r; - if (B(e)) return e; - n = e.valueOf; - if (typeof n == "function") { - t = n.call(e); - if (B(t)) return t - } - r = e.toString; - if (typeof r == "function") { - t = r.call(e); - if (B(t)) return t - } - throw new TypeError - } - Function.prototype.bind || (Function.prototype.bind = function(t) { - var n = this; - if (typeof n != "function") throw new TypeError("Function.prototype.bind called on incompatible " + n); - var i = u.call(arguments, 1), - s = function() { - if (this instanceof s) { - var e = n.apply(this, i.concat(u.call(arguments))); - return Object(e) === e ? 
e : this - } - return n.apply(t, i.concat(u.call(arguments))) - }; - return n.prototype && (r.prototype = n.prototype, s.prototype = new r, r.prototype = null), s - }); - var i = Function.prototype.call, - s = Array.prototype, - o = Object.prototype, - u = s.slice, - a = i.bind(o.toString), - f = i.bind(o.hasOwnProperty), - l, c, h, p, d; - if (d = f(o, "__defineGetter__")) l = i.bind(o.__defineGetter__), c = i.bind(o.__defineSetter__), h = i.bind(o.__lookupGetter__), p = i.bind(o.__lookupSetter__); - if ([1, 2].splice(0) - .length != 2) - if (! function() { - function e(e) { - var t = new Array(e + 2); - return t[0] = t[1] = 0, t - } - var t = [], - n; - t.splice.apply(t, e(20)), t.splice.apply(t, e(26)), n = t.length, t.splice(5, 0, "XXX"), n + 1 == t.length; - if (n + 1 == t.length) return !0 - }()) Array.prototype.splice = function(e, t) { - var n = this.length; - e > 0 ? e > n && (e = n) : e == void 0 ? e = 0 : e < 0 && (e = Math.max(n + e, 0)), e + t < n || (t = n - e); - var r = this.slice(e, e + t), - i = u.call(arguments, 2), - s = i.length; - if (e === n) s && this.push.apply(this, i); - else { - var o = Math.min(t, n - e), - a = e + o, - f = a + s - o, - l = n - a, - c = n - o; - if (f < a) - for (var h = 0; h < l; ++h) this[f + h] = this[a + h]; - else if (f > a) - for (h = l; h--;) this[f + h] = this[a + h]; - if (s && e === c) this.length = c, this.push.apply(this, i); - else { - this.length = c + s; - for (h = 0; h < s; ++h) this[e + h] = i[h] - } - } - return r - }; - else { - var v = Array.prototype.splice; - Array.prototype.splice = function(e, t) { - return arguments.length ? v.apply(this, [e === void 0 ? 0 : e, t === void 0 ? 
this.length - e : t].concat(u.call(arguments, 2))) : [] - } - } - Array.isArray || (Array.isArray = function(t) { - return a(t) == "[object Array]" - }); - var m = Object("a"), - g = m[0] != "a" || !(0 in m); - Array.prototype.forEach || (Array.prototype.forEach = function(t) { - var n = F(this), - r = g && a(this) == "[object String]" ? this.split("") : n, - i = arguments[1], - s = -1, - o = r.length >>> 0; - if (a(t) != "[object Function]") throw new TypeError; - while (++s < o) s in r && t.call(i, r[s], s, n) - }), Array.prototype.map || (Array.prototype.map = function(t) { - var n = F(this), - r = g && a(this) == "[object String]" ? this.split("") : n, - i = r.length >>> 0, - s = Array(i), - o = arguments[1]; - if (a(t) != "[object Function]") throw new TypeError(t + " is not a function"); - for (var u = 0; u < i; u++) u in r && (s[u] = t.call(o, r[u], u, n)); - return s - }), Array.prototype.filter || (Array.prototype.filter = function(t) { - var n = F(this), - r = g && a(this) == "[object String]" ? this.split("") : n, - i = r.length >>> 0, - s = [], - o, u = arguments[1]; - if (a(t) != "[object Function]") throw new TypeError(t + " is not a function"); - for (var f = 0; f < i; f++) f in r && (o = r[f], t.call(u, o, f, n) && s.push(o)); - return s - }), Array.prototype.every || (Array.prototype.every = function(t) { - var n = F(this), - r = g && a(this) == "[object String]" ? this.split("") : n, - i = r.length >>> 0, - s = arguments[1]; - if (a(t) != "[object Function]") throw new TypeError(t + " is not a function"); - for (var o = 0; o < i; o++) - if (o in r && !t.call(s, r[o], o, n)) return !1; - return !0 - }), Array.prototype.some || (Array.prototype.some = function(t) { - var n = F(this), - r = g && a(this) == "[object String]" ? 
this.split("") : n, - i = r.length >>> 0, - s = arguments[1]; - if (a(t) != "[object Function]") throw new TypeError(t + " is not a function"); - for (var o = 0; o < i; o++) - if (o in r && t.call(s, r[o], o, n)) return !0; - return !1 - }), Array.prototype.reduce || (Array.prototype.reduce = function(t) { - var n = F(this), - r = g && a(this) == "[object String]" ? this.split("") : n, - i = r.length >>> 0; - if (a(t) != "[object Function]") throw new TypeError(t + " is not a function"); - if (!i && arguments.length == 1) throw new TypeError("reduce of empty array with no initial value"); - var s = 0, - o; - if (arguments.length >= 2) o = arguments[1]; - else - do { - if (s in r) { - o = r[s++]; - break - } - if (++s >= i) throw new TypeError("reduce of empty array with no initial value") - } while (!0); - for (; s < i; s++) s in r && (o = t.call(void 0, o, r[s], s, n)); - return o - }), Array.prototype.reduceRight || (Array.prototype.reduceRight = function(t) { - var n = F(this), - r = g && a(this) == "[object String]" ? this.split("") : n, - i = r.length >>> 0; - if (a(t) != "[object Function]") throw new TypeError(t + " is not a function"); - if (!i && arguments.length == 1) throw new TypeError("reduceRight of empty array with no initial value"); - var s, o = i - 1; - if (arguments.length >= 2) s = arguments[1]; - else - do { - if (o in r) { - s = r[o--]; - break - } - if (--o < 0) throw new TypeError("reduceRight of empty array with no initial value") - } while (!0); - do o in this && (s = t.call(void 0, s, r[o], o, n)); while (o--); - return s - }); - if (!Array.prototype.indexOf || [0, 1].indexOf(1, 2) != -1) Array.prototype.indexOf = function(t) { - var n = g && a(this) == "[object String]" ? this.split("") : F(this), - r = n.length >>> 0; - if (!r) return -1; - var i = 0; - arguments.length > 1 && (i = H(arguments[1])), i = i >= 0 ? 
i : Math.max(0, r + i); - for (; i < r; i++) - if (i in n && n[i] === t) return i; - return -1 - }; - if (!Array.prototype.lastIndexOf || [0, 1].lastIndexOf(0, -3) != -1) Array.prototype.lastIndexOf = function(t) { - var n = g && a(this) == "[object String]" ? this.split("") : F(this), - r = n.length >>> 0; - if (!r) return -1; - var i = r - 1; - arguments.length > 1 && (i = Math.min(i, H(arguments[1]))), i = i >= 0 ? i : r - Math.abs(i); - for (; i >= 0; i--) - if (i in n && t === n[i]) return i; - return -1 - }; - Object.getPrototypeOf || (Object.getPrototypeOf = function(t) { - return t.__proto__ || (t.constructor ? t.constructor.prototype : o) - }); - if (!Object.getOwnPropertyDescriptor) { - var y = "Object.getOwnPropertyDescriptor called on a non-object: "; - Object.getOwnPropertyDescriptor = function(t, n) { - if (typeof t != "object" && typeof t != "function" || t === null) throw new TypeError(y + t); - if (!f(t, n)) return; - var r, i, s; - r = { - enumerable: !0, - configurable: !0 - }; - if (d) { - var u = t.__proto__; - t.__proto__ = o; - var i = h(t, n), - s = p(t, n); - t.__proto__ = u; - if (i || s) return i && (r.get = i), s && (r.set = s), r - } - return r.value = t[n], r - } - } - Object.getOwnPropertyNames || (Object.getOwnPropertyNames = function(t) { - return Object.keys(t) - }); - if (!Object.create) { - var b; - Object.prototype.__proto__ === null ? 
b = function() { - return { - __proto__: null - } - } : b = function() { - var e = {}; - for (var t in e) e[t] = null; - return e.constructor = e.hasOwnProperty = e.propertyIsEnumerable = e.isPrototypeOf = e.toLocaleString = e.toString = e.valueOf = e.__proto__ = null, e - }, Object.create = function(t, n) { - var r; - if (t === null) r = b(); - else { - if (typeof t != "object") throw new TypeError("typeof prototype[" + typeof t + "] != 'object'"); - var i = function() {}; - i.prototype = t, r = new i, r.__proto__ = t - } - return n !== void 0 && Object.defineProperties(r, n), r - } - } - if (Object.defineProperty) { - var E = w({}), - S = typeof document == "undefined" || w(document.createElement("div")); - if (!E || !S) var x = Object.defineProperty - } - if (!Object.defineProperty || x) { - var T = "Property description must be an object: ", - N = "Object.defineProperty called on non-object: ", - C = "getters & setters can not be defined on this javascript engine"; - Object.defineProperty = function(t, n, r) { - if (typeof t != "object" && typeof t != "function" || t === null) throw new TypeError(N + t); - if (typeof r != "object" && typeof r != "function" || r === null) throw new TypeError(T + r); - if (x) try { - return x.call(Object, t, n, r) - } catch (i) {} - if (f(r, "value")) - if (d && (h(t, n) || p(t, n))) { - var s = t.__proto__; - t.__proto__ = o, delete t[n], t[n] = r.value, t.__proto__ = s - } else t[n] = r.value; - else { - if (!d) throw new TypeError(C); - f(r, "get") && l(t, n, r.get), f(r, "set") && c(t, n, r.set) - } - return t - } - } - Object.defineProperties || (Object.defineProperties = function(t, n) { - for (var r in n) f(n, r) && Object.defineProperty(t, r, n[r]); - return t - }), Object.seal || (Object.seal = function(t) { - return t - }), Object.freeze || (Object.freeze = function(t) { - return t - }); - try { - Object.freeze(function() {}) - } catch (k) { - Object.freeze = function(t) { - return function(n) { - return typeof n == 
"function" ? n : t(n) - } - }(Object.freeze) - } - Object.preventExtensions || (Object.preventExtensions = function(t) { - return t - }), Object.isSealed || (Object.isSealed = function(t) { - return !1 - }), Object.isFrozen || (Object.isFrozen = function(t) { - return !1 - }), Object.isExtensible || (Object.isExtensible = function(t) { - if (Object(t) === t) throw new TypeError; - var n = ""; - while (f(t, n)) n += "?"; - t[n] = !0; - var r = f(t, n); - return delete t[n], r - }); - if (!Object.keys) { - var L = !0, - A = ["toString", "toLocaleString", "valueOf", "hasOwnProperty", "isPrototypeOf", "propertyIsEnumerable", "constructor"], - O = A.length; - for (var M in { - toString: null - }) L = !1; - Object.keys = function I(e) { - if (typeof e != "object" && typeof e != "function" || e === null) throw new TypeError("Object.keys called on a non-object"); - var I = []; - for (var t in e) f(e, t) && I.push(t); - if (L) - for (var n = 0, r = O; n < r; n++) { - var i = A[n]; - f(e, i) && I.push(i) - } - return I - } - } - Date.now || (Date.now = function() { - return (new Date) - .getTime() - }); - var _ = " \n \f\r \u00a0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029\ufeff"; - if (!String.prototype.trim || _.trim()) { - _ = "[" + _ + "]"; - var D = new RegExp("^" + _ + _ + "*"), - P = new RegExp(_ + _ + "*$"); - String.prototype.trim = function() { - return String(this) - .replace(D, "") - .replace(P, "") - } - } - var F = function(e) { - if (e == null) throw new TypeError("can't convert " + e + " to object"); - return Object(e) - } -}) \ No newline at end of file diff --git a/doc/swift-target.md b/doc/swift-target.md index d023ee4f70..9f04349b9c 100644 --- a/doc/swift-target.md +++ b/doc/swift-target.md @@ -1,15 +1,10 @@ # ANTLR4 Language Target, Runtime for Swift -## Requirements - -ANTLR 4.7.2 requires Swift 4.2. It works on Swift 4.2.1 also. 
- -ANTLR 4.7.1 requires Swift 4.0, and does not work on Swift 4.2. (The status of -Swift 4.1 support is unknown.) - ## Performance Note -To use ANTLR4 Swift target in production environment, make sure to turn on compiler optimizations by following [these instructions](https://github.com/apple/swift-package-manager/blob/master/Documentation/Usage.md#build-configurations) if you use SwiftPM to build your project. If you are using Xcode to build your project, it's unlikely you will not use `release` build for production build. +To use ANTLR4 Swift target in production environment, make sure to turn on compiler optimizations by following [these instructions](https://github.com/apple/swift-package-manager/blob/main/Documentation/Usage.md#setting-the-build-configuration) if you use SwiftPM to build your project. + +If you are using Xcode to build your project, it's unlikely you will not use `release` build for production build. Conclusion is, you need to turn on `release` mode (which will have all the optimization pre configured for you) so the ANTLR4 Swift target can have reasonable parsing speed. @@ -60,10 +55,7 @@ Note that even if you are otherwise using ANTLR from a binary distribution, you should compile the ANTLR Swift runtime from source, because the Swift language does not yet have a stable ABI. -ANTLR uses Swift Package Manager to generate Xcode project files. Note that -Swift Package Manager does not currently support iOS, watchOS, or tvOS, so -if you wish to use those platforms, you will need to alter the project build -settings manually as appropriate. +ANTLR uses Swift Package Manager to generate Xcode project files. #### Download source code for ANTLR @@ -135,29 +127,12 @@ The runtime and generated grammar should now build correctly. ### Swift Package Manager Projects -Since we cannot have a separate repository for Swift target (see issue [#1774](https://github.com/antlr/antlr4/issues/1774)), -and Swift is currently not ABI stable. 
We currently support support SPM-based -projects by creating temporary local repository. +Add Antlr4 as a dependency to your `Package.swift` file. For more information, please see the [Swift Package Manager documentation](https://github.com/apple/swift-package-manager/tree/master/Documentation). -For people using [Swift Package Manager](https://swift.org/package-manager/), -the __boot.py__ script supports generating local repository that can be used -as a dependency to your project. Simply run: +```swift +.package(url: "https://github.com/antlr/antlr4", from: "4.13.2") ``` -python boot.py --gen-spm-module -``` - -The prompt will show something like below: - - - -Put the SPM directive that contains the url to temporary repository to your -project's Package.swift. And run `swift build` in your project. - -The project is generated in your system's `/tmp/` directory, if you find it -inconvenient, consider copy that generated ANTLR repository to some place -that won't be cleaned automatically and update `url` parameter in your -`Package.swift` file. ## Swift access levels diff --git a/doc/target-agnostic-grammars.md b/doc/target-agnostic-grammars.md new file mode 100644 index 0000000000..62ee48231a --- /dev/null +++ b/doc/target-agnostic-grammars.md @@ -0,0 +1,82 @@ +# Writing target-agnostic grammars + +Some grammars require +[semantic predicates](https://github.com/antlr/antlr4/blob/dev/doc/predicates.md) +to add context-sensitive parsing to what would generally be a context-free grammar. + +For example: +* In Fortran90, [lines that begin with a 'C' in column 1 +are comments](https://github.com/antlr/grammars-v4/blob/43fbb16fec1d474d38a603cc6a6bcbe5edf07b1e/fortran/fortran90/slow/hw.f90#L1), +which should be placed on a token stream other than the default. +But, if the 'C' does not begin in +column 1, then the input is invalid and should be flagged as so. + ```fortran + c Hello World. 
+ c This is a syntax error because 'c' does not start in column 1 + program hello + print *, 'Hello World!' + end + ``` + +* In CSharp, two [greater-than signs](https://util.unicode.org/UnicodeJsps/character.jsp?a=003E) +`'>>'` can either mean +[a right shift expression](https://github.com/antlr/grammars-v4/blob/43fbb16fec1d474d38a603cc6a6bcbe5edf07b1e/csharp/examples/AllInOneNoPreprocessor.cs#L657C15-L657C17) +or [part of a type declaration with templates](https://github.com/antlr/grammars-v4/blob/master/csharp/examples/AllInOneNoPreprocessor.cs#L463C33-L463C35). +Since lexers in Antlr are not parser aware, +the lexer must tokenize the two greater-than signs as two separate tokens. +A semantic predicate should be added to disallow a space between the two greater-than signs in the context +of an expression, but allowed in the context of a type declaration. + ```C# + class Foo { + void Func() + { + int x = 1000 > > 2; // syntax error if a space exists in the double greater-than sign + } + Dictionary > mapping; // nested template declaration, valid + } + ``` + +Antlr does not have a general-purpose language for predicates. These must be +written in the target language of the generated parser. The problem is that +a grammar would need to be forked for each target desired, which adds to the +burden of maintenance. + +However, it is possible to write the grammar such that forking is not required, +using _target-agnostic format_. + +## Rules in writing target-agnostic grammars + +1) You will need to [split your grammar](https://github.com/antlr/antlr4/blob/dev/doc/grammars.md#grammar-structure) +into separate lexer and parser grammars. Then, add `options { tokenVocab=...; }` to the parser grammar. +2) Create target-specific source code files that contain methods in a base class for +the parser or lexer grammar. In these source code files, write the code for the semantic +predicate. 
For example, the files for the Cpp target would be `Python3LexerBase.{cpp,h}`, `Python3ParserBase.{cpp,h}`. +3) In the grammar(s), add `options { superClass=... }`. This will +[superclass the recognizer](https://github.com/antlr/antlr4/blob/dev/doc/options.md#superclass). +For example, `options { superclass=Python3ParserBase; }`. +4) In the grammar(s), write code to make a single +call to the base-class method. The call should have a `this.` string +before the name of the method, e.g., `OPEN_PAREN : '(' {this.openBrace();};` +The action code must not reference Antlr attributes, +variables, types, or have semi-colons as statement separators or +control-flow statements of any kind. +5) For some targets like Cpp and PHP, you may need to add code to include source +code files so that the generated code compiles. +For these, add a comment +such as `// Insert here @header for lexer include.` or `// Insert here @header for parser include.` +to the grammar, before the first rule. +5) Add a Python script called "transformGrammar.py" that rewrites the grammar(s) +with some target-specific code syntax. + a) For Cpp: replace `this.` strings with `this->`. + b) For PHP: replace `this.` strings with `$this->`. + c) For Python: replace `this.` strings with `self.`, `l.`, or `p.` depending on +where the action or predicate is in the grammar. + d) For Cpp: replace `// Insert here @header for lexer include.` (or parser) with +`@header::lexer {#include ...}`. + e) For PHP: replace `// Insert here @header for lexer include.` (or parser) with +`@header::lexer {require ...}`. + e) Run `python transformGrammar.py *.g4` before generating the parser and lexer. 
+ +## Examples of target-agnostic grammars +* [fortran90](https://github.com/antlr/grammars-v4/tree/master/fortran/fortran90) +* [csharp](https://github.com/antlr/grammars-v4/tree/master/csharp) diff --git a/doc/targets.md b/doc/targets.md index f2a63689ae..0c61b6f3a8 100644 --- a/doc/targets.md +++ b/doc/targets.md @@ -4,19 +4,21 @@ This page lists the available and upcoming ANTLR runtimes. Please note that you * [Java](java-target.md). The [ANTLR v4 book](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference) has a decent summary of the runtime library. We have added a useful XPath feature since the book was printed that lets you select bits of parse trees. See [Runtime API](http://www.antlr.org/api/Java/index.html) and [Getting Started with ANTLR v4](getting-started.md) * [C#](csharp-target.md) -* [Python](python-target.md) (2 and 3) +* [Python](python-target.md) (3) * [JavaScript](javascript-target.md) +* [TypeScript](typescript-target.md) * [Go](go-target.md) * [C++](cpp-target.md) * [Swift](swift-target.md) * [PHP](php-target.md) +* [Dart](dart-target.md) * [Rust](rust-target.md) (Unstable) ## Target feature parity New features generally appear in the Java target and then migrate to the other targets, but these other targets don't always get updated in the same overall tool release. This section tries to identify features added to Java that have not been added to the other targets. -|Feature|Java|C♯|Python2|Python3|JavaScript|Go|C++|Swift|PHP +|Feature|Java|C♯|Python3|JavaScript|Go|C++|Swift|PHP|Dart |---|---|---|---|---|---|---|---|---|---| |Ambiguous tree construction|4.5.1|-|-|-|-|-|-|-|-| diff --git a/doc/tree-matching.md b/doc/tree-matching.md index f4c6d278cc..b0ec83bb5a 100644 --- a/doc/tree-matching.md +++ b/doc/tree-matching.md @@ -52,15 +52,6 @@ m.setDelimiters("<<", ">>", "$"); // $ is the escape character This would allow pattern `<> = <> ;$<< ick $>>` to be interpreted as elements: `ID`, ` = `, `expr`, and ` ;<< ick >>`. 
-```java -String xpath = "//blockStatement/*"; -String treePattern = "int = ;"; -ParseTreePattern p = -parser.compileParseTreePattern(treePattern, -JavaParser.RULE_localVariableDeclarationStatement); -List matches = p.findAll(tree, xpath); -``` - ### Pattern labels The tree pattern matcher tracks the nodes in the tree at matches against the tags in a tree pattern. That way we can use the `get()` and `getAll()` methods to retrieve components of the matched subtree. For example, for pattern ``, `get("ID")` returns the node matched for that `ID`. If more than one node matched the specified token or rule tag, only the first match is returned. If there is no node associated with the label, this returns null. diff --git a/doc/typescript-target.md b/doc/typescript-target.md new file mode 100644 index 0000000000..af03575d03 --- /dev/null +++ b/doc/typescript-target.md @@ -0,0 +1,125 @@ +# TypeScript + +Antlr4 TypeScript runtime uses the JavaScript runtime and adds type files to it. +This guarantees the same behaviour and performance across both target languages. +Generated lexers, parsers, listeners and visitors are generated in TypeScript. + +The runtime is built using TypeScript v4.8.3, node 16.17 and webpack 5.66. +It may work with older versions but they have not been tested and they will not be supported. + + +## How to create a TypeScript lexer or parser? + +This is pretty much the same as creating a Java lexer or parser, except you need to specify the language target, for example: + +```bash +$ antlr4 -Dlanguage=TypeScript MyGrammar.g4 +``` + +For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md). + +## Where can I get the runtime? + +Once you've generated the lexer and/or parser code, you need to download the runtime from [npm](https://www.npmjs.com/package/antlr4). + +We will not document here how to refer to the runtime from your project, since this would differ a lot depending on your project type and IDE. 
+ +## How do I get the runtime in my browser? + +The runtime is webpacked and sits in the dist folder. A .map file is also provided. + +## How do I run the generated lexer and/or parser? + +Let's suppose that your grammar is named, as above, "MyGrammar". Let's suppose this parser comprises a rule named "MyStartRule". The tool will have generated for you the following files: + +* MyGrammarLexer.ts +* MyGrammarParser.ts +* MyGrammarListener.ts (if you have not activated the -no-listener option) +* MyGrammarVisitor.ts (if you have activated the -visitor option) + +There is no listener or visitor interface generated, instead the generated listener and visitor class methods are implemented using lambdas. + +Now a fully functioning script might look like the following: + +```typescript +import { CharStream, CommonTokenStream } from 'antlr4'; +import MyGrammarLexer from './MyGrammarLexer'; +import MyGrammarParser from './MyGrammarParser'; + +const input = "your text to parse here" +const chars = new CharStream(input); // replace this with a FileStream as required +const lexer = new MyGrammarLexer(chars); +const tokens = new CommonTokenStream(lexer); +const parser = new MyGrammarParser(tokens); +const tree = parser.MyStartRule(); + +``` + +Tha above program will work. But it won't be useful unless you do one of the following: + +* you visit the parse tree using a custom listener +* you visit the parse tree using a custom visitor +* your grammar contains production code (like AntLR3) + +(please note that production code is target specific, so you can't have multi target grammars that include production code) + +## How do I create and run a visitor? 
+ +You need to create a custom visitor and use it to visit the parse tree, as follows: +```typescript + +import { ParserRuleContext } from 'antlr4'; +import MyGrammarVisitor from './MyGrammarVisitor'; + +class CustomVisitor extends MyGrammarVisitor { + + visitChildren(ctx: ParserRuleContext) { + if (!ctx) { + return; + } + if (ctx.children) { + return ctx.children.map(child => { + if (child.children && child.children.length != 0) { + return child.accept(this); + } else { + return child.getText(); + } + }); + } + } +} + +tree.accept(new CustomVisitor()); +```` + +## How do I create and run a custom listener? + +You need to create a custom listener and use it to visit the parse tree, as follows: + +```typescript + +import { ParseTreeWalker } from 'antlr4'; +import MyGrammarListener from './MyGrammarListener'; + +class MyTreeWalker extends MyGrammarListener { + + exitMyStartRule = (ctx: MyStartRuleContext) => { + console.log("In MyStartRule"); + }; + +} + +const walker = new MyTreeWalker(); +ParseTreeWalker.DEFAULT.walk(walker, tree); + +``` + +## How do I integrate my parser with ACE editor? + +This specific task is described in this [dedicated page](ace-javascript-target.md). + +## How can I learn more about ANTLR? + +Further information can be found from "The definitive ANTLR 4 reference" book. + +The TypeScript implementation of ANTLR is based on the JavaScript implementation, which is as close as possible to the Java one, so you shouldn't find it difficult to adapt the book's examples to TypeScript. diff --git a/doc/wildcard.md b/doc/wildcard.md index f3d1c3a0b5..a597c649c9 100644 --- a/doc/wildcard.md +++ b/doc/wildcard.md @@ -61,7 +61,7 @@ END : '>>' ;

After crossing through a nongreedy subrule within a lexical rule, all decision-making from then on is "first match wins."

-For example, literal `ab` in rule right-hand side (grammar fragment) `.*? (’a’|’ab’)` is dead code and can never be matched. If the input is ab, the first alternative, ’a’, matches the first character and therefore succeeds. (’a’|’ab’) by itself on the right-hand side of a rule properly matches the second alternative for input ab. This quirk arises from a nongreedy design decision that’s too complicated to go into here.

+For example, literal `ab` in rule right-hand side (grammar fragment) `.*? ('a'|'ab')` is dead code and can never be matched. If the input is ab, the first alternative, 'a', matches the first character and therefore succeeds. ('a'|'ab') by itself on the right-hand side of a rule properly matches the second alternative for input ab. This quirk arises from a nongreedy design decision that’s too complicated to go into here.

  • @@ -74,7 +74,7 @@ ACTION3 : '<' ( STRING | ~[">] )* '>' ; // Doesn't allow <"foo>; greedy * STRING : '"' ( '\\"' | . )*? '"' ; ``` -Rule `ACTION1` allows unterminated strings, such as `"foo`, because input `"foo` matches to the wildcard part of the loop. It doesn’t have to go into rule `STRING` to match a quote. To fix that, rule `ACTION2` uses `~’"’` to match any character but the quote. Expression `~’"’` is still ambiguous with the `’]’` that ends the rule, but the fact that the subrule is nongreedy means that the lexer will exit the loop upon a right square bracket. To avoid a nongreedy subrule, make the alternatives explicit. Expression `~[">]` matches anything but the quote and right angle bracket. Here’s a sample run: +Rule `ACTION1` allows unterminated strings, such as `"foo`, because input `"foo` matches to the wildcard part of the loop. It doesn’t have to go into rule `STRING` to match a quote. To fix that, rule `ACTION2` uses `~'"'` to match any character but the quote. Expression `~'"'` is still ambiguous with the `']'` that ends the rule, but the fact that the subrule is nongreedy means that the lexer will exit the loop upon a right square bracket. To avoid a nongreedy subrule, make the alternatives explicit. Expression `~[">]` matches anything but the quote and right angle bracket. Here’s a sample run: ```bash $ antlr4 Actions.g4 @@ -225,4 +225,4 @@ ANY : . 
; You get: - \ No newline at end of file + diff --git a/docker/.dockerignore b/docker/.dockerignore new file mode 100644 index 0000000000..1455bc2c3d --- /dev/null +++ b/docker/.dockerignore @@ -0,0 +1,7 @@ +tool-testsuite +runtime-testsuite +.git* +README.adoc +README.md +cache +tests diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000000..c5c2a9e655 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,21 @@ +FROM eclipse-temurin:21 AS builder + +WORKDIR /opt/antlr4 + +ARG ANTLR_VERSION="4.13.2" +ARG MAVEN_OPTS="-Xmx1G" + + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install maven git -y \ + && git clone https://github.com/antlr/antlr4.git \ + && cd antlr4 \ + && git checkout $ANTLR_VERSION \ + && mvn clean --projects tool --also-make \ + && mvn -DskipTests install --projects tool --also-make \ + && mv ./tool/target/antlr4-*-complete.jar antlr4-tool.jar + +FROM eclipse-temurin:21-jre + +COPY --from=builder /opt/antlr4/antlr4/antlr4-tool.jar /usr/local/lib/ +WORKDIR /work +ENTRYPOINT ["java", "-Xmx500M", "-cp", "/usr/local/lib/antlr4-tool.jar", "org.antlr.v4.Tool"] diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000000..b1d9e77ce2 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,34 @@ +# Docker Image for ANTLR4 + +This Docker image wraps current version of **ANTLR4** inclusive **Java runtime environment** so it can be executed as transparent command line tool even on machines without installed Java. + +## Docker Image + +The image uses the official [eclipse-temurin:11](https://hub.docker.com/_/eclipse-temurin/tags?page=1&name=11&ordering=-name) image +for building a distribution of ANTLR4 and [eclipse-temurin:11-jre](https://hub.docker.com/_/eclipse-temurin/tags?page=1&name=11-jre&ordering=-name) for runtime. + +## Build + +You can build docker image from source code locally. 
+ + git clone https://github.com/antlr/antlr4.git + cd antlr4/docker + docker build -t antlr/antlr4 --platform linux/amd64 . + + +## Run + +For security reasons is **ANTLR4 Docker image** designed to run in the current folder only, so a container doesn't have any access to any other folders on a host system. Since this is a transparent call of Docker image from command line, where new files are generated, it is also a good idea to execute code inside a Docker as a non root user and match it to the host caller. + +Calling a dockerized ANTLR4 image can look like this: + +```shell +wget https://raw.githubusercontent.com/antlr/grammars-v4/master/json/JSON.g4 +docker run --rm -u $(id -u ${USER}):$(id -g ${USER}) -v `pwd`:/work antlr/antlr4 -Dlanguage=Go JSON.g4 +``` + +## Integration as alias + + alias antlr4='docker run -it -u $(id -u ${USER}):$(id -g ${USER}) -v $(pwd):/work antlr/antlr4 $@' + + diff --git a/historical-contributors-agreement.txt b/historical-contributors-agreement.txt new file mode 100644 index 0000000000..a7d5ca8f84 --- /dev/null +++ b/historical-contributors-agreement.txt @@ -0,0 +1,335 @@ +ANTLR Project Contributors Certification of Origin and Rights + +All contributors to ANTLR v4 must formally agree to abide by this +certificate of origin by signing on the bottom with their github +userid, full name, email address (you can obscure your e-mail, but it +must be computable by human), and date. + +By signing this agreement, you are warranting and representing that +you have the right to release code contributions or other content free +of any obligations to third parties and are granting Terence Parr and +ANTLR project contributors, henceforth referred to as The ANTLR +Project, a license to incorporate it into The ANTLR Project tools +(such as ANTLRWorks and StringTemplate) or related works under the BSD +license. You understand that The ANTLR Project may or may not +incorporate your contribution and you warrant and represent the +following: + +1. 
I am the creator of all my contributions. I am the author of all + contributed work submitted and further warrant and represent that + such work is my original creation and I have the right to license + it to The ANTLR Project for release under the 3-clause BSD + license. I hereby grant The ANTLR Project a nonexclusive, + irrevocable, royalty-free, worldwide license to reproduce, + distribute, prepare derivative works, and otherwise use this + contribution as part of the ANTLR project, associated + documentation, books, and tools at no cost to The ANTLR Project. + +2. I have the right to submit. This submission does not violate the + rights of any person or entity and that I have legal authority over + this submission and to make this certification. + +3. If I violate another's rights, liability lies with me. I agree to + defend, indemnify, and hold The ANTLR Project and ANTLR users + harmless from any claim or demand, including reasonable attorney + fees, made by any third party due to or arising out of my violation + of these terms and conditions or my violation of the rights of + another person or entity. + +4. I understand and agree that this project and the contribution are + public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license indicated in the file. + +I have read this agreement and do so certify by adding my signoff to +the end of the following contributors list. 
+ +CONTRIBUTORS: + +YYYY/MM/DD, github id, Full name, email +2012/07/12, parrt, Terence Parr, parrt@antlr.org +2012/09/18, sharwell, Sam Harwell, sam@tunnelvisionlabs.com +2012/10/10, stephengaito, Stephen Gaito, stephen@percepitsys.co.uk +2012/11/23, maguro, Alan Cabrera, adc@toolazydogs.com +2013/01/29, metadave, Dave Parfitt, diparfitt@gmail.com +2013/03/06, bkiers, Bart Kiers, bkiers@gmail.com +2013/08/20, cayhorstmann, Cay Horstmann, cay@horstmann.com +2014/03/18, aphyr, Kyle Kingsbury, aphyr@aphyr.com +2014/06/07, ericvergnaud, Eric Vergnaud, eric.vergnaud@wanadoo.fr +2014/07/04, jimidle, Jim Idle, jimi@Idle.ws +2014/01/01, danmclaughlin, Dan McLaughlin, dan.mclaughlin@gmail.com +2014/09/04. jeduden, Jan-Eric Duden, jeduden@gmail.com +2014/09/27, petrbel, Petr Bělohlávek, antlr@petrbel.cz +2014/10/18, sergiusignacius, Sérgio Silva, serge.a.silva@gmail.com +2014/10/26, bdkearns, Brian Kearns, bdkearns@gmail.com +2014/10/27, michaelpj, Michael Peyton Jones, michaelpj@gmail.com +2015/01/29, TomLottermann, Thomas Lottermann, tomlottermann@gmail.com +2015/02/15, pavlo, Pavlo Lysov, pavlikus@gmail.com +2015/03/07, RedTailedHawk, Lawrence Parker, larry@answerrocket.com +2015/04/03, rljacobson, Robert Jacobson, rljacobson@gmail.com +2015/04/06, ojakubcik, Ondrej Jakubcik, ojakubcik@gmail.com +2015/04/29, jszheng, Jinshan Zheng, zheng_js@hotmail.com +2015/05/08, ViceIce, Michael Kriese, michael.kriese@gmx.de +2015/05/09, lkraz, Luke Krasnoff, luke.krasnoff@gmail.com +2015/05/12, Pursuit92, Josh Chase, jcjoshuachase@gmail.com +2015/05/20, peturingi, Pétur Ingi Egilsson, petur@petur.eu +2015/05/27, jcbrinfo, Jean-Christophe Beaupré, jcbrinfo@users.noreply.github.com +2015/06/29, jvanzyl, Jason van Zyl, jason@takari.io +2015/08/18, krzkaczor, Krzysztof Kaczor, krzysztof@kaczor.io +2015/09/18, worsht, Rajiv Subrahmanyam, rajiv.public@gmail.com +2015/09/24, HSorensen, Henrik Sorensen, henrik.b.sorensen@gmail.com +2015/10/06, brwml, Bryan Wilhelm, 
bryan.wilhelm@microsoft.com +2015/10/08, fedotovalex, Alex Fedotov, me@alexfedotov.com +2015/10/12, KvanTTT, Ivan Kochurkin, ivan.kochurkin@gmail.com +2015/10/21, martin-probst, Martin Probst, martin-probst@web.de +2015/10/21, hkff, Walid Benghabrit, walid.benghabrit@mines-nantes.fr +2015/11/12, cooperra, Robbie Cooper, cooperra@users.noreply.github.com +2015/11/25, abego, Udo Borkowski, ub@abego.org +2015/12/17, sebadur, Sebastian Badur, sebadur@users.noreply.github.com +2015/12/23, pboyer, Peter Boyer, peter.b.boyer@gmail.com +2015/12/24, dtymon, David Tymon, david.tymon@gmail.com +2016/02/18, reitzig, Raphael Reitzig, reitzig[at]cs.uni-kl.de +2016/03/10, mike-lischke, Mike Lischke, mike@lischke-online.de +2016/03/27, beardlybread, Bradley Steinbacher, bradley.j.steinbacher@gmail.com +2016/03/29, msteiger, Martin Steiger, antlr@martin-steiger.de +2016/03/28, gagern, Martin von Gagern, gagern@ma.tum.de +2016/07/10, twz123, Tom Wieczorek, tom.wieczorek@zalando.de +2016/07/20, chrisheller, Chris Heller, chris.heller.greyheller@gmail.com +2016/07/20, nburles, Nathan Burles, nburles@gmail.com +2016/07/20, kosl90, Li Liqiang, kos1990l@gmail.com +2016/07/27, timoc, Tim O'Callaghan, timo@linux.com +2016/07/26, nic30, Michal Orsák, michal.o.socials@gmail.com +2016/07/18, willfaught, Will Faught, will.faught@gmail.com +2016/08/08, wjkohnen, Wolfgang Johannes Kohnen, wjkohnen-go-antlr@ko-sys.com +2016/08/11, BurtHarris, Ralph "Burt" Harris, Burt_Harris_antlr4@azxs.33mail.com +2016/08/19, andjo403, Andreas Jonson, andjo403@hotmail.com +2016/09/27, harriman, Kurt Harriman, harriman@acm.org +2016/10/13, cgudrian, Christian Gudrian, christian.gudrian@gmx.de +2016/10/13, nielsbasjes, Niels Basjes, niels@basjes.nl +2016/10/21, FloorGoddijn, Floor Goddijn, floor.goddijn[at]aimms.com +2016/11/01, RYDB3RG, Kai Stammerjohann, RYDB3RG@users.noreply.github.com +2016/11/05, runner-mei, meifakun, runner.mei@gmail.com +2016/11/15, hanjoes, Hanzhou Shi, hanzhou87@gmail.com +2016/11/16, 
sridharxp, Sridharan S, aurosridhar@gmail.com +2016/11/06, NoodleOfDeath, Thom Morgan, github@bytemeapp.com +2016/11/01, sebkur, Sebastian Kürten, sebastian@topobyte.de +2016/04/13, renatahodovan, Renata Hodovan, reni@inf.u-szeged.hu +2016/11/05, ewanmellor, Ewan Mellor, github@ewanmellor.org +2016/11/06, janyou, Janyou, janyou.antlr@outlook.com +2016/11/20, marcohu, Marco Hunsicker, antlr@hunsicker.de +2016/09/02, lygav, Vladimir (Vladi) Lyga, lyvladi@gmail.com +2016/09/23, ghosthope, Dmitry Shakhtanov, sudstrike@gmail.com +2016/11/25, MrSampson, Oliver Sampson, olsam@quickaudio.com +2016/11/29, millergarym, Gary Miller, miller.garym@gmail.com +2016/11/29, wxio, Gary Miller, gm@wx.io +2016/11/29, Naios, Denis Blank, naios@users.noreply.github.com +2016/12/01, samtatasurya, Samuel Tatasurya, xemradiant@gmail.com +2016/12/03, redxdev, Samuel Bloomberg, sam@redxdev.com +2016/12/11, Gaulouis, Gaulouis, gaulouis.com@gmail.com +2016/12/22, akosthekiss, Akos Kiss, akiss@inf.u-szeged.hu +2016/12/24, adrpo, Adrian Pop, adrian.pop@liu.se +2017/01/11, robertbrignull, Robert Brignull, robertbrignull@gmail.com +2017/01/13, marcelo-rocha, Marcelo Rocha, mcrocha@gmail.com +2017/01/23, bhamiltoncx, Ben Hamilton, bhamiltoncx+antlr@gmail.com +2017/01/18, mshockwave, Bekket McClane, yihshyng223@gmail.com +2017/02/10, lionelplessis, Lionel Plessis, lionelplessis@users.noreply.github.com +2017/02/14, lecode-official, David Neumann, david.neumann@lecode.de +2017/02/14, xied75, Dong Xie, xied75@gmail.com +2017/02/20, Thomasb81, Thomas Burg, thomasb81@gmail.com +2017/02/26, jvasileff, John Vasileff, john@vasileff.com +2017/03/08, harry-tallbelt, Igor Vysokopoyasny, harry.tallbelt@gmail.com +2017/03/09, teverett, Tom Everett, tom@khubla.com +2017/03/03, chund, Christian Hund, christian.hund@gmail.com +2017/03/15, robertvanderhulst, Robert van der Hulst, robert@xsharp.eu +2017/03/28, cmd-johnson, Jonas Auer, jonas.auer.94@gmail.com +2017/04/12, lys0716, Yishuang Lu, luyscmu@gmail.com 
+2017/04/30, shravanrn, Shravan Narayan, shravanrn@gmail.com +2017/05/11, jimallman, Jim Allman, jim@ibang.com +2017/05/26, waf, Will Fuqua, wafuqua@gmail.com +2017/05/29, kosak, Corey Kosak, kosak@kosak.com +2017/06/11, erikbra, Erik A. Brandstadmoen, erik@brandstadmoen.net +2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com +2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/06/28, jBugman, Sergey Parshukov, codedby@bugman.me +2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com +2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in +2017/07/17, vaibhavaingankar09, Vaibhav Vaingankar, vbhvvaingankar9@gmail.com +2017/07/23, venkatperi, Venkat Peri, venkatperi@gmail.com +2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com +2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com +2017/07/27, matthauck, Matt Hauck, matthauck@gmail.com +2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com +2017/08/20, tiagomazzutti, Tiago Mazzutti, tiagomzt@gmail.com +2017/08/20, milanaleksic, Milan Aleksic, milanaleksic@gmail.com +2017/08/29, Eddy Reyes, eddy@mindsight.io +2017/09/09, brauliobz, Bráulio Bezerra, brauliobezerra@gmail.com +2017/09/11, sachinjain024, Sachin Jain, sachinjain024@gmail.com +2017/09/25, kaedvann, Rostislav Listerenko, r.listerenko@gmail.com +2017/10/06, bramp, Andrew Brampton, brampton@gmail.com +2017/10/15, simkimsia, Sim Kim Sia, kimcity@gmail.com +2017/10/27, Griffon26, Maurice van der Pot, griffon26@kfk4ever.com +2017/05/29, rlfnb, Ralf Neeb, rlfnb@rlfnb.de +2017/10/29, gendalph, Максим Прохоренко, Maxim\dotProhorenko@gm@il.com +2017/11/02, jasonmoo, Jason Mooberry, jason.mooberry@gmail.com +2017/11/05, ajaypanyala, Ajay Panyala, ajay.panyala@gmail.com +2017/11/24, zqlu.cn, Zhiqiang Lu, zqlu.cn@gmail.com +2017/11/28, niccroad, Nicolas Croad, nic.croad@gmail.com +2017/12/01, DavidMoraisFerreira, David Morais Ferreira, david.moraisferreira@gmail.com +2017/12/01, 
SebastianLng, Sebastian Lang, sebastian.lang@outlook.com +2017/12/03, oranoran, Oran Epelbaum, oran / epelbaum me +2017/12/12, janlinde, Jan Lindemann, jan@janware.com +2017/12/13, enessoylu, Enes Soylu, enessoylutr@gmail.com +2017/12/20, kbsletten, Kyle Sletten, kbsletten@gmail.com +2017/12/27, jkmar, Jakub Marciniszyn, marciniszyn.jk@gmail.com +2018/03/08, dannoc, Daniel Clifford, danno@google.com +2018/03/10, uvguy, kangjoni76@gmail.com +2018/01/06, kasbah, Kaspar Emanuel, kaspar@monostable.co.uk +2018/01/15, xgcssch, Sönke Schau, xgcssch@users.noreply.github.com +2018/02/08, razfriman, Raz Friman, raz@razfriman.com +2018/02/11, io7m, Mark Raynsford, code@io7m.com +2018/04/24, solussd, Joe Smith, joe@uwcreations.com +2018/15/05, johnvanderholt, jan dillingh johnvanderholte@gmail.com +2018/06/14, scadgek, Sergey Chupov, scadgek@live.com +2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com +2018/06/27, wu-sheng, Wu Sheng, wu.sheng@foxmail.com +2018/02/25, chaseoxide, Marcus Ong, taccs97[at]gmail[dot]com +2018/05/15, johnvanderholt, jan dillingh johnvanderholte@gmail.com +2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com +2018/05/15, johnvanderholt, jan dillingh johnvanderholte@gmail.com +2018/05/17, sinopsysHK, Eric Bardes, sinofwd@gmail.com +2018/05/23, srvance, Stephen Vance, steve@vance.com +2018/06/14, alecont, Alessandro Contenti, alecontenti@hotmail.com +2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com +2018/07/03, jgoppert, James Goppert, james.goppert@gmail.com +2018/07/27, Maksim Novikov, mnovikov.work@gmail.com +2018/08/03, ENDOH takanao, djmchl@gmail.com +2018/10/08, xsIceman, Andreas Skaar, andreas.skaar@gmail.com +2018/10/18, edirgarcia, Edir García Lazo, edirgl@hotmail.com +2018/07/31, Lucas Henrqiue, lucashenrique580@gmail.com +2018/08/03, ENDOH takanao, djmchl@gmail.com +2018/10/29, chrisaycock, Christopher Aycock, chris[at]chrisaycock[dot]com +2018/11/12, vinoski, Steve Vinoski, vinoski@ieee.org +2018/11/14, 
nxtstep, Adriaan (Arjan) Duz, codewithadriaan[et]gmail[dot]com +2018/11/15, amykyta3, Alex Mykyta, amykyta3@users.noreply.github.com +2018/11/29, hannemann-tamas, Ralf Hannemann-Tamas, ralf.ht@gmail.com +2018/12/20, WalterCouto, Walter Couto, WalterCouto@users.noreply.github.com +2018/12/23, youkaichao, Kaichao You, youkaichao@gmail.com +2019/01/01, khoroshilov, Alexey Khoroshilov, khoroshilov@ispras.ru +2019/01/02, wkhemir, Wail Khemir, khemirwail@gmail.com +2019/01/16, kuegi, Markus Zancolo, markus.zancolo@roomle.com +2019/01/29, hc-codersatlas, Harry Chan, harry.chan@codersatlas.com +2019/02/06, ralucado, Cristina Raluca Vijulie, ralucris.v[at]gmail[dot]com +2019/02/23, gedimitr, Gerasimos Dimitriadis, gedimitr@gmail.com +2019/03/13, base698, Justin Thomas, justin.thomas1@gmail.com +2019/03/18, carlodri, Carlo Dri, carlo.dri@gmail.com +2019/05/02, askingalot, Andy Collins, askingalot@gmail.com +2019/05/13, mapio, Massimo Santini, massimo.santini@gmail.com +2019/07/11, olowo726, Olof Wolgast, olof@baah.se +2019/07/16, abhijithneilabraham, Abhijith Neil Abraham, abhijithneilabrahampk@gmail.com +2019/07/26, Braavos96, Eric Hettiaratchi, erichettiaratchi@gmail.com +2019/08/02, thron7, Thomas Herchenroeder, thron7[at]users[dot]sourceforge[dot]net +2019/08/12, easonlin404, Eason Lin, easonlin404@gmail.com +2019/08/23, akaJes, Oleksandr Mamchyts, akaJes@gmail.com +2019/08/27, wurzelpeter, Markus Franke, markus[hyphen]franke[at]web[dot]de +2019/09/10, ImanHosseini, Iman Hosseini, hosseini.iman@yahoo.com +2019/09/03, João Henrique, johnnyonflame@hotmail.com +2019/09/10, neko1235, Ihar Mokharau, igor.mohorev@gmail.com +2019/09/10, yar3333, Yaroslav Sivakov, yar3333@gmail.com +2019/09/10, marcospassos, Marcos Passos, marcospassos.com@gmail.com +2019/09/10, amorimjuliana, Juliana Amorim, juu.amorim@gmail.com +2019/09/15, sullis, Sean Sullivan, github@seansullivan.com +2019/09/17, kaz, Kazuki Sawada, kazuki@6715.jp +2019/09/28, lmy269, Mingyang Liu, lmy040758@gmail.com 
+2019/10/29, tehbone, Tabari Alexander, tehbone@gmail.com +2019/10/31, a-square, Alexei Averchenko, lex.aver@gmail.com +2019/11/05, listba, Ben List, ben.list89@gmail.com +2019/11/11, foxeverl, Liu Xinfeng, liuxf1986[at]gmail[dot]com +2019/11/17, felixn, Felix Nieuwenhuizhen, felix@tdlrali.com +2019/11/18, mlilback, Mark Lilback, mark@lilback.com +2020/01/19, lingyv-li, Lingyu Li, lingyv.li@gmail.com +2020/02/02, carocad, Camilo Roca, carocad@unal.edu.co +2020/02/10, rrevenantt, Konstantin Anisimov, rrevenantt[at]gmail.com +2020/02/10, julibert, Julián Bermúdez Ortega, julibert.dev@gmail.com +2020/02/17, quantumsheep, Nathanael Demacon, nathanael.dmc@outlook.fr +2020/02/21, StochasticTinkr, Daniel Pitts, github@coloraura.com +2020/03/17, XsongyangX, Song Yang, songyang1218@gmail.com +2020/04/07, deniskyashif, Denis Kyashif, denis.kyashif@gmail.com +2020/04/08, lwehmeier, Leon Wehmeier, wehmeier@st.ovgu.de +2020/04/10, agrabski, Adam Grabski, adam.gr@outlook.com +2020/04/23, martinvw, Martin van Wingerden, martin@martinvw.nl +2020/04/23, kaczmarj, Jakub Kaczmarzyk, jakub.kaczmarzyk@stonybrookmedicine.edu +2020/04/30, TristonianJones, Tristan Swadell, tswadell@google.com +2020/05/06, iammosespaulr, Moses Paul R, iammosespaulr@gmail.com +2020/05/10, gomerser, Erik Gomersbach, gomerser@gomersba.ch +2020/05/22, keywan-ghadami-oxid, Keywan Ghadami, keywan.ghadami@oxid-esales.com +2020/05/25, graknol, Sindre van der Linden, graknol@gmail.com +2020/05/31, d-markey, David Markey, dmarkey@free.fr +2020/06/02, cohomology, Kilian Kilger, kkilger AT gmail.com +2020/06/04, IohannRabeson, Iohann Rabeson, iotaka6@gmail.com +2020/06/04, sigmasoldi3r, Pablo Blanco, pablobc.1995@gmail.com +2020/06/15, mattpaletta, Matthew Paletta, mattpaletta@gmail.com +2020/07/01, sha-N, Shan M Mathews, admin@bluestarqatar.com +2020/08/22, stevenjohnstone, Steven Johnstone, steven.james.johnstone@gmail.com +2020/09/06, ArthurSonzogni, Sonzogni Arthur, arthursonzogni@gmail.com +2020/09/10, Khailian, 
Arunav Sanyal, arunav.sanyal91@gmail.com +2020/09/12, Clcanny, Charles Ruan, a837940593@gmail.com +2020/09/15, rmcgregor1990, Robert McGregor, rmcgregor1990@gmail.com +2020/09/16, trenki2, Markus Trenkwalder, trenki2[at]gmx[dot]net +2020/10/08, Marti2203, Martin Mirchev, mirchevmartin2203@gmail.com +2020/10/16, adarshbhat, Adarsh Bhat, adarshbhat@users.noreply.github.com +2020/10/20, adamwojs, Adam Wójs, adam[at]wojs.pl +2020/10/24, cliid, Jiwu Jang, jiwujang@naver.com +2020/11/05, MichelHartmann, Michel Hartmann, MichelHartmann@users.noreply.github.com +2020/11/26, mr-c, Michael R. Crusoe, 1330696+mr-c@users.noreply.github.com +2020/12/01, maxence-lefebvre, Maxence Lefebvre, maxence-lefebvre@users.noreply.github.com +2020/12/03, electrum, David Phillips, david@acz.org +2021/01/03, niw, Yoshimasa Niwa, niw@niw.at +2021/01/25, l215884529, Qiheng Liu, 13607681+l215884529@users.noreply.github.com +2021/02/02, tsotnikov, Taras Sotnikov, taras.sotnikov@gmail.com +2021/02/10, jirislaby, Jiri Slaby, jirislaby@gmail.com +2021/02/21, namasikanam, Xingyu Xie, namasikanam@gmail.com +2021/02/26, ahooper, Andrew Hooper, ahooper at kos dot net +2021/02/27, khmarbaise, Karl Heinz Marbaise, github@soebes.com +2021/02/28, Dante-Broggi, Dante Broggi, 34220985+Dante-Broggi@users.noreply.github.com +2021/03/02, hackeris +2021/03/03, xTachyon, Damian Andrei, xTachyon@users.noreply.github.com +2021/03/22, 100mango, Fangqiu Fang, 100mango@gmail.com +2021/04/07, b1f6c1c4, Jinzheng Tu, b1f6c1c4@gmail.com +2021/04/17, jaggerjo, Josua Jäger, mail@jaggerjo.com +2021/04/24, bigerl, Alexander Bigerl, alexander [äät] bigerl [pkt] eu +2021/05/02, michalharakal, Michal Harakal, michal.harakal@users.noreply.github.com +2021/05/03, redexp, Sergii Kliuchnyk, redexp@users.noreply.github.com +2021/05/03, mitar, Mi Tar, mitar.git@tnode.com +2021/05/04, joakker, Joaquín León, joaquinandresleon108@gmail.com +2021/05/06, renancaraujo, Renan C. 
Araújo, renancaraujo@users.noreply.github.com +2021/05/06, canastro, Ricardo Canastro, ricardocanastro@users.noreply.github.com +2021/06/19, abe149, Abe Skolnik, abe 149 at gmail . com +2021/07/01, appel1, Johan Appelgren, johan.appelgren@gmail.com +2021/07/01, marcauberer, Marc Auberer, marc.auberer@chillibits.com +2021/07/14, renzhentaxibaerde, Renzhentaxi Baerde, renzhentaxibaerde@gmail.com +2021/07/21, skittlepox, Benjamin Spiegel, bspiegel@cs.brown.edu +2021/07/29, ksyx, Qixing Xue, qixingxue@outlook.com +2021/07/29, rachidlamouri, Rachid Lamouri, rachidlamouri@gmail.com +2021/08/02, minjoosur, Minjoo Sur, msur@salesforce.com +2021/08/05, jjeffcaii, Jeff Tsai, caiweiwei.cww@alibaba-inc.com +2021/08/08, ansiemens, Yi-Hong Lin, ansiemens@gmail.com +2021/08/25, XenoAmess, Jin Xu, xenoamess@gmail.com +2021/09/08, jmcken8, Joel McKenzie, joel.b.mckenzie@gmail.com +2021/09/23, skalt, Steven Kalt, kalt.steven@gmail.com +2021/09/26, idraper, Isaac Draper, idraper@byu.edu +2021/10/10, tools4origins, Erwan Guyomarc'h, contact@erwan-guyomarch.fr +2021/10/19, jcking, Justin King, jcking@google.com +2021/10/31, skef, Skef Iterum, github@skef.org +2021/10/31, hlstwizard, h.l.s.t@163.com +2021/11/30, bollwyvl, Nick Bollweg, bollwyvl@users.noreply.github.com +2021/12/03, eneko, Eneko Alonso, eneko.alonso@gmail.com +2021/12/16, Ketler13, Oleksandr Martyshchenko, oleksandr.martyshchenko@gmail.com +2021/12/25, Tinker1024, Tinker1024, tinker@huawei.com +2021/12/31, Biswa96, Biswapriyo Nath, nathbappai@gmail.com +2022/03/07, chenquan, chenquan, chenquan.dev@gmail.com +2022/03/15, hzeller, Henner Zeller, h.zeller@acm.org +2025/05/28, alexsnaps, Alex Snaps, alex@wcgw.dev +2025/07/09, cyqw, Chen Yong, cyqw@163.com + +2025/09/12, torbensen, Torben Magne, torbenmagne@gmail.com diff --git a/pom.xml b/pom.xml index 7cf5813dee..f85f7af2a7 100644 --- a/pom.xml +++ b/pom.xml @@ -13,22 +13,26 @@ org.antlr antlr4-master - 4.8-2-SNAPSHOT + 4.13.3-SNAPSHOT pom ANTLR 4 ANTLR 4 Master Build POM - 
http://www.antlr.org + https://www.antlr.org/ 1992 ANTLR - http://www.antlr.org + https://www.antlr.org/ - + + 3.8 + + + - The BSD License - http://www.antlr.org/license.html + BSD-3-Clause + https://www.antlr.org/license.html repo @@ -36,7 +40,7 @@ Terence Parr - http://parrt.cs.usfca.edu + https://github.com/parrt Project lead - ANTLR @@ -63,9 +67,10 @@ Jim Idle jimi@idle.ws - http://www.linkedin.com/in/jimidle + https://www.linkedin.com/in/jimidle/ Developer - Maven Plugin + Developer - Go runtime @@ -87,17 +92,16 @@ tool antlr4-maven-plugin tool-testsuite - runtime-testsuite/annotations - runtime-testsuite/processors runtime-testsuite UTF-8 UTF-8 + 1722710576 true - 1.8 - 1.8 + 11 + 11 @@ -113,10 +117,10 @@ - https://github.com/antlr/antlr4/tree/master + https://github.com/antlr/antlr4/tree/master scm:git:git://github.com/antlr/antlr4.git scm:git:git@github.com:antlr/antlr4.git - HEAD + 4.13.0 @@ -133,7 +137,7 @@ maven-clean-plugin - 3.0.0 + 3.1.0 @@ -151,7 +155,7 @@ org.apache.maven.plugins maven-compiler-plugin - 3.6.0 + 3.8.1 ${maven.compiler.source} ${maven.compiler.target} @@ -161,6 +165,16 @@ org.apache.maven.plugins maven-javadoc-plugin + + org.apache.maven.plugins + maven-jar-plugin + 3.2.2 + + + org.apache.maven.plugins + maven-release-plugin + 3.0.0-M6 + diff --git a/runtime-testsuite/annotations/pom.xml b/runtime-testsuite/annotations/pom.xml deleted file mode 100644 index 97bf786495..0000000000 --- a/runtime-testsuite/annotations/pom.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - 4.0.0 - - org.antlr - antlr4-master - 4.8-2-SNAPSHOT - ../../pom.xml - - antlr4-runtime-test-annotations - ANTLR 4 Runtime Test Annotations - The ANTLR 4 Runtime - - - src - - - org.apache.felix - maven-bundle-plugin - 2.5.4 - - - bundle-manifest - process-classes - - - org.antlr.antlr4-runtime-osgi - ANTLR 4 Runtime - ANTLR - org.antlr - ${project.version} - - - - manifest - - - - - - maven-jar-plugin - 2.4 - - - - diff --git 
a/runtime-testsuite/annotations/src/org/antlr/v4/test/runtime/CommentHasStringValue.java b/runtime-testsuite/annotations/src/org/antlr/v4/test/runtime/CommentHasStringValue.java deleted file mode 100644 index 8b48eb1569..0000000000 --- a/runtime-testsuite/annotations/src/org/antlr/v4/test/runtime/CommentHasStringValue.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** This is just a tag that indicates the javadoc comment has a multi-line string */ -@Retention(RetentionPolicy.SOURCE) -@Target({ElementType.FIELD, ElementType.METHOD}) -@Inherited -public @interface CommentHasStringValue { -} diff --git a/runtime-testsuite/pom.xml b/runtime-testsuite/pom.xml index ff068e0a3e..d2dfc73189 100644 --- a/runtime-testsuite/pom.xml +++ b/runtime-testsuite/pom.xml @@ -10,23 +10,27 @@ org.antlr antlr4-master - 4.8-2-SNAPSHOT + 4.13.3-SNAPSHOT antlr4-runtime-testsuite - ANTLR 4 Runtime Tests (2nd generation) + ANTLR 4 Runtime Tests (4th generation) A collection of tests for ANTLR 4 Runtime libraries. 
- 3.0 + 3.8 2009 + + 5.9.0 + + org.antlr ST4 - 4.3 + 4.3.4 test @@ -42,89 +46,46 @@ test - org.antlr - antlr4-runtime-test-annotations - ${project.version} - test - - - org.antlr - antlr4-runtime-test-annotation-processors - ${project.version} - test - - - junit - junit - 4.12 + org.junit.jupiter + junit-jupiter-api + ${jUnitVersion} test - org.seleniumhq.selenium - selenium-java - 2.46.0 + org.junit.jupiter + junit-jupiter-engine + ${jUnitVersion} test - - org.eclipse.jetty - jetty-server - - 9.4.19.v20190610 - test - org.glassfish javax.json - 1.0.4 + 1.1.4 test org.openjdk.jol jol-core - 0.8 + 0.16 test - - - resources - - - ../runtime - - **/.build/** - **/target/** - Swift/*.xcodeproj/** - - - org.apache.maven.plugins maven-surefire-plugin - 2.19.1 + 2.22.0 -Dfile.encoding=UTF-8 - - **/csharp/Test*.java - **/java/Test*.java - **/rust/Test*.java - **/go/Test*.java - **/javascript/node/Test*.java - **/python2/Test*.java - **/python3/Test*.java - **/php/Test*.java - ${antlr.tests.swift} - - + org.apache.maven.plugins maven-jar-plugin - 2.4 + 3.2.0 @@ -151,20 +112,15 @@ - + + org.apache.maven.plugins + maven-compiler-plugin + + 8 + 9 + 9 + + + - - - - includeSwiftTests - - - mac - - - - **/swift/Test*.java - - - diff --git a/runtime-testsuite/processors/pom.xml b/runtime-testsuite/processors/pom.xml deleted file mode 100644 index c417c626d1..0000000000 --- a/runtime-testsuite/processors/pom.xml +++ /dev/null @@ -1,54 +0,0 @@ - - - - 4.0.0 - - org.antlr - antlr4-master - 4.8-2-SNAPSHOT - ../../pom.xml - - antlr4-runtime-test-annotation-processors - ANTLR 4 Runtime Test Processors - The ANTLR 4 Runtime - - - - com.github.olivergondza - maven-jdk-tools-wrapper - 0.1 - - - org.antlr - antlr4-runtime-test-annotations - ${project.version} - - - - - src - - - resources - - - - - org.apache.maven.plugins - maven-compiler-plugin - - true - ${maven.compiler.source} - ${maven.compiler.target} - - -proc:none - - - - - - diff --git 
a/runtime-testsuite/processors/resources/META-INF/services/javax.annotation.processing.Processor b/runtime-testsuite/processors/resources/META-INF/services/javax.annotation.processing.Processor deleted file mode 100644 index 95b9a3a03a..0000000000 --- a/runtime-testsuite/processors/resources/META-INF/services/javax.annotation.processing.Processor +++ /dev/null @@ -1,7 +0,0 @@ -# -# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -# Use of this file is governed by the BSD 3-clause license that -# can be found in the LICENSE.txt file in the project root. -# - -org.antlr.v4.test.runtime.CommentHasStringValueProcessor diff --git a/runtime-testsuite/processors/src/org/antlr/v4/test/runtime/CommentHasStringValueProcessor.java b/runtime-testsuite/processors/src/org/antlr/v4/test/runtime/CommentHasStringValueProcessor.java deleted file mode 100644 index 9c902c01fc..0000000000 --- a/runtime-testsuite/processors/src/org/antlr/v4/test/runtime/CommentHasStringValueProcessor.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime; - -import com.sun.tools.javac.model.JavacElements; -import com.sun.tools.javac.processing.JavacProcessingEnvironment; -import com.sun.tools.javac.tree.JCTree; -import com.sun.tools.javac.tree.TreeMaker; -import com.sun.tools.javac.util.List; - -import javax.annotation.processing.AbstractProcessor; -import javax.annotation.processing.ProcessingEnvironment; -import javax.annotation.processing.RoundEnvironment; -import javax.annotation.processing.SupportedAnnotationTypes; -import javax.annotation.processing.SupportedSourceVersion; -import javax.lang.model.SourceVersion; -import javax.lang.model.element.Element; -import javax.lang.model.element.TypeElement; -import java.util.Set; - -/** - I think I figured out how to use annotation processors in maven. It's - more or less automatic and you don't even need to tell maven, with one minor - exception. The idea is to create a project for the annotation and another - for the annotation processor. Then, a project that uses the annotation - can simply set up the dependency on the other projects. You have to turn - off processing, -proc:none on the processor project itself but other than - that, java 6+ more or less tries to apply any processors it finds during - compilation. maven just works. - - Also you need a META-INF/services/javax.annotation.processing.Processor file - with "org.antlr.v4.test.runtime.CommentHasStringValueProcessor" in it. 
- */ -@SupportedAnnotationTypes({"org.antlr.v4.test.runtime.CommentHasStringValue"}) -@SupportedSourceVersion(SourceVersion.RELEASE_7) -public class CommentHasStringValueProcessor extends AbstractProcessor { - protected JavacElements utilities; - protected TreeMaker treeMaker; - - @Override - public synchronized void init(ProcessingEnvironment processingEnv) { - super.init(processingEnv); -// Messager messager = processingEnv.getMessager(); -// messager.printMessage(Diagnostic.Kind.NOTE, "WOW INIT--------------------"); - JavacProcessingEnvironment javacProcessingEnv = (JavacProcessingEnvironment) processingEnv; - utilities = javacProcessingEnv.getElementUtils(); - treeMaker = TreeMaker.instance(javacProcessingEnv.getContext()); - } - - @Override - public boolean process(Set annotations, RoundEnvironment roundEnv) { -// Messager messager = processingEnv.getMessager(); -// messager.printMessage(Diagnostic.Kind.NOTE, "PROCESS--------------------"); - Set annotatedElements = roundEnv.getElementsAnnotatedWith(CommentHasStringValue.class); - for (Element annotatedElement : annotatedElements) { - String docComment = utilities.getDocComment(annotatedElement); - JCTree.JCLiteral literal = treeMaker.Literal(docComment!=null ? 
docComment : ""); - JCTree elementTree = utilities.getTree(annotatedElement); - if ( elementTree instanceof JCTree.JCVariableDecl ) { - ((JCTree.JCVariableDecl)elementTree).init = literal; - } - else if ( elementTree instanceof JCTree.JCMethodDecl ) { - JCTree.JCStatement[] statements = new JCTree.JCStatement[1]; - statements[0] = treeMaker.Return(literal); - JCTree.JCBlock body = treeMaker.Block(0, List.from(statements)); - ((JCTree.JCMethodDecl)elementTree).body = body; - } - } - return true; - } - - @Override - public SourceVersion getSupportedSourceVersion() { - return SourceVersion.latestSupported(); - } -} diff --git a/runtime-testsuite/resources/junit-platform.properties b/runtime-testsuite/resources/junit-platform.properties new file mode 100644 index 0000000000..ad19ea833b --- /dev/null +++ b/runtime-testsuite/resources/junit-platform.properties @@ -0,0 +1,3 @@ +junit.jupiter.execution.parallel.enabled = true +junit.jupiter.execution.parallel.mode.default = concurrent +junit.jupiter.execution.parallel.mode.classes.default = concurrent \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/LargeLexer.g4 b/runtime-testsuite/resources/org/antlr/v4/test/runtime/LargeLexer.g4 deleted file mode 100644 index 07572dae8e..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/LargeLexer.g4 +++ /dev/null @@ -1,4003 +0,0 @@ -/** Has to be in separate file; LexerExec group loads this as resource */ -lexer grammar L; -WS : [ \t\r\n]+ -> skip; -KW0 : 'KW' '0'; -KW1 : 'KW' '1'; -KW2 : 'KW' '2'; -KW3 : 'KW' '3'; -KW4 : 'KW' '4'; -KW5 : 'KW' '5'; -KW6 : 'KW' '6'; -KW7 : 'KW' '7'; -KW8 : 'KW' '8'; -KW9 : 'KW' '9'; -KW10 : 'KW' '10'; -KW11 : 'KW' '11'; -KW12 : 'KW' '12'; -KW13 : 'KW' '13'; -KW14 : 'KW' '14'; -KW15 : 'KW' '15'; -KW16 : 'KW' '16'; -KW17 : 'KW' '17'; -KW18 : 'KW' '18'; -KW19 : 'KW' '19'; -KW20 : 'KW' '20'; -KW21 : 'KW' '21'; -KW22 : 'KW' '22'; -KW23 : 'KW' '23'; -KW24 : 'KW' '24'; -KW25 : 'KW' '25'; -KW26 
: 'KW' '26'; -KW27 : 'KW' '27'; -KW28 : 'KW' '28'; -KW29 : 'KW' '29'; -KW30 : 'KW' '30'; -KW31 : 'KW' '31'; -KW32 : 'KW' '32'; -KW33 : 'KW' '33'; -KW34 : 'KW' '34'; -KW35 : 'KW' '35'; -KW36 : 'KW' '36'; -KW37 : 'KW' '37'; -KW38 : 'KW' '38'; -KW39 : 'KW' '39'; -KW40 : 'KW' '40'; -KW41 : 'KW' '41'; -KW42 : 'KW' '42'; -KW43 : 'KW' '43'; -KW44 : 'KW' '44'; -KW45 : 'KW' '45'; -KW46 : 'KW' '46'; -KW47 : 'KW' '47'; -KW48 : 'KW' '48'; -KW49 : 'KW' '49'; -KW50 : 'KW' '50'; -KW51 : 'KW' '51'; -KW52 : 'KW' '52'; -KW53 : 'KW' '53'; -KW54 : 'KW' '54'; -KW55 : 'KW' '55'; -KW56 : 'KW' '56'; -KW57 : 'KW' '57'; -KW58 : 'KW' '58'; -KW59 : 'KW' '59'; -KW60 : 'KW' '60'; -KW61 : 'KW' '61'; -KW62 : 'KW' '62'; -KW63 : 'KW' '63'; -KW64 : 'KW' '64'; -KW65 : 'KW' '65'; -KW66 : 'KW' '66'; -KW67 : 'KW' '67'; -KW68 : 'KW' '68'; -KW69 : 'KW' '69'; -KW70 : 'KW' '70'; -KW71 : 'KW' '71'; -KW72 : 'KW' '72'; -KW73 : 'KW' '73'; -KW74 : 'KW' '74'; -KW75 : 'KW' '75'; -KW76 : 'KW' '76'; -KW77 : 'KW' '77'; -KW78 : 'KW' '78'; -KW79 : 'KW' '79'; -KW80 : 'KW' '80'; -KW81 : 'KW' '81'; -KW82 : 'KW' '82'; -KW83 : 'KW' '83'; -KW84 : 'KW' '84'; -KW85 : 'KW' '85'; -KW86 : 'KW' '86'; -KW87 : 'KW' '87'; -KW88 : 'KW' '88'; -KW89 : 'KW' '89'; -KW90 : 'KW' '90'; -KW91 : 'KW' '91'; -KW92 : 'KW' '92'; -KW93 : 'KW' '93'; -KW94 : 'KW' '94'; -KW95 : 'KW' '95'; -KW96 : 'KW' '96'; -KW97 : 'KW' '97'; -KW98 : 'KW' '98'; -KW99 : 'KW' '99'; -KW100 : 'KW' '100'; -KW101 : 'KW' '101'; -KW102 : 'KW' '102'; -KW103 : 'KW' '103'; -KW104 : 'KW' '104'; -KW105 : 'KW' '105'; -KW106 : 'KW' '106'; -KW107 : 'KW' '107'; -KW108 : 'KW' '108'; -KW109 : 'KW' '109'; -KW110 : 'KW' '110'; -KW111 : 'KW' '111'; -KW112 : 'KW' '112'; -KW113 : 'KW' '113'; -KW114 : 'KW' '114'; -KW115 : 'KW' '115'; -KW116 : 'KW' '116'; -KW117 : 'KW' '117'; -KW118 : 'KW' '118'; -KW119 : 'KW' '119'; -KW120 : 'KW' '120'; -KW121 : 'KW' '121'; -KW122 : 'KW' '122'; -KW123 : 'KW' '123'; -KW124 : 'KW' '124'; -KW125 : 'KW' '125'; -KW126 : 'KW' '126'; -KW127 : 'KW' '127'; -KW128 : 
'KW' '128'; -KW129 : 'KW' '129'; -KW130 : 'KW' '130'; -KW131 : 'KW' '131'; -KW132 : 'KW' '132'; -KW133 : 'KW' '133'; -KW134 : 'KW' '134'; -KW135 : 'KW' '135'; -KW136 : 'KW' '136'; -KW137 : 'KW' '137'; -KW138 : 'KW' '138'; -KW139 : 'KW' '139'; -KW140 : 'KW' '140'; -KW141 : 'KW' '141'; -KW142 : 'KW' '142'; -KW143 : 'KW' '143'; -KW144 : 'KW' '144'; -KW145 : 'KW' '145'; -KW146 : 'KW' '146'; -KW147 : 'KW' '147'; -KW148 : 'KW' '148'; -KW149 : 'KW' '149'; -KW150 : 'KW' '150'; -KW151 : 'KW' '151'; -KW152 : 'KW' '152'; -KW153 : 'KW' '153'; -KW154 : 'KW' '154'; -KW155 : 'KW' '155'; -KW156 : 'KW' '156'; -KW157 : 'KW' '157'; -KW158 : 'KW' '158'; -KW159 : 'KW' '159'; -KW160 : 'KW' '160'; -KW161 : 'KW' '161'; -KW162 : 'KW' '162'; -KW163 : 'KW' '163'; -KW164 : 'KW' '164'; -KW165 : 'KW' '165'; -KW166 : 'KW' '166'; -KW167 : 'KW' '167'; -KW168 : 'KW' '168'; -KW169 : 'KW' '169'; -KW170 : 'KW' '170'; -KW171 : 'KW' '171'; -KW172 : 'KW' '172'; -KW173 : 'KW' '173'; -KW174 : 'KW' '174'; -KW175 : 'KW' '175'; -KW176 : 'KW' '176'; -KW177 : 'KW' '177'; -KW178 : 'KW' '178'; -KW179 : 'KW' '179'; -KW180 : 'KW' '180'; -KW181 : 'KW' '181'; -KW182 : 'KW' '182'; -KW183 : 'KW' '183'; -KW184 : 'KW' '184'; -KW185 : 'KW' '185'; -KW186 : 'KW' '186'; -KW187 : 'KW' '187'; -KW188 : 'KW' '188'; -KW189 : 'KW' '189'; -KW190 : 'KW' '190'; -KW191 : 'KW' '191'; -KW192 : 'KW' '192'; -KW193 : 'KW' '193'; -KW194 : 'KW' '194'; -KW195 : 'KW' '195'; -KW196 : 'KW' '196'; -KW197 : 'KW' '197'; -KW198 : 'KW' '198'; -KW199 : 'KW' '199'; -KW200 : 'KW' '200'; -KW201 : 'KW' '201'; -KW202 : 'KW' '202'; -KW203 : 'KW' '203'; -KW204 : 'KW' '204'; -KW205 : 'KW' '205'; -KW206 : 'KW' '206'; -KW207 : 'KW' '207'; -KW208 : 'KW' '208'; -KW209 : 'KW' '209'; -KW210 : 'KW' '210'; -KW211 : 'KW' '211'; -KW212 : 'KW' '212'; -KW213 : 'KW' '213'; -KW214 : 'KW' '214'; -KW215 : 'KW' '215'; -KW216 : 'KW' '216'; -KW217 : 'KW' '217'; -KW218 : 'KW' '218'; -KW219 : 'KW' '219'; -KW220 : 'KW' '220'; -KW221 : 'KW' '221'; -KW222 : 'KW' '222'; -KW223 : 'KW' 
'223'; -KW224 : 'KW' '224'; -KW225 : 'KW' '225'; -KW226 : 'KW' '226'; -KW227 : 'KW' '227'; -KW228 : 'KW' '228'; -KW229 : 'KW' '229'; -KW230 : 'KW' '230'; -KW231 : 'KW' '231'; -KW232 : 'KW' '232'; -KW233 : 'KW' '233'; -KW234 : 'KW' '234'; -KW235 : 'KW' '235'; -KW236 : 'KW' '236'; -KW237 : 'KW' '237'; -KW238 : 'KW' '238'; -KW239 : 'KW' '239'; -KW240 : 'KW' '240'; -KW241 : 'KW' '241'; -KW242 : 'KW' '242'; -KW243 : 'KW' '243'; -KW244 : 'KW' '244'; -KW245 : 'KW' '245'; -KW246 : 'KW' '246'; -KW247 : 'KW' '247'; -KW248 : 'KW' '248'; -KW249 : 'KW' '249'; -KW250 : 'KW' '250'; -KW251 : 'KW' '251'; -KW252 : 'KW' '252'; -KW253 : 'KW' '253'; -KW254 : 'KW' '254'; -KW255 : 'KW' '255'; -KW256 : 'KW' '256'; -KW257 : 'KW' '257'; -KW258 : 'KW' '258'; -KW259 : 'KW' '259'; -KW260 : 'KW' '260'; -KW261 : 'KW' '261'; -KW262 : 'KW' '262'; -KW263 : 'KW' '263'; -KW264 : 'KW' '264'; -KW265 : 'KW' '265'; -KW266 : 'KW' '266'; -KW267 : 'KW' '267'; -KW268 : 'KW' '268'; -KW269 : 'KW' '269'; -KW270 : 'KW' '270'; -KW271 : 'KW' '271'; -KW272 : 'KW' '272'; -KW273 : 'KW' '273'; -KW274 : 'KW' '274'; -KW275 : 'KW' '275'; -KW276 : 'KW' '276'; -KW277 : 'KW' '277'; -KW278 : 'KW' '278'; -KW279 : 'KW' '279'; -KW280 : 'KW' '280'; -KW281 : 'KW' '281'; -KW282 : 'KW' '282'; -KW283 : 'KW' '283'; -KW284 : 'KW' '284'; -KW285 : 'KW' '285'; -KW286 : 'KW' '286'; -KW287 : 'KW' '287'; -KW288 : 'KW' '288'; -KW289 : 'KW' '289'; -KW290 : 'KW' '290'; -KW291 : 'KW' '291'; -KW292 : 'KW' '292'; -KW293 : 'KW' '293'; -KW294 : 'KW' '294'; -KW295 : 'KW' '295'; -KW296 : 'KW' '296'; -KW297 : 'KW' '297'; -KW298 : 'KW' '298'; -KW299 : 'KW' '299'; -KW300 : 'KW' '300'; -KW301 : 'KW' '301'; -KW302 : 'KW' '302'; -KW303 : 'KW' '303'; -KW304 : 'KW' '304'; -KW305 : 'KW' '305'; -KW306 : 'KW' '306'; -KW307 : 'KW' '307'; -KW308 : 'KW' '308'; -KW309 : 'KW' '309'; -KW310 : 'KW' '310'; -KW311 : 'KW' '311'; -KW312 : 'KW' '312'; -KW313 : 'KW' '313'; -KW314 : 'KW' '314'; -KW315 : 'KW' '315'; -KW316 : 'KW' '316'; -KW317 : 'KW' '317'; -KW318 : 'KW' 
'318'; -KW319 : 'KW' '319'; -KW320 : 'KW' '320'; -KW321 : 'KW' '321'; -KW322 : 'KW' '322'; -KW323 : 'KW' '323'; -KW324 : 'KW' '324'; -KW325 : 'KW' '325'; -KW326 : 'KW' '326'; -KW327 : 'KW' '327'; -KW328 : 'KW' '328'; -KW329 : 'KW' '329'; -KW330 : 'KW' '330'; -KW331 : 'KW' '331'; -KW332 : 'KW' '332'; -KW333 : 'KW' '333'; -KW334 : 'KW' '334'; -KW335 : 'KW' '335'; -KW336 : 'KW' '336'; -KW337 : 'KW' '337'; -KW338 : 'KW' '338'; -KW339 : 'KW' '339'; -KW340 : 'KW' '340'; -KW341 : 'KW' '341'; -KW342 : 'KW' '342'; -KW343 : 'KW' '343'; -KW344 : 'KW' '344'; -KW345 : 'KW' '345'; -KW346 : 'KW' '346'; -KW347 : 'KW' '347'; -KW348 : 'KW' '348'; -KW349 : 'KW' '349'; -KW350 : 'KW' '350'; -KW351 : 'KW' '351'; -KW352 : 'KW' '352'; -KW353 : 'KW' '353'; -KW354 : 'KW' '354'; -KW355 : 'KW' '355'; -KW356 : 'KW' '356'; -KW357 : 'KW' '357'; -KW358 : 'KW' '358'; -KW359 : 'KW' '359'; -KW360 : 'KW' '360'; -KW361 : 'KW' '361'; -KW362 : 'KW' '362'; -KW363 : 'KW' '363'; -KW364 : 'KW' '364'; -KW365 : 'KW' '365'; -KW366 : 'KW' '366'; -KW367 : 'KW' '367'; -KW368 : 'KW' '368'; -KW369 : 'KW' '369'; -KW370 : 'KW' '370'; -KW371 : 'KW' '371'; -KW372 : 'KW' '372'; -KW373 : 'KW' '373'; -KW374 : 'KW' '374'; -KW375 : 'KW' '375'; -KW376 : 'KW' '376'; -KW377 : 'KW' '377'; -KW378 : 'KW' '378'; -KW379 : 'KW' '379'; -KW380 : 'KW' '380'; -KW381 : 'KW' '381'; -KW382 : 'KW' '382'; -KW383 : 'KW' '383'; -KW384 : 'KW' '384'; -KW385 : 'KW' '385'; -KW386 : 'KW' '386'; -KW387 : 'KW' '387'; -KW388 : 'KW' '388'; -KW389 : 'KW' '389'; -KW390 : 'KW' '390'; -KW391 : 'KW' '391'; -KW392 : 'KW' '392'; -KW393 : 'KW' '393'; -KW394 : 'KW' '394'; -KW395 : 'KW' '395'; -KW396 : 'KW' '396'; -KW397 : 'KW' '397'; -KW398 : 'KW' '398'; -KW399 : 'KW' '399'; -KW400 : 'KW' '400'; -KW401 : 'KW' '401'; -KW402 : 'KW' '402'; -KW403 : 'KW' '403'; -KW404 : 'KW' '404'; -KW405 : 'KW' '405'; -KW406 : 'KW' '406'; -KW407 : 'KW' '407'; -KW408 : 'KW' '408'; -KW409 : 'KW' '409'; -KW410 : 'KW' '410'; -KW411 : 'KW' '411'; -KW412 : 'KW' '412'; -KW413 : 'KW' 
'413'; -KW414 : 'KW' '414'; -KW415 : 'KW' '415'; -KW416 : 'KW' '416'; -KW417 : 'KW' '417'; -KW418 : 'KW' '418'; -KW419 : 'KW' '419'; -KW420 : 'KW' '420'; -KW421 : 'KW' '421'; -KW422 : 'KW' '422'; -KW423 : 'KW' '423'; -KW424 : 'KW' '424'; -KW425 : 'KW' '425'; -KW426 : 'KW' '426'; -KW427 : 'KW' '427'; -KW428 : 'KW' '428'; -KW429 : 'KW' '429'; -KW430 : 'KW' '430'; -KW431 : 'KW' '431'; -KW432 : 'KW' '432'; -KW433 : 'KW' '433'; -KW434 : 'KW' '434'; -KW435 : 'KW' '435'; -KW436 : 'KW' '436'; -KW437 : 'KW' '437'; -KW438 : 'KW' '438'; -KW439 : 'KW' '439'; -KW440 : 'KW' '440'; -KW441 : 'KW' '441'; -KW442 : 'KW' '442'; -KW443 : 'KW' '443'; -KW444 : 'KW' '444'; -KW445 : 'KW' '445'; -KW446 : 'KW' '446'; -KW447 : 'KW' '447'; -KW448 : 'KW' '448'; -KW449 : 'KW' '449'; -KW450 : 'KW' '450'; -KW451 : 'KW' '451'; -KW452 : 'KW' '452'; -KW453 : 'KW' '453'; -KW454 : 'KW' '454'; -KW455 : 'KW' '455'; -KW456 : 'KW' '456'; -KW457 : 'KW' '457'; -KW458 : 'KW' '458'; -KW459 : 'KW' '459'; -KW460 : 'KW' '460'; -KW461 : 'KW' '461'; -KW462 : 'KW' '462'; -KW463 : 'KW' '463'; -KW464 : 'KW' '464'; -KW465 : 'KW' '465'; -KW466 : 'KW' '466'; -KW467 : 'KW' '467'; -KW468 : 'KW' '468'; -KW469 : 'KW' '469'; -KW470 : 'KW' '470'; -KW471 : 'KW' '471'; -KW472 : 'KW' '472'; -KW473 : 'KW' '473'; -KW474 : 'KW' '474'; -KW475 : 'KW' '475'; -KW476 : 'KW' '476'; -KW477 : 'KW' '477'; -KW478 : 'KW' '478'; -KW479 : 'KW' '479'; -KW480 : 'KW' '480'; -KW481 : 'KW' '481'; -KW482 : 'KW' '482'; -KW483 : 'KW' '483'; -KW484 : 'KW' '484'; -KW485 : 'KW' '485'; -KW486 : 'KW' '486'; -KW487 : 'KW' '487'; -KW488 : 'KW' '488'; -KW489 : 'KW' '489'; -KW490 : 'KW' '490'; -KW491 : 'KW' '491'; -KW492 : 'KW' '492'; -KW493 : 'KW' '493'; -KW494 : 'KW' '494'; -KW495 : 'KW' '495'; -KW496 : 'KW' '496'; -KW497 : 'KW' '497'; -KW498 : 'KW' '498'; -KW499 : 'KW' '499'; -KW500 : 'KW' '500'; -KW501 : 'KW' '501'; -KW502 : 'KW' '502'; -KW503 : 'KW' '503'; -KW504 : 'KW' '504'; -KW505 : 'KW' '505'; -KW506 : 'KW' '506'; -KW507 : 'KW' '507'; -KW508 : 'KW' 
'508'; -KW509 : 'KW' '509'; -KW510 : 'KW' '510'; -KW511 : 'KW' '511'; -KW512 : 'KW' '512'; -KW513 : 'KW' '513'; -KW514 : 'KW' '514'; -KW515 : 'KW' '515'; -KW516 : 'KW' '516'; -KW517 : 'KW' '517'; -KW518 : 'KW' '518'; -KW519 : 'KW' '519'; -KW520 : 'KW' '520'; -KW521 : 'KW' '521'; -KW522 : 'KW' '522'; -KW523 : 'KW' '523'; -KW524 : 'KW' '524'; -KW525 : 'KW' '525'; -KW526 : 'KW' '526'; -KW527 : 'KW' '527'; -KW528 : 'KW' '528'; -KW529 : 'KW' '529'; -KW530 : 'KW' '530'; -KW531 : 'KW' '531'; -KW532 : 'KW' '532'; -KW533 : 'KW' '533'; -KW534 : 'KW' '534'; -KW535 : 'KW' '535'; -KW536 : 'KW' '536'; -KW537 : 'KW' '537'; -KW538 : 'KW' '538'; -KW539 : 'KW' '539'; -KW540 : 'KW' '540'; -KW541 : 'KW' '541'; -KW542 : 'KW' '542'; -KW543 : 'KW' '543'; -KW544 : 'KW' '544'; -KW545 : 'KW' '545'; -KW546 : 'KW' '546'; -KW547 : 'KW' '547'; -KW548 : 'KW' '548'; -KW549 : 'KW' '549'; -KW550 : 'KW' '550'; -KW551 : 'KW' '551'; -KW552 : 'KW' '552'; -KW553 : 'KW' '553'; -KW554 : 'KW' '554'; -KW555 : 'KW' '555'; -KW556 : 'KW' '556'; -KW557 : 'KW' '557'; -KW558 : 'KW' '558'; -KW559 : 'KW' '559'; -KW560 : 'KW' '560'; -KW561 : 'KW' '561'; -KW562 : 'KW' '562'; -KW563 : 'KW' '563'; -KW564 : 'KW' '564'; -KW565 : 'KW' '565'; -KW566 : 'KW' '566'; -KW567 : 'KW' '567'; -KW568 : 'KW' '568'; -KW569 : 'KW' '569'; -KW570 : 'KW' '570'; -KW571 : 'KW' '571'; -KW572 : 'KW' '572'; -KW573 : 'KW' '573'; -KW574 : 'KW' '574'; -KW575 : 'KW' '575'; -KW576 : 'KW' '576'; -KW577 : 'KW' '577'; -KW578 : 'KW' '578'; -KW579 : 'KW' '579'; -KW580 : 'KW' '580'; -KW581 : 'KW' '581'; -KW582 : 'KW' '582'; -KW583 : 'KW' '583'; -KW584 : 'KW' '584'; -KW585 : 'KW' '585'; -KW586 : 'KW' '586'; -KW587 : 'KW' '587'; -KW588 : 'KW' '588'; -KW589 : 'KW' '589'; -KW590 : 'KW' '590'; -KW591 : 'KW' '591'; -KW592 : 'KW' '592'; -KW593 : 'KW' '593'; -KW594 : 'KW' '594'; -KW595 : 'KW' '595'; -KW596 : 'KW' '596'; -KW597 : 'KW' '597'; -KW598 : 'KW' '598'; -KW599 : 'KW' '599'; -KW600 : 'KW' '600'; -KW601 : 'KW' '601'; -KW602 : 'KW' '602'; -KW603 : 'KW' 
'603'; -KW604 : 'KW' '604'; -KW605 : 'KW' '605'; -KW606 : 'KW' '606'; -KW607 : 'KW' '607'; -KW608 : 'KW' '608'; -KW609 : 'KW' '609'; -KW610 : 'KW' '610'; -KW611 : 'KW' '611'; -KW612 : 'KW' '612'; -KW613 : 'KW' '613'; -KW614 : 'KW' '614'; -KW615 : 'KW' '615'; -KW616 : 'KW' '616'; -KW617 : 'KW' '617'; -KW618 : 'KW' '618'; -KW619 : 'KW' '619'; -KW620 : 'KW' '620'; -KW621 : 'KW' '621'; -KW622 : 'KW' '622'; -KW623 : 'KW' '623'; -KW624 : 'KW' '624'; -KW625 : 'KW' '625'; -KW626 : 'KW' '626'; -KW627 : 'KW' '627'; -KW628 : 'KW' '628'; -KW629 : 'KW' '629'; -KW630 : 'KW' '630'; -KW631 : 'KW' '631'; -KW632 : 'KW' '632'; -KW633 : 'KW' '633'; -KW634 : 'KW' '634'; -KW635 : 'KW' '635'; -KW636 : 'KW' '636'; -KW637 : 'KW' '637'; -KW638 : 'KW' '638'; -KW639 : 'KW' '639'; -KW640 : 'KW' '640'; -KW641 : 'KW' '641'; -KW642 : 'KW' '642'; -KW643 : 'KW' '643'; -KW644 : 'KW' '644'; -KW645 : 'KW' '645'; -KW646 : 'KW' '646'; -KW647 : 'KW' '647'; -KW648 : 'KW' '648'; -KW649 : 'KW' '649'; -KW650 : 'KW' '650'; -KW651 : 'KW' '651'; -KW652 : 'KW' '652'; -KW653 : 'KW' '653'; -KW654 : 'KW' '654'; -KW655 : 'KW' '655'; -KW656 : 'KW' '656'; -KW657 : 'KW' '657'; -KW658 : 'KW' '658'; -KW659 : 'KW' '659'; -KW660 : 'KW' '660'; -KW661 : 'KW' '661'; -KW662 : 'KW' '662'; -KW663 : 'KW' '663'; -KW664 : 'KW' '664'; -KW665 : 'KW' '665'; -KW666 : 'KW' '666'; -KW667 : 'KW' '667'; -KW668 : 'KW' '668'; -KW669 : 'KW' '669'; -KW670 : 'KW' '670'; -KW671 : 'KW' '671'; -KW672 : 'KW' '672'; -KW673 : 'KW' '673'; -KW674 : 'KW' '674'; -KW675 : 'KW' '675'; -KW676 : 'KW' '676'; -KW677 : 'KW' '677'; -KW678 : 'KW' '678'; -KW679 : 'KW' '679'; -KW680 : 'KW' '680'; -KW681 : 'KW' '681'; -KW682 : 'KW' '682'; -KW683 : 'KW' '683'; -KW684 : 'KW' '684'; -KW685 : 'KW' '685'; -KW686 : 'KW' '686'; -KW687 : 'KW' '687'; -KW688 : 'KW' '688'; -KW689 : 'KW' '689'; -KW690 : 'KW' '690'; -KW691 : 'KW' '691'; -KW692 : 'KW' '692'; -KW693 : 'KW' '693'; -KW694 : 'KW' '694'; -KW695 : 'KW' '695'; -KW696 : 'KW' '696'; -KW697 : 'KW' '697'; -KW698 : 'KW' 
'698'; -KW699 : 'KW' '699'; -KW700 : 'KW' '700'; -KW701 : 'KW' '701'; -KW702 : 'KW' '702'; -KW703 : 'KW' '703'; -KW704 : 'KW' '704'; -KW705 : 'KW' '705'; -KW706 : 'KW' '706'; -KW707 : 'KW' '707'; -KW708 : 'KW' '708'; -KW709 : 'KW' '709'; -KW710 : 'KW' '710'; -KW711 : 'KW' '711'; -KW712 : 'KW' '712'; -KW713 : 'KW' '713'; -KW714 : 'KW' '714'; -KW715 : 'KW' '715'; -KW716 : 'KW' '716'; -KW717 : 'KW' '717'; -KW718 : 'KW' '718'; -KW719 : 'KW' '719'; -KW720 : 'KW' '720'; -KW721 : 'KW' '721'; -KW722 : 'KW' '722'; -KW723 : 'KW' '723'; -KW724 : 'KW' '724'; -KW725 : 'KW' '725'; -KW726 : 'KW' '726'; -KW727 : 'KW' '727'; -KW728 : 'KW' '728'; -KW729 : 'KW' '729'; -KW730 : 'KW' '730'; -KW731 : 'KW' '731'; -KW732 : 'KW' '732'; -KW733 : 'KW' '733'; -KW734 : 'KW' '734'; -KW735 : 'KW' '735'; -KW736 : 'KW' '736'; -KW737 : 'KW' '737'; -KW738 : 'KW' '738'; -KW739 : 'KW' '739'; -KW740 : 'KW' '740'; -KW741 : 'KW' '741'; -KW742 : 'KW' '742'; -KW743 : 'KW' '743'; -KW744 : 'KW' '744'; -KW745 : 'KW' '745'; -KW746 : 'KW' '746'; -KW747 : 'KW' '747'; -KW748 : 'KW' '748'; -KW749 : 'KW' '749'; -KW750 : 'KW' '750'; -KW751 : 'KW' '751'; -KW752 : 'KW' '752'; -KW753 : 'KW' '753'; -KW754 : 'KW' '754'; -KW755 : 'KW' '755'; -KW756 : 'KW' '756'; -KW757 : 'KW' '757'; -KW758 : 'KW' '758'; -KW759 : 'KW' '759'; -KW760 : 'KW' '760'; -KW761 : 'KW' '761'; -KW762 : 'KW' '762'; -KW763 : 'KW' '763'; -KW764 : 'KW' '764'; -KW765 : 'KW' '765'; -KW766 : 'KW' '766'; -KW767 : 'KW' '767'; -KW768 : 'KW' '768'; -KW769 : 'KW' '769'; -KW770 : 'KW' '770'; -KW771 : 'KW' '771'; -KW772 : 'KW' '772'; -KW773 : 'KW' '773'; -KW774 : 'KW' '774'; -KW775 : 'KW' '775'; -KW776 : 'KW' '776'; -KW777 : 'KW' '777'; -KW778 : 'KW' '778'; -KW779 : 'KW' '779'; -KW780 : 'KW' '780'; -KW781 : 'KW' '781'; -KW782 : 'KW' '782'; -KW783 : 'KW' '783'; -KW784 : 'KW' '784'; -KW785 : 'KW' '785'; -KW786 : 'KW' '786'; -KW787 : 'KW' '787'; -KW788 : 'KW' '788'; -KW789 : 'KW' '789'; -KW790 : 'KW' '790'; -KW791 : 'KW' '791'; -KW792 : 'KW' '792'; -KW793 : 'KW' 
'793'; -KW794 : 'KW' '794'; -KW795 : 'KW' '795'; -KW796 : 'KW' '796'; -KW797 : 'KW' '797'; -KW798 : 'KW' '798'; -KW799 : 'KW' '799'; -KW800 : 'KW' '800'; -KW801 : 'KW' '801'; -KW802 : 'KW' '802'; -KW803 : 'KW' '803'; -KW804 : 'KW' '804'; -KW805 : 'KW' '805'; -KW806 : 'KW' '806'; -KW807 : 'KW' '807'; -KW808 : 'KW' '808'; -KW809 : 'KW' '809'; -KW810 : 'KW' '810'; -KW811 : 'KW' '811'; -KW812 : 'KW' '812'; -KW813 : 'KW' '813'; -KW814 : 'KW' '814'; -KW815 : 'KW' '815'; -KW816 : 'KW' '816'; -KW817 : 'KW' '817'; -KW818 : 'KW' '818'; -KW819 : 'KW' '819'; -KW820 : 'KW' '820'; -KW821 : 'KW' '821'; -KW822 : 'KW' '822'; -KW823 : 'KW' '823'; -KW824 : 'KW' '824'; -KW825 : 'KW' '825'; -KW826 : 'KW' '826'; -KW827 : 'KW' '827'; -KW828 : 'KW' '828'; -KW829 : 'KW' '829'; -KW830 : 'KW' '830'; -KW831 : 'KW' '831'; -KW832 : 'KW' '832'; -KW833 : 'KW' '833'; -KW834 : 'KW' '834'; -KW835 : 'KW' '835'; -KW836 : 'KW' '836'; -KW837 : 'KW' '837'; -KW838 : 'KW' '838'; -KW839 : 'KW' '839'; -KW840 : 'KW' '840'; -KW841 : 'KW' '841'; -KW842 : 'KW' '842'; -KW843 : 'KW' '843'; -KW844 : 'KW' '844'; -KW845 : 'KW' '845'; -KW846 : 'KW' '846'; -KW847 : 'KW' '847'; -KW848 : 'KW' '848'; -KW849 : 'KW' '849'; -KW850 : 'KW' '850'; -KW851 : 'KW' '851'; -KW852 : 'KW' '852'; -KW853 : 'KW' '853'; -KW854 : 'KW' '854'; -KW855 : 'KW' '855'; -KW856 : 'KW' '856'; -KW857 : 'KW' '857'; -KW858 : 'KW' '858'; -KW859 : 'KW' '859'; -KW860 : 'KW' '860'; -KW861 : 'KW' '861'; -KW862 : 'KW' '862'; -KW863 : 'KW' '863'; -KW864 : 'KW' '864'; -KW865 : 'KW' '865'; -KW866 : 'KW' '866'; -KW867 : 'KW' '867'; -KW868 : 'KW' '868'; -KW869 : 'KW' '869'; -KW870 : 'KW' '870'; -KW871 : 'KW' '871'; -KW872 : 'KW' '872'; -KW873 : 'KW' '873'; -KW874 : 'KW' '874'; -KW875 : 'KW' '875'; -KW876 : 'KW' '876'; -KW877 : 'KW' '877'; -KW878 : 'KW' '878'; -KW879 : 'KW' '879'; -KW880 : 'KW' '880'; -KW881 : 'KW' '881'; -KW882 : 'KW' '882'; -KW883 : 'KW' '883'; -KW884 : 'KW' '884'; -KW885 : 'KW' '885'; -KW886 : 'KW' '886'; -KW887 : 'KW' '887'; -KW888 : 'KW' 
'888'; -KW889 : 'KW' '889'; -KW890 : 'KW' '890'; -KW891 : 'KW' '891'; -KW892 : 'KW' '892'; -KW893 : 'KW' '893'; -KW894 : 'KW' '894'; -KW895 : 'KW' '895'; -KW896 : 'KW' '896'; -KW897 : 'KW' '897'; -KW898 : 'KW' '898'; -KW899 : 'KW' '899'; -KW900 : 'KW' '900'; -KW901 : 'KW' '901'; -KW902 : 'KW' '902'; -KW903 : 'KW' '903'; -KW904 : 'KW' '904'; -KW905 : 'KW' '905'; -KW906 : 'KW' '906'; -KW907 : 'KW' '907'; -KW908 : 'KW' '908'; -KW909 : 'KW' '909'; -KW910 : 'KW' '910'; -KW911 : 'KW' '911'; -KW912 : 'KW' '912'; -KW913 : 'KW' '913'; -KW914 : 'KW' '914'; -KW915 : 'KW' '915'; -KW916 : 'KW' '916'; -KW917 : 'KW' '917'; -KW918 : 'KW' '918'; -KW919 : 'KW' '919'; -KW920 : 'KW' '920'; -KW921 : 'KW' '921'; -KW922 : 'KW' '922'; -KW923 : 'KW' '923'; -KW924 : 'KW' '924'; -KW925 : 'KW' '925'; -KW926 : 'KW' '926'; -KW927 : 'KW' '927'; -KW928 : 'KW' '928'; -KW929 : 'KW' '929'; -KW930 : 'KW' '930'; -KW931 : 'KW' '931'; -KW932 : 'KW' '932'; -KW933 : 'KW' '933'; -KW934 : 'KW' '934'; -KW935 : 'KW' '935'; -KW936 : 'KW' '936'; -KW937 : 'KW' '937'; -KW938 : 'KW' '938'; -KW939 : 'KW' '939'; -KW940 : 'KW' '940'; -KW941 : 'KW' '941'; -KW942 : 'KW' '942'; -KW943 : 'KW' '943'; -KW944 : 'KW' '944'; -KW945 : 'KW' '945'; -KW946 : 'KW' '946'; -KW947 : 'KW' '947'; -KW948 : 'KW' '948'; -KW949 : 'KW' '949'; -KW950 : 'KW' '950'; -KW951 : 'KW' '951'; -KW952 : 'KW' '952'; -KW953 : 'KW' '953'; -KW954 : 'KW' '954'; -KW955 : 'KW' '955'; -KW956 : 'KW' '956'; -KW957 : 'KW' '957'; -KW958 : 'KW' '958'; -KW959 : 'KW' '959'; -KW960 : 'KW' '960'; -KW961 : 'KW' '961'; -KW962 : 'KW' '962'; -KW963 : 'KW' '963'; -KW964 : 'KW' '964'; -KW965 : 'KW' '965'; -KW966 : 'KW' '966'; -KW967 : 'KW' '967'; -KW968 : 'KW' '968'; -KW969 : 'KW' '969'; -KW970 : 'KW' '970'; -KW971 : 'KW' '971'; -KW972 : 'KW' '972'; -KW973 : 'KW' '973'; -KW974 : 'KW' '974'; -KW975 : 'KW' '975'; -KW976 : 'KW' '976'; -KW977 : 'KW' '977'; -KW978 : 'KW' '978'; -KW979 : 'KW' '979'; -KW980 : 'KW' '980'; -KW981 : 'KW' '981'; -KW982 : 'KW' '982'; -KW983 : 'KW' 
'983'; -KW984 : 'KW' '984'; -KW985 : 'KW' '985'; -KW986 : 'KW' '986'; -KW987 : 'KW' '987'; -KW988 : 'KW' '988'; -KW989 : 'KW' '989'; -KW990 : 'KW' '990'; -KW991 : 'KW' '991'; -KW992 : 'KW' '992'; -KW993 : 'KW' '993'; -KW994 : 'KW' '994'; -KW995 : 'KW' '995'; -KW996 : 'KW' '996'; -KW997 : 'KW' '997'; -KW998 : 'KW' '998'; -KW999 : 'KW' '999'; -KW1000 : 'KW' '1000'; -KW1001 : 'KW' '1001'; -KW1002 : 'KW' '1002'; -KW1003 : 'KW' '1003'; -KW1004 : 'KW' '1004'; -KW1005 : 'KW' '1005'; -KW1006 : 'KW' '1006'; -KW1007 : 'KW' '1007'; -KW1008 : 'KW' '1008'; -KW1009 : 'KW' '1009'; -KW1010 : 'KW' '1010'; -KW1011 : 'KW' '1011'; -KW1012 : 'KW' '1012'; -KW1013 : 'KW' '1013'; -KW1014 : 'KW' '1014'; -KW1015 : 'KW' '1015'; -KW1016 : 'KW' '1016'; -KW1017 : 'KW' '1017'; -KW1018 : 'KW' '1018'; -KW1019 : 'KW' '1019'; -KW1020 : 'KW' '1020'; -KW1021 : 'KW' '1021'; -KW1022 : 'KW' '1022'; -KW1023 : 'KW' '1023'; -KW1024 : 'KW' '1024'; -KW1025 : 'KW' '1025'; -KW1026 : 'KW' '1026'; -KW1027 : 'KW' '1027'; -KW1028 : 'KW' '1028'; -KW1029 : 'KW' '1029'; -KW1030 : 'KW' '1030'; -KW1031 : 'KW' '1031'; -KW1032 : 'KW' '1032'; -KW1033 : 'KW' '1033'; -KW1034 : 'KW' '1034'; -KW1035 : 'KW' '1035'; -KW1036 : 'KW' '1036'; -KW1037 : 'KW' '1037'; -KW1038 : 'KW' '1038'; -KW1039 : 'KW' '1039'; -KW1040 : 'KW' '1040'; -KW1041 : 'KW' '1041'; -KW1042 : 'KW' '1042'; -KW1043 : 'KW' '1043'; -KW1044 : 'KW' '1044'; -KW1045 : 'KW' '1045'; -KW1046 : 'KW' '1046'; -KW1047 : 'KW' '1047'; -KW1048 : 'KW' '1048'; -KW1049 : 'KW' '1049'; -KW1050 : 'KW' '1050'; -KW1051 : 'KW' '1051'; -KW1052 : 'KW' '1052'; -KW1053 : 'KW' '1053'; -KW1054 : 'KW' '1054'; -KW1055 : 'KW' '1055'; -KW1056 : 'KW' '1056'; -KW1057 : 'KW' '1057'; -KW1058 : 'KW' '1058'; -KW1059 : 'KW' '1059'; -KW1060 : 'KW' '1060'; -KW1061 : 'KW' '1061'; -KW1062 : 'KW' '1062'; -KW1063 : 'KW' '1063'; -KW1064 : 'KW' '1064'; -KW1065 : 'KW' '1065'; -KW1066 : 'KW' '1066'; -KW1067 : 'KW' '1067'; -KW1068 : 'KW' '1068'; -KW1069 : 'KW' '1069'; -KW1070 : 'KW' '1070'; -KW1071 : 'KW' '1071'; 
-KW1072 : 'KW' '1072'; -KW1073 : 'KW' '1073'; -KW1074 : 'KW' '1074'; -KW1075 : 'KW' '1075'; -KW1076 : 'KW' '1076'; -KW1077 : 'KW' '1077'; -KW1078 : 'KW' '1078'; -KW1079 : 'KW' '1079'; -KW1080 : 'KW' '1080'; -KW1081 : 'KW' '1081'; -KW1082 : 'KW' '1082'; -KW1083 : 'KW' '1083'; -KW1084 : 'KW' '1084'; -KW1085 : 'KW' '1085'; -KW1086 : 'KW' '1086'; -KW1087 : 'KW' '1087'; -KW1088 : 'KW' '1088'; -KW1089 : 'KW' '1089'; -KW1090 : 'KW' '1090'; -KW1091 : 'KW' '1091'; -KW1092 : 'KW' '1092'; -KW1093 : 'KW' '1093'; -KW1094 : 'KW' '1094'; -KW1095 : 'KW' '1095'; -KW1096 : 'KW' '1096'; -KW1097 : 'KW' '1097'; -KW1098 : 'KW' '1098'; -KW1099 : 'KW' '1099'; -KW1100 : 'KW' '1100'; -KW1101 : 'KW' '1101'; -KW1102 : 'KW' '1102'; -KW1103 : 'KW' '1103'; -KW1104 : 'KW' '1104'; -KW1105 : 'KW' '1105'; -KW1106 : 'KW' '1106'; -KW1107 : 'KW' '1107'; -KW1108 : 'KW' '1108'; -KW1109 : 'KW' '1109'; -KW1110 : 'KW' '1110'; -KW1111 : 'KW' '1111'; -KW1112 : 'KW' '1112'; -KW1113 : 'KW' '1113'; -KW1114 : 'KW' '1114'; -KW1115 : 'KW' '1115'; -KW1116 : 'KW' '1116'; -KW1117 : 'KW' '1117'; -KW1118 : 'KW' '1118'; -KW1119 : 'KW' '1119'; -KW1120 : 'KW' '1120'; -KW1121 : 'KW' '1121'; -KW1122 : 'KW' '1122'; -KW1123 : 'KW' '1123'; -KW1124 : 'KW' '1124'; -KW1125 : 'KW' '1125'; -KW1126 : 'KW' '1126'; -KW1127 : 'KW' '1127'; -KW1128 : 'KW' '1128'; -KW1129 : 'KW' '1129'; -KW1130 : 'KW' '1130'; -KW1131 : 'KW' '1131'; -KW1132 : 'KW' '1132'; -KW1133 : 'KW' '1133'; -KW1134 : 'KW' '1134'; -KW1135 : 'KW' '1135'; -KW1136 : 'KW' '1136'; -KW1137 : 'KW' '1137'; -KW1138 : 'KW' '1138'; -KW1139 : 'KW' '1139'; -KW1140 : 'KW' '1140'; -KW1141 : 'KW' '1141'; -KW1142 : 'KW' '1142'; -KW1143 : 'KW' '1143'; -KW1144 : 'KW' '1144'; -KW1145 : 'KW' '1145'; -KW1146 : 'KW' '1146'; -KW1147 : 'KW' '1147'; -KW1148 : 'KW' '1148'; -KW1149 : 'KW' '1149'; -KW1150 : 'KW' '1150'; -KW1151 : 'KW' '1151'; -KW1152 : 'KW' '1152'; -KW1153 : 'KW' '1153'; -KW1154 : 'KW' '1154'; -KW1155 : 'KW' '1155'; -KW1156 : 'KW' '1156'; -KW1157 : 'KW' '1157'; -KW1158 : 'KW' 
'1158'; -KW1159 : 'KW' '1159'; -KW1160 : 'KW' '1160'; -KW1161 : 'KW' '1161'; -KW1162 : 'KW' '1162'; -KW1163 : 'KW' '1163'; -KW1164 : 'KW' '1164'; -KW1165 : 'KW' '1165'; -KW1166 : 'KW' '1166'; -KW1167 : 'KW' '1167'; -KW1168 : 'KW' '1168'; -KW1169 : 'KW' '1169'; -KW1170 : 'KW' '1170'; -KW1171 : 'KW' '1171'; -KW1172 : 'KW' '1172'; -KW1173 : 'KW' '1173'; -KW1174 : 'KW' '1174'; -KW1175 : 'KW' '1175'; -KW1176 : 'KW' '1176'; -KW1177 : 'KW' '1177'; -KW1178 : 'KW' '1178'; -KW1179 : 'KW' '1179'; -KW1180 : 'KW' '1180'; -KW1181 : 'KW' '1181'; -KW1182 : 'KW' '1182'; -KW1183 : 'KW' '1183'; -KW1184 : 'KW' '1184'; -KW1185 : 'KW' '1185'; -KW1186 : 'KW' '1186'; -KW1187 : 'KW' '1187'; -KW1188 : 'KW' '1188'; -KW1189 : 'KW' '1189'; -KW1190 : 'KW' '1190'; -KW1191 : 'KW' '1191'; -KW1192 : 'KW' '1192'; -KW1193 : 'KW' '1193'; -KW1194 : 'KW' '1194'; -KW1195 : 'KW' '1195'; -KW1196 : 'KW' '1196'; -KW1197 : 'KW' '1197'; -KW1198 : 'KW' '1198'; -KW1199 : 'KW' '1199'; -KW1200 : 'KW' '1200'; -KW1201 : 'KW' '1201'; -KW1202 : 'KW' '1202'; -KW1203 : 'KW' '1203'; -KW1204 : 'KW' '1204'; -KW1205 : 'KW' '1205'; -KW1206 : 'KW' '1206'; -KW1207 : 'KW' '1207'; -KW1208 : 'KW' '1208'; -KW1209 : 'KW' '1209'; -KW1210 : 'KW' '1210'; -KW1211 : 'KW' '1211'; -KW1212 : 'KW' '1212'; -KW1213 : 'KW' '1213'; -KW1214 : 'KW' '1214'; -KW1215 : 'KW' '1215'; -KW1216 : 'KW' '1216'; -KW1217 : 'KW' '1217'; -KW1218 : 'KW' '1218'; -KW1219 : 'KW' '1219'; -KW1220 : 'KW' '1220'; -KW1221 : 'KW' '1221'; -KW1222 : 'KW' '1222'; -KW1223 : 'KW' '1223'; -KW1224 : 'KW' '1224'; -KW1225 : 'KW' '1225'; -KW1226 : 'KW' '1226'; -KW1227 : 'KW' '1227'; -KW1228 : 'KW' '1228'; -KW1229 : 'KW' '1229'; -KW1230 : 'KW' '1230'; -KW1231 : 'KW' '1231'; -KW1232 : 'KW' '1232'; -KW1233 : 'KW' '1233'; -KW1234 : 'KW' '1234'; -KW1235 : 'KW' '1235'; -KW1236 : 'KW' '1236'; -KW1237 : 'KW' '1237'; -KW1238 : 'KW' '1238'; -KW1239 : 'KW' '1239'; -KW1240 : 'KW' '1240'; -KW1241 : 'KW' '1241'; -KW1242 : 'KW' '1242'; -KW1243 : 'KW' '1243'; -KW1244 : 'KW' '1244'; -KW1245 : 
'KW' '1245'; -KW1246 : 'KW' '1246'; -KW1247 : 'KW' '1247'; -KW1248 : 'KW' '1248'; -KW1249 : 'KW' '1249'; -KW1250 : 'KW' '1250'; -KW1251 : 'KW' '1251'; -KW1252 : 'KW' '1252'; -KW1253 : 'KW' '1253'; -KW1254 : 'KW' '1254'; -KW1255 : 'KW' '1255'; -KW1256 : 'KW' '1256'; -KW1257 : 'KW' '1257'; -KW1258 : 'KW' '1258'; -KW1259 : 'KW' '1259'; -KW1260 : 'KW' '1260'; -KW1261 : 'KW' '1261'; -KW1262 : 'KW' '1262'; -KW1263 : 'KW' '1263'; -KW1264 : 'KW' '1264'; -KW1265 : 'KW' '1265'; -KW1266 : 'KW' '1266'; -KW1267 : 'KW' '1267'; -KW1268 : 'KW' '1268'; -KW1269 : 'KW' '1269'; -KW1270 : 'KW' '1270'; -KW1271 : 'KW' '1271'; -KW1272 : 'KW' '1272'; -KW1273 : 'KW' '1273'; -KW1274 : 'KW' '1274'; -KW1275 : 'KW' '1275'; -KW1276 : 'KW' '1276'; -KW1277 : 'KW' '1277'; -KW1278 : 'KW' '1278'; -KW1279 : 'KW' '1279'; -KW1280 : 'KW' '1280'; -KW1281 : 'KW' '1281'; -KW1282 : 'KW' '1282'; -KW1283 : 'KW' '1283'; -KW1284 : 'KW' '1284'; -KW1285 : 'KW' '1285'; -KW1286 : 'KW' '1286'; -KW1287 : 'KW' '1287'; -KW1288 : 'KW' '1288'; -KW1289 : 'KW' '1289'; -KW1290 : 'KW' '1290'; -KW1291 : 'KW' '1291'; -KW1292 : 'KW' '1292'; -KW1293 : 'KW' '1293'; -KW1294 : 'KW' '1294'; -KW1295 : 'KW' '1295'; -KW1296 : 'KW' '1296'; -KW1297 : 'KW' '1297'; -KW1298 : 'KW' '1298'; -KW1299 : 'KW' '1299'; -KW1300 : 'KW' '1300'; -KW1301 : 'KW' '1301'; -KW1302 : 'KW' '1302'; -KW1303 : 'KW' '1303'; -KW1304 : 'KW' '1304'; -KW1305 : 'KW' '1305'; -KW1306 : 'KW' '1306'; -KW1307 : 'KW' '1307'; -KW1308 : 'KW' '1308'; -KW1309 : 'KW' '1309'; -KW1310 : 'KW' '1310'; -KW1311 : 'KW' '1311'; -KW1312 : 'KW' '1312'; -KW1313 : 'KW' '1313'; -KW1314 : 'KW' '1314'; -KW1315 : 'KW' '1315'; -KW1316 : 'KW' '1316'; -KW1317 : 'KW' '1317'; -KW1318 : 'KW' '1318'; -KW1319 : 'KW' '1319'; -KW1320 : 'KW' '1320'; -KW1321 : 'KW' '1321'; -KW1322 : 'KW' '1322'; -KW1323 : 'KW' '1323'; -KW1324 : 'KW' '1324'; -KW1325 : 'KW' '1325'; -KW1326 : 'KW' '1326'; -KW1327 : 'KW' '1327'; -KW1328 : 'KW' '1328'; -KW1329 : 'KW' '1329'; -KW1330 : 'KW' '1330'; -KW1331 : 'KW' '1331'; -KW1332 
: 'KW' '1332'; -KW1333 : 'KW' '1333'; -KW1334 : 'KW' '1334'; -KW1335 : 'KW' '1335'; -KW1336 : 'KW' '1336'; -KW1337 : 'KW' '1337'; -KW1338 : 'KW' '1338'; -KW1339 : 'KW' '1339'; -KW1340 : 'KW' '1340'; -KW1341 : 'KW' '1341'; -KW1342 : 'KW' '1342'; -KW1343 : 'KW' '1343'; -KW1344 : 'KW' '1344'; -KW1345 : 'KW' '1345'; -KW1346 : 'KW' '1346'; -KW1347 : 'KW' '1347'; -KW1348 : 'KW' '1348'; -KW1349 : 'KW' '1349'; -KW1350 : 'KW' '1350'; -KW1351 : 'KW' '1351'; -KW1352 : 'KW' '1352'; -KW1353 : 'KW' '1353'; -KW1354 : 'KW' '1354'; -KW1355 : 'KW' '1355'; -KW1356 : 'KW' '1356'; -KW1357 : 'KW' '1357'; -KW1358 : 'KW' '1358'; -KW1359 : 'KW' '1359'; -KW1360 : 'KW' '1360'; -KW1361 : 'KW' '1361'; -KW1362 : 'KW' '1362'; -KW1363 : 'KW' '1363'; -KW1364 : 'KW' '1364'; -KW1365 : 'KW' '1365'; -KW1366 : 'KW' '1366'; -KW1367 : 'KW' '1367'; -KW1368 : 'KW' '1368'; -KW1369 : 'KW' '1369'; -KW1370 : 'KW' '1370'; -KW1371 : 'KW' '1371'; -KW1372 : 'KW' '1372'; -KW1373 : 'KW' '1373'; -KW1374 : 'KW' '1374'; -KW1375 : 'KW' '1375'; -KW1376 : 'KW' '1376'; -KW1377 : 'KW' '1377'; -KW1378 : 'KW' '1378'; -KW1379 : 'KW' '1379'; -KW1380 : 'KW' '1380'; -KW1381 : 'KW' '1381'; -KW1382 : 'KW' '1382'; -KW1383 : 'KW' '1383'; -KW1384 : 'KW' '1384'; -KW1385 : 'KW' '1385'; -KW1386 : 'KW' '1386'; -KW1387 : 'KW' '1387'; -KW1388 : 'KW' '1388'; -KW1389 : 'KW' '1389'; -KW1390 : 'KW' '1390'; -KW1391 : 'KW' '1391'; -KW1392 : 'KW' '1392'; -KW1393 : 'KW' '1393'; -KW1394 : 'KW' '1394'; -KW1395 : 'KW' '1395'; -KW1396 : 'KW' '1396'; -KW1397 : 'KW' '1397'; -KW1398 : 'KW' '1398'; -KW1399 : 'KW' '1399'; -KW1400 : 'KW' '1400'; -KW1401 : 'KW' '1401'; -KW1402 : 'KW' '1402'; -KW1403 : 'KW' '1403'; -KW1404 : 'KW' '1404'; -KW1405 : 'KW' '1405'; -KW1406 : 'KW' '1406'; -KW1407 : 'KW' '1407'; -KW1408 : 'KW' '1408'; -KW1409 : 'KW' '1409'; -KW1410 : 'KW' '1410'; -KW1411 : 'KW' '1411'; -KW1412 : 'KW' '1412'; -KW1413 : 'KW' '1413'; -KW1414 : 'KW' '1414'; -KW1415 : 'KW' '1415'; -KW1416 : 'KW' '1416'; -KW1417 : 'KW' '1417'; -KW1418 : 'KW' '1418'; 
-KW1419 : 'KW' '1419'; -KW1420 : 'KW' '1420'; -KW1421 : 'KW' '1421'; -KW1422 : 'KW' '1422'; -KW1423 : 'KW' '1423'; -KW1424 : 'KW' '1424'; -KW1425 : 'KW' '1425'; -KW1426 : 'KW' '1426'; -KW1427 : 'KW' '1427'; -KW1428 : 'KW' '1428'; -KW1429 : 'KW' '1429'; -KW1430 : 'KW' '1430'; -KW1431 : 'KW' '1431'; -KW1432 : 'KW' '1432'; -KW1433 : 'KW' '1433'; -KW1434 : 'KW' '1434'; -KW1435 : 'KW' '1435'; -KW1436 : 'KW' '1436'; -KW1437 : 'KW' '1437'; -KW1438 : 'KW' '1438'; -KW1439 : 'KW' '1439'; -KW1440 : 'KW' '1440'; -KW1441 : 'KW' '1441'; -KW1442 : 'KW' '1442'; -KW1443 : 'KW' '1443'; -KW1444 : 'KW' '1444'; -KW1445 : 'KW' '1445'; -KW1446 : 'KW' '1446'; -KW1447 : 'KW' '1447'; -KW1448 : 'KW' '1448'; -KW1449 : 'KW' '1449'; -KW1450 : 'KW' '1450'; -KW1451 : 'KW' '1451'; -KW1452 : 'KW' '1452'; -KW1453 : 'KW' '1453'; -KW1454 : 'KW' '1454'; -KW1455 : 'KW' '1455'; -KW1456 : 'KW' '1456'; -KW1457 : 'KW' '1457'; -KW1458 : 'KW' '1458'; -KW1459 : 'KW' '1459'; -KW1460 : 'KW' '1460'; -KW1461 : 'KW' '1461'; -KW1462 : 'KW' '1462'; -KW1463 : 'KW' '1463'; -KW1464 : 'KW' '1464'; -KW1465 : 'KW' '1465'; -KW1466 : 'KW' '1466'; -KW1467 : 'KW' '1467'; -KW1468 : 'KW' '1468'; -KW1469 : 'KW' '1469'; -KW1470 : 'KW' '1470'; -KW1471 : 'KW' '1471'; -KW1472 : 'KW' '1472'; -KW1473 : 'KW' '1473'; -KW1474 : 'KW' '1474'; -KW1475 : 'KW' '1475'; -KW1476 : 'KW' '1476'; -KW1477 : 'KW' '1477'; -KW1478 : 'KW' '1478'; -KW1479 : 'KW' '1479'; -KW1480 : 'KW' '1480'; -KW1481 : 'KW' '1481'; -KW1482 : 'KW' '1482'; -KW1483 : 'KW' '1483'; -KW1484 : 'KW' '1484'; -KW1485 : 'KW' '1485'; -KW1486 : 'KW' '1486'; -KW1487 : 'KW' '1487'; -KW1488 : 'KW' '1488'; -KW1489 : 'KW' '1489'; -KW1490 : 'KW' '1490'; -KW1491 : 'KW' '1491'; -KW1492 : 'KW' '1492'; -KW1493 : 'KW' '1493'; -KW1494 : 'KW' '1494'; -KW1495 : 'KW' '1495'; -KW1496 : 'KW' '1496'; -KW1497 : 'KW' '1497'; -KW1498 : 'KW' '1498'; -KW1499 : 'KW' '1499'; -KW1500 : 'KW' '1500'; -KW1501 : 'KW' '1501'; -KW1502 : 'KW' '1502'; -KW1503 : 'KW' '1503'; -KW1504 : 'KW' '1504'; -KW1505 : 'KW' 
'1505'; -KW1506 : 'KW' '1506'; -KW1507 : 'KW' '1507'; -KW1508 : 'KW' '1508'; -KW1509 : 'KW' '1509'; -KW1510 : 'KW' '1510'; -KW1511 : 'KW' '1511'; -KW1512 : 'KW' '1512'; -KW1513 : 'KW' '1513'; -KW1514 : 'KW' '1514'; -KW1515 : 'KW' '1515'; -KW1516 : 'KW' '1516'; -KW1517 : 'KW' '1517'; -KW1518 : 'KW' '1518'; -KW1519 : 'KW' '1519'; -KW1520 : 'KW' '1520'; -KW1521 : 'KW' '1521'; -KW1522 : 'KW' '1522'; -KW1523 : 'KW' '1523'; -KW1524 : 'KW' '1524'; -KW1525 : 'KW' '1525'; -KW1526 : 'KW' '1526'; -KW1527 : 'KW' '1527'; -KW1528 : 'KW' '1528'; -KW1529 : 'KW' '1529'; -KW1530 : 'KW' '1530'; -KW1531 : 'KW' '1531'; -KW1532 : 'KW' '1532'; -KW1533 : 'KW' '1533'; -KW1534 : 'KW' '1534'; -KW1535 : 'KW' '1535'; -KW1536 : 'KW' '1536'; -KW1537 : 'KW' '1537'; -KW1538 : 'KW' '1538'; -KW1539 : 'KW' '1539'; -KW1540 : 'KW' '1540'; -KW1541 : 'KW' '1541'; -KW1542 : 'KW' '1542'; -KW1543 : 'KW' '1543'; -KW1544 : 'KW' '1544'; -KW1545 : 'KW' '1545'; -KW1546 : 'KW' '1546'; -KW1547 : 'KW' '1547'; -KW1548 : 'KW' '1548'; -KW1549 : 'KW' '1549'; -KW1550 : 'KW' '1550'; -KW1551 : 'KW' '1551'; -KW1552 : 'KW' '1552'; -KW1553 : 'KW' '1553'; -KW1554 : 'KW' '1554'; -KW1555 : 'KW' '1555'; -KW1556 : 'KW' '1556'; -KW1557 : 'KW' '1557'; -KW1558 : 'KW' '1558'; -KW1559 : 'KW' '1559'; -KW1560 : 'KW' '1560'; -KW1561 : 'KW' '1561'; -KW1562 : 'KW' '1562'; -KW1563 : 'KW' '1563'; -KW1564 : 'KW' '1564'; -KW1565 : 'KW' '1565'; -KW1566 : 'KW' '1566'; -KW1567 : 'KW' '1567'; -KW1568 : 'KW' '1568'; -KW1569 : 'KW' '1569'; -KW1570 : 'KW' '1570'; -KW1571 : 'KW' '1571'; -KW1572 : 'KW' '1572'; -KW1573 : 'KW' '1573'; -KW1574 : 'KW' '1574'; -KW1575 : 'KW' '1575'; -KW1576 : 'KW' '1576'; -KW1577 : 'KW' '1577'; -KW1578 : 'KW' '1578'; -KW1579 : 'KW' '1579'; -KW1580 : 'KW' '1580'; -KW1581 : 'KW' '1581'; -KW1582 : 'KW' '1582'; -KW1583 : 'KW' '1583'; -KW1584 : 'KW' '1584'; -KW1585 : 'KW' '1585'; -KW1586 : 'KW' '1586'; -KW1587 : 'KW' '1587'; -KW1588 : 'KW' '1588'; -KW1589 : 'KW' '1589'; -KW1590 : 'KW' '1590'; -KW1591 : 'KW' '1591'; -KW1592 : 
'KW' '1592'; -KW1593 : 'KW' '1593'; -KW1594 : 'KW' '1594'; -KW1595 : 'KW' '1595'; -KW1596 : 'KW' '1596'; -KW1597 : 'KW' '1597'; -KW1598 : 'KW' '1598'; -KW1599 : 'KW' '1599'; -KW1600 : 'KW' '1600'; -KW1601 : 'KW' '1601'; -KW1602 : 'KW' '1602'; -KW1603 : 'KW' '1603'; -KW1604 : 'KW' '1604'; -KW1605 : 'KW' '1605'; -KW1606 : 'KW' '1606'; -KW1607 : 'KW' '1607'; -KW1608 : 'KW' '1608'; -KW1609 : 'KW' '1609'; -KW1610 : 'KW' '1610'; -KW1611 : 'KW' '1611'; -KW1612 : 'KW' '1612'; -KW1613 : 'KW' '1613'; -KW1614 : 'KW' '1614'; -KW1615 : 'KW' '1615'; -KW1616 : 'KW' '1616'; -KW1617 : 'KW' '1617'; -KW1618 : 'KW' '1618'; -KW1619 : 'KW' '1619'; -KW1620 : 'KW' '1620'; -KW1621 : 'KW' '1621'; -KW1622 : 'KW' '1622'; -KW1623 : 'KW' '1623'; -KW1624 : 'KW' '1624'; -KW1625 : 'KW' '1625'; -KW1626 : 'KW' '1626'; -KW1627 : 'KW' '1627'; -KW1628 : 'KW' '1628'; -KW1629 : 'KW' '1629'; -KW1630 : 'KW' '1630'; -KW1631 : 'KW' '1631'; -KW1632 : 'KW' '1632'; -KW1633 : 'KW' '1633'; -KW1634 : 'KW' '1634'; -KW1635 : 'KW' '1635'; -KW1636 : 'KW' '1636'; -KW1637 : 'KW' '1637'; -KW1638 : 'KW' '1638'; -KW1639 : 'KW' '1639'; -KW1640 : 'KW' '1640'; -KW1641 : 'KW' '1641'; -KW1642 : 'KW' '1642'; -KW1643 : 'KW' '1643'; -KW1644 : 'KW' '1644'; -KW1645 : 'KW' '1645'; -KW1646 : 'KW' '1646'; -KW1647 : 'KW' '1647'; -KW1648 : 'KW' '1648'; -KW1649 : 'KW' '1649'; -KW1650 : 'KW' '1650'; -KW1651 : 'KW' '1651'; -KW1652 : 'KW' '1652'; -KW1653 : 'KW' '1653'; -KW1654 : 'KW' '1654'; -KW1655 : 'KW' '1655'; -KW1656 : 'KW' '1656'; -KW1657 : 'KW' '1657'; -KW1658 : 'KW' '1658'; -KW1659 : 'KW' '1659'; -KW1660 : 'KW' '1660'; -KW1661 : 'KW' '1661'; -KW1662 : 'KW' '1662'; -KW1663 : 'KW' '1663'; -KW1664 : 'KW' '1664'; -KW1665 : 'KW' '1665'; -KW1666 : 'KW' '1666'; -KW1667 : 'KW' '1667'; -KW1668 : 'KW' '1668'; -KW1669 : 'KW' '1669'; -KW1670 : 'KW' '1670'; -KW1671 : 'KW' '1671'; -KW1672 : 'KW' '1672'; -KW1673 : 'KW' '1673'; -KW1674 : 'KW' '1674'; -KW1675 : 'KW' '1675'; -KW1676 : 'KW' '1676'; -KW1677 : 'KW' '1677'; -KW1678 : 'KW' '1678'; -KW1679 
: 'KW' '1679'; -KW1680 : 'KW' '1680'; -KW1681 : 'KW' '1681'; -KW1682 : 'KW' '1682'; -KW1683 : 'KW' '1683'; -KW1684 : 'KW' '1684'; -KW1685 : 'KW' '1685'; -KW1686 : 'KW' '1686'; -KW1687 : 'KW' '1687'; -KW1688 : 'KW' '1688'; -KW1689 : 'KW' '1689'; -KW1690 : 'KW' '1690'; -KW1691 : 'KW' '1691'; -KW1692 : 'KW' '1692'; -KW1693 : 'KW' '1693'; -KW1694 : 'KW' '1694'; -KW1695 : 'KW' '1695'; -KW1696 : 'KW' '1696'; -KW1697 : 'KW' '1697'; -KW1698 : 'KW' '1698'; -KW1699 : 'KW' '1699'; -KW1700 : 'KW' '1700'; -KW1701 : 'KW' '1701'; -KW1702 : 'KW' '1702'; -KW1703 : 'KW' '1703'; -KW1704 : 'KW' '1704'; -KW1705 : 'KW' '1705'; -KW1706 : 'KW' '1706'; -KW1707 : 'KW' '1707'; -KW1708 : 'KW' '1708'; -KW1709 : 'KW' '1709'; -KW1710 : 'KW' '1710'; -KW1711 : 'KW' '1711'; -KW1712 : 'KW' '1712'; -KW1713 : 'KW' '1713'; -KW1714 : 'KW' '1714'; -KW1715 : 'KW' '1715'; -KW1716 : 'KW' '1716'; -KW1717 : 'KW' '1717'; -KW1718 : 'KW' '1718'; -KW1719 : 'KW' '1719'; -KW1720 : 'KW' '1720'; -KW1721 : 'KW' '1721'; -KW1722 : 'KW' '1722'; -KW1723 : 'KW' '1723'; -KW1724 : 'KW' '1724'; -KW1725 : 'KW' '1725'; -KW1726 : 'KW' '1726'; -KW1727 : 'KW' '1727'; -KW1728 : 'KW' '1728'; -KW1729 : 'KW' '1729'; -KW1730 : 'KW' '1730'; -KW1731 : 'KW' '1731'; -KW1732 : 'KW' '1732'; -KW1733 : 'KW' '1733'; -KW1734 : 'KW' '1734'; -KW1735 : 'KW' '1735'; -KW1736 : 'KW' '1736'; -KW1737 : 'KW' '1737'; -KW1738 : 'KW' '1738'; -KW1739 : 'KW' '1739'; -KW1740 : 'KW' '1740'; -KW1741 : 'KW' '1741'; -KW1742 : 'KW' '1742'; -KW1743 : 'KW' '1743'; -KW1744 : 'KW' '1744'; -KW1745 : 'KW' '1745'; -KW1746 : 'KW' '1746'; -KW1747 : 'KW' '1747'; -KW1748 : 'KW' '1748'; -KW1749 : 'KW' '1749'; -KW1750 : 'KW' '1750'; -KW1751 : 'KW' '1751'; -KW1752 : 'KW' '1752'; -KW1753 : 'KW' '1753'; -KW1754 : 'KW' '1754'; -KW1755 : 'KW' '1755'; -KW1756 : 'KW' '1756'; -KW1757 : 'KW' '1757'; -KW1758 : 'KW' '1758'; -KW1759 : 'KW' '1759'; -KW1760 : 'KW' '1760'; -KW1761 : 'KW' '1761'; -KW1762 : 'KW' '1762'; -KW1763 : 'KW' '1763'; -KW1764 : 'KW' '1764'; -KW1765 : 'KW' '1765'; 
-KW1766 : 'KW' '1766'; -KW1767 : 'KW' '1767'; -KW1768 : 'KW' '1768'; -KW1769 : 'KW' '1769'; -KW1770 : 'KW' '1770'; -KW1771 : 'KW' '1771'; -KW1772 : 'KW' '1772'; -KW1773 : 'KW' '1773'; -KW1774 : 'KW' '1774'; -KW1775 : 'KW' '1775'; -KW1776 : 'KW' '1776'; -KW1777 : 'KW' '1777'; -KW1778 : 'KW' '1778'; -KW1779 : 'KW' '1779'; -KW1780 : 'KW' '1780'; -KW1781 : 'KW' '1781'; -KW1782 : 'KW' '1782'; -KW1783 : 'KW' '1783'; -KW1784 : 'KW' '1784'; -KW1785 : 'KW' '1785'; -KW1786 : 'KW' '1786'; -KW1787 : 'KW' '1787'; -KW1788 : 'KW' '1788'; -KW1789 : 'KW' '1789'; -KW1790 : 'KW' '1790'; -KW1791 : 'KW' '1791'; -KW1792 : 'KW' '1792'; -KW1793 : 'KW' '1793'; -KW1794 : 'KW' '1794'; -KW1795 : 'KW' '1795'; -KW1796 : 'KW' '1796'; -KW1797 : 'KW' '1797'; -KW1798 : 'KW' '1798'; -KW1799 : 'KW' '1799'; -KW1800 : 'KW' '1800'; -KW1801 : 'KW' '1801'; -KW1802 : 'KW' '1802'; -KW1803 : 'KW' '1803'; -KW1804 : 'KW' '1804'; -KW1805 : 'KW' '1805'; -KW1806 : 'KW' '1806'; -KW1807 : 'KW' '1807'; -KW1808 : 'KW' '1808'; -KW1809 : 'KW' '1809'; -KW1810 : 'KW' '1810'; -KW1811 : 'KW' '1811'; -KW1812 : 'KW' '1812'; -KW1813 : 'KW' '1813'; -KW1814 : 'KW' '1814'; -KW1815 : 'KW' '1815'; -KW1816 : 'KW' '1816'; -KW1817 : 'KW' '1817'; -KW1818 : 'KW' '1818'; -KW1819 : 'KW' '1819'; -KW1820 : 'KW' '1820'; -KW1821 : 'KW' '1821'; -KW1822 : 'KW' '1822'; -KW1823 : 'KW' '1823'; -KW1824 : 'KW' '1824'; -KW1825 : 'KW' '1825'; -KW1826 : 'KW' '1826'; -KW1827 : 'KW' '1827'; -KW1828 : 'KW' '1828'; -KW1829 : 'KW' '1829'; -KW1830 : 'KW' '1830'; -KW1831 : 'KW' '1831'; -KW1832 : 'KW' '1832'; -KW1833 : 'KW' '1833'; -KW1834 : 'KW' '1834'; -KW1835 : 'KW' '1835'; -KW1836 : 'KW' '1836'; -KW1837 : 'KW' '1837'; -KW1838 : 'KW' '1838'; -KW1839 : 'KW' '1839'; -KW1840 : 'KW' '1840'; -KW1841 : 'KW' '1841'; -KW1842 : 'KW' '1842'; -KW1843 : 'KW' '1843'; -KW1844 : 'KW' '1844'; -KW1845 : 'KW' '1845'; -KW1846 : 'KW' '1846'; -KW1847 : 'KW' '1847'; -KW1848 : 'KW' '1848'; -KW1849 : 'KW' '1849'; -KW1850 : 'KW' '1850'; -KW1851 : 'KW' '1851'; -KW1852 : 'KW' 
'1852'; -KW1853 : 'KW' '1853'; -KW1854 : 'KW' '1854'; -KW1855 : 'KW' '1855'; -KW1856 : 'KW' '1856'; -KW1857 : 'KW' '1857'; -KW1858 : 'KW' '1858'; -KW1859 : 'KW' '1859'; -KW1860 : 'KW' '1860'; -KW1861 : 'KW' '1861'; -KW1862 : 'KW' '1862'; -KW1863 : 'KW' '1863'; -KW1864 : 'KW' '1864'; -KW1865 : 'KW' '1865'; -KW1866 : 'KW' '1866'; -KW1867 : 'KW' '1867'; -KW1868 : 'KW' '1868'; -KW1869 : 'KW' '1869'; -KW1870 : 'KW' '1870'; -KW1871 : 'KW' '1871'; -KW1872 : 'KW' '1872'; -KW1873 : 'KW' '1873'; -KW1874 : 'KW' '1874'; -KW1875 : 'KW' '1875'; -KW1876 : 'KW' '1876'; -KW1877 : 'KW' '1877'; -KW1878 : 'KW' '1878'; -KW1879 : 'KW' '1879'; -KW1880 : 'KW' '1880'; -KW1881 : 'KW' '1881'; -KW1882 : 'KW' '1882'; -KW1883 : 'KW' '1883'; -KW1884 : 'KW' '1884'; -KW1885 : 'KW' '1885'; -KW1886 : 'KW' '1886'; -KW1887 : 'KW' '1887'; -KW1888 : 'KW' '1888'; -KW1889 : 'KW' '1889'; -KW1890 : 'KW' '1890'; -KW1891 : 'KW' '1891'; -KW1892 : 'KW' '1892'; -KW1893 : 'KW' '1893'; -KW1894 : 'KW' '1894'; -KW1895 : 'KW' '1895'; -KW1896 : 'KW' '1896'; -KW1897 : 'KW' '1897'; -KW1898 : 'KW' '1898'; -KW1899 : 'KW' '1899'; -KW1900 : 'KW' '1900'; -KW1901 : 'KW' '1901'; -KW1902 : 'KW' '1902'; -KW1903 : 'KW' '1903'; -KW1904 : 'KW' '1904'; -KW1905 : 'KW' '1905'; -KW1906 : 'KW' '1906'; -KW1907 : 'KW' '1907'; -KW1908 : 'KW' '1908'; -KW1909 : 'KW' '1909'; -KW1910 : 'KW' '1910'; -KW1911 : 'KW' '1911'; -KW1912 : 'KW' '1912'; -KW1913 : 'KW' '1913'; -KW1914 : 'KW' '1914'; -KW1915 : 'KW' '1915'; -KW1916 : 'KW' '1916'; -KW1917 : 'KW' '1917'; -KW1918 : 'KW' '1918'; -KW1919 : 'KW' '1919'; -KW1920 : 'KW' '1920'; -KW1921 : 'KW' '1921'; -KW1922 : 'KW' '1922'; -KW1923 : 'KW' '1923'; -KW1924 : 'KW' '1924'; -KW1925 : 'KW' '1925'; -KW1926 : 'KW' '1926'; -KW1927 : 'KW' '1927'; -KW1928 : 'KW' '1928'; -KW1929 : 'KW' '1929'; -KW1930 : 'KW' '1930'; -KW1931 : 'KW' '1931'; -KW1932 : 'KW' '1932'; -KW1933 : 'KW' '1933'; -KW1934 : 'KW' '1934'; -KW1935 : 'KW' '1935'; -KW1936 : 'KW' '1936'; -KW1937 : 'KW' '1937'; -KW1938 : 'KW' '1938'; -KW1939 : 
'KW' '1939'; -KW1940 : 'KW' '1940'; -KW1941 : 'KW' '1941'; -KW1942 : 'KW' '1942'; -KW1943 : 'KW' '1943'; -KW1944 : 'KW' '1944'; -KW1945 : 'KW' '1945'; -KW1946 : 'KW' '1946'; -KW1947 : 'KW' '1947'; -KW1948 : 'KW' '1948'; -KW1949 : 'KW' '1949'; -KW1950 : 'KW' '1950'; -KW1951 : 'KW' '1951'; -KW1952 : 'KW' '1952'; -KW1953 : 'KW' '1953'; -KW1954 : 'KW' '1954'; -KW1955 : 'KW' '1955'; -KW1956 : 'KW' '1956'; -KW1957 : 'KW' '1957'; -KW1958 : 'KW' '1958'; -KW1959 : 'KW' '1959'; -KW1960 : 'KW' '1960'; -KW1961 : 'KW' '1961'; -KW1962 : 'KW' '1962'; -KW1963 : 'KW' '1963'; -KW1964 : 'KW' '1964'; -KW1965 : 'KW' '1965'; -KW1966 : 'KW' '1966'; -KW1967 : 'KW' '1967'; -KW1968 : 'KW' '1968'; -KW1969 : 'KW' '1969'; -KW1970 : 'KW' '1970'; -KW1971 : 'KW' '1971'; -KW1972 : 'KW' '1972'; -KW1973 : 'KW' '1973'; -KW1974 : 'KW' '1974'; -KW1975 : 'KW' '1975'; -KW1976 : 'KW' '1976'; -KW1977 : 'KW' '1977'; -KW1978 : 'KW' '1978'; -KW1979 : 'KW' '1979'; -KW1980 : 'KW' '1980'; -KW1981 : 'KW' '1981'; -KW1982 : 'KW' '1982'; -KW1983 : 'KW' '1983'; -KW1984 : 'KW' '1984'; -KW1985 : 'KW' '1985'; -KW1986 : 'KW' '1986'; -KW1987 : 'KW' '1987'; -KW1988 : 'KW' '1988'; -KW1989 : 'KW' '1989'; -KW1990 : 'KW' '1990'; -KW1991 : 'KW' '1991'; -KW1992 : 'KW' '1992'; -KW1993 : 'KW' '1993'; -KW1994 : 'KW' '1994'; -KW1995 : 'KW' '1995'; -KW1996 : 'KW' '1996'; -KW1997 : 'KW' '1997'; -KW1998 : 'KW' '1998'; -KW1999 : 'KW' '1999'; -KW2000 : 'KW' '2000'; -KW2001 : 'KW' '2001'; -KW2002 : 'KW' '2002'; -KW2003 : 'KW' '2003'; -KW2004 : 'KW' '2004'; -KW2005 : 'KW' '2005'; -KW2006 : 'KW' '2006'; -KW2007 : 'KW' '2007'; -KW2008 : 'KW' '2008'; -KW2009 : 'KW' '2009'; -KW2010 : 'KW' '2010'; -KW2011 : 'KW' '2011'; -KW2012 : 'KW' '2012'; -KW2013 : 'KW' '2013'; -KW2014 : 'KW' '2014'; -KW2015 : 'KW' '2015'; -KW2016 : 'KW' '2016'; -KW2017 : 'KW' '2017'; -KW2018 : 'KW' '2018'; -KW2019 : 'KW' '2019'; -KW2020 : 'KW' '2020'; -KW2021 : 'KW' '2021'; -KW2022 : 'KW' '2022'; -KW2023 : 'KW' '2023'; -KW2024 : 'KW' '2024'; -KW2025 : 'KW' '2025'; -KW2026 
: 'KW' '2026'; -KW2027 : 'KW' '2027'; -KW2028 : 'KW' '2028'; -KW2029 : 'KW' '2029'; -KW2030 : 'KW' '2030'; -KW2031 : 'KW' '2031'; -KW2032 : 'KW' '2032'; -KW2033 : 'KW' '2033'; -KW2034 : 'KW' '2034'; -KW2035 : 'KW' '2035'; -KW2036 : 'KW' '2036'; -KW2037 : 'KW' '2037'; -KW2038 : 'KW' '2038'; -KW2039 : 'KW' '2039'; -KW2040 : 'KW' '2040'; -KW2041 : 'KW' '2041'; -KW2042 : 'KW' '2042'; -KW2043 : 'KW' '2043'; -KW2044 : 'KW' '2044'; -KW2045 : 'KW' '2045'; -KW2046 : 'KW' '2046'; -KW2047 : 'KW' '2047'; -KW2048 : 'KW' '2048'; -KW2049 : 'KW' '2049'; -KW2050 : 'KW' '2050'; -KW2051 : 'KW' '2051'; -KW2052 : 'KW' '2052'; -KW2053 : 'KW' '2053'; -KW2054 : 'KW' '2054'; -KW2055 : 'KW' '2055'; -KW2056 : 'KW' '2056'; -KW2057 : 'KW' '2057'; -KW2058 : 'KW' '2058'; -KW2059 : 'KW' '2059'; -KW2060 : 'KW' '2060'; -KW2061 : 'KW' '2061'; -KW2062 : 'KW' '2062'; -KW2063 : 'KW' '2063'; -KW2064 : 'KW' '2064'; -KW2065 : 'KW' '2065'; -KW2066 : 'KW' '2066'; -KW2067 : 'KW' '2067'; -KW2068 : 'KW' '2068'; -KW2069 : 'KW' '2069'; -KW2070 : 'KW' '2070'; -KW2071 : 'KW' '2071'; -KW2072 : 'KW' '2072'; -KW2073 : 'KW' '2073'; -KW2074 : 'KW' '2074'; -KW2075 : 'KW' '2075'; -KW2076 : 'KW' '2076'; -KW2077 : 'KW' '2077'; -KW2078 : 'KW' '2078'; -KW2079 : 'KW' '2079'; -KW2080 : 'KW' '2080'; -KW2081 : 'KW' '2081'; -KW2082 : 'KW' '2082'; -KW2083 : 'KW' '2083'; -KW2084 : 'KW' '2084'; -KW2085 : 'KW' '2085'; -KW2086 : 'KW' '2086'; -KW2087 : 'KW' '2087'; -KW2088 : 'KW' '2088'; -KW2089 : 'KW' '2089'; -KW2090 : 'KW' '2090'; -KW2091 : 'KW' '2091'; -KW2092 : 'KW' '2092'; -KW2093 : 'KW' '2093'; -KW2094 : 'KW' '2094'; -KW2095 : 'KW' '2095'; -KW2096 : 'KW' '2096'; -KW2097 : 'KW' '2097'; -KW2098 : 'KW' '2098'; -KW2099 : 'KW' '2099'; -KW2100 : 'KW' '2100'; -KW2101 : 'KW' '2101'; -KW2102 : 'KW' '2102'; -KW2103 : 'KW' '2103'; -KW2104 : 'KW' '2104'; -KW2105 : 'KW' '2105'; -KW2106 : 'KW' '2106'; -KW2107 : 'KW' '2107'; -KW2108 : 'KW' '2108'; -KW2109 : 'KW' '2109'; -KW2110 : 'KW' '2110'; -KW2111 : 'KW' '2111'; -KW2112 : 'KW' '2112'; 
-KW2113 : 'KW' '2113'; -KW2114 : 'KW' '2114'; -KW2115 : 'KW' '2115'; -KW2116 : 'KW' '2116'; -KW2117 : 'KW' '2117'; -KW2118 : 'KW' '2118'; -KW2119 : 'KW' '2119'; -KW2120 : 'KW' '2120'; -KW2121 : 'KW' '2121'; -KW2122 : 'KW' '2122'; -KW2123 : 'KW' '2123'; -KW2124 : 'KW' '2124'; -KW2125 : 'KW' '2125'; -KW2126 : 'KW' '2126'; -KW2127 : 'KW' '2127'; -KW2128 : 'KW' '2128'; -KW2129 : 'KW' '2129'; -KW2130 : 'KW' '2130'; -KW2131 : 'KW' '2131'; -KW2132 : 'KW' '2132'; -KW2133 : 'KW' '2133'; -KW2134 : 'KW' '2134'; -KW2135 : 'KW' '2135'; -KW2136 : 'KW' '2136'; -KW2137 : 'KW' '2137'; -KW2138 : 'KW' '2138'; -KW2139 : 'KW' '2139'; -KW2140 : 'KW' '2140'; -KW2141 : 'KW' '2141'; -KW2142 : 'KW' '2142'; -KW2143 : 'KW' '2143'; -KW2144 : 'KW' '2144'; -KW2145 : 'KW' '2145'; -KW2146 : 'KW' '2146'; -KW2147 : 'KW' '2147'; -KW2148 : 'KW' '2148'; -KW2149 : 'KW' '2149'; -KW2150 : 'KW' '2150'; -KW2151 : 'KW' '2151'; -KW2152 : 'KW' '2152'; -KW2153 : 'KW' '2153'; -KW2154 : 'KW' '2154'; -KW2155 : 'KW' '2155'; -KW2156 : 'KW' '2156'; -KW2157 : 'KW' '2157'; -KW2158 : 'KW' '2158'; -KW2159 : 'KW' '2159'; -KW2160 : 'KW' '2160'; -KW2161 : 'KW' '2161'; -KW2162 : 'KW' '2162'; -KW2163 : 'KW' '2163'; -KW2164 : 'KW' '2164'; -KW2165 : 'KW' '2165'; -KW2166 : 'KW' '2166'; -KW2167 : 'KW' '2167'; -KW2168 : 'KW' '2168'; -KW2169 : 'KW' '2169'; -KW2170 : 'KW' '2170'; -KW2171 : 'KW' '2171'; -KW2172 : 'KW' '2172'; -KW2173 : 'KW' '2173'; -KW2174 : 'KW' '2174'; -KW2175 : 'KW' '2175'; -KW2176 : 'KW' '2176'; -KW2177 : 'KW' '2177'; -KW2178 : 'KW' '2178'; -KW2179 : 'KW' '2179'; -KW2180 : 'KW' '2180'; -KW2181 : 'KW' '2181'; -KW2182 : 'KW' '2182'; -KW2183 : 'KW' '2183'; -KW2184 : 'KW' '2184'; -KW2185 : 'KW' '2185'; -KW2186 : 'KW' '2186'; -KW2187 : 'KW' '2187'; -KW2188 : 'KW' '2188'; -KW2189 : 'KW' '2189'; -KW2190 : 'KW' '2190'; -KW2191 : 'KW' '2191'; -KW2192 : 'KW' '2192'; -KW2193 : 'KW' '2193'; -KW2194 : 'KW' '2194'; -KW2195 : 'KW' '2195'; -KW2196 : 'KW' '2196'; -KW2197 : 'KW' '2197'; -KW2198 : 'KW' '2198'; -KW2199 : 'KW' 
'2199'; -KW2200 : 'KW' '2200'; -KW2201 : 'KW' '2201'; -KW2202 : 'KW' '2202'; -KW2203 : 'KW' '2203'; -KW2204 : 'KW' '2204'; -KW2205 : 'KW' '2205'; -KW2206 : 'KW' '2206'; -KW2207 : 'KW' '2207'; -KW2208 : 'KW' '2208'; -KW2209 : 'KW' '2209'; -KW2210 : 'KW' '2210'; -KW2211 : 'KW' '2211'; -KW2212 : 'KW' '2212'; -KW2213 : 'KW' '2213'; -KW2214 : 'KW' '2214'; -KW2215 : 'KW' '2215'; -KW2216 : 'KW' '2216'; -KW2217 : 'KW' '2217'; -KW2218 : 'KW' '2218'; -KW2219 : 'KW' '2219'; -KW2220 : 'KW' '2220'; -KW2221 : 'KW' '2221'; -KW2222 : 'KW' '2222'; -KW2223 : 'KW' '2223'; -KW2224 : 'KW' '2224'; -KW2225 : 'KW' '2225'; -KW2226 : 'KW' '2226'; -KW2227 : 'KW' '2227'; -KW2228 : 'KW' '2228'; -KW2229 : 'KW' '2229'; -KW2230 : 'KW' '2230'; -KW2231 : 'KW' '2231'; -KW2232 : 'KW' '2232'; -KW2233 : 'KW' '2233'; -KW2234 : 'KW' '2234'; -KW2235 : 'KW' '2235'; -KW2236 : 'KW' '2236'; -KW2237 : 'KW' '2237'; -KW2238 : 'KW' '2238'; -KW2239 : 'KW' '2239'; -KW2240 : 'KW' '2240'; -KW2241 : 'KW' '2241'; -KW2242 : 'KW' '2242'; -KW2243 : 'KW' '2243'; -KW2244 : 'KW' '2244'; -KW2245 : 'KW' '2245'; -KW2246 : 'KW' '2246'; -KW2247 : 'KW' '2247'; -KW2248 : 'KW' '2248'; -KW2249 : 'KW' '2249'; -KW2250 : 'KW' '2250'; -KW2251 : 'KW' '2251'; -KW2252 : 'KW' '2252'; -KW2253 : 'KW' '2253'; -KW2254 : 'KW' '2254'; -KW2255 : 'KW' '2255'; -KW2256 : 'KW' '2256'; -KW2257 : 'KW' '2257'; -KW2258 : 'KW' '2258'; -KW2259 : 'KW' '2259'; -KW2260 : 'KW' '2260'; -KW2261 : 'KW' '2261'; -KW2262 : 'KW' '2262'; -KW2263 : 'KW' '2263'; -KW2264 : 'KW' '2264'; -KW2265 : 'KW' '2265'; -KW2266 : 'KW' '2266'; -KW2267 : 'KW' '2267'; -KW2268 : 'KW' '2268'; -KW2269 : 'KW' '2269'; -KW2270 : 'KW' '2270'; -KW2271 : 'KW' '2271'; -KW2272 : 'KW' '2272'; -KW2273 : 'KW' '2273'; -KW2274 : 'KW' '2274'; -KW2275 : 'KW' '2275'; -KW2276 : 'KW' '2276'; -KW2277 : 'KW' '2277'; -KW2278 : 'KW' '2278'; -KW2279 : 'KW' '2279'; -KW2280 : 'KW' '2280'; -KW2281 : 'KW' '2281'; -KW2282 : 'KW' '2282'; -KW2283 : 'KW' '2283'; -KW2284 : 'KW' '2284'; -KW2285 : 'KW' '2285'; -KW2286 : 
'KW' '2286'; -KW2287 : 'KW' '2287'; -KW2288 : 'KW' '2288'; -KW2289 : 'KW' '2289'; -KW2290 : 'KW' '2290'; -KW2291 : 'KW' '2291'; -KW2292 : 'KW' '2292'; -KW2293 : 'KW' '2293'; -KW2294 : 'KW' '2294'; -KW2295 : 'KW' '2295'; -KW2296 : 'KW' '2296'; -KW2297 : 'KW' '2297'; -KW2298 : 'KW' '2298'; -KW2299 : 'KW' '2299'; -KW2300 : 'KW' '2300'; -KW2301 : 'KW' '2301'; -KW2302 : 'KW' '2302'; -KW2303 : 'KW' '2303'; -KW2304 : 'KW' '2304'; -KW2305 : 'KW' '2305'; -KW2306 : 'KW' '2306'; -KW2307 : 'KW' '2307'; -KW2308 : 'KW' '2308'; -KW2309 : 'KW' '2309'; -KW2310 : 'KW' '2310'; -KW2311 : 'KW' '2311'; -KW2312 : 'KW' '2312'; -KW2313 : 'KW' '2313'; -KW2314 : 'KW' '2314'; -KW2315 : 'KW' '2315'; -KW2316 : 'KW' '2316'; -KW2317 : 'KW' '2317'; -KW2318 : 'KW' '2318'; -KW2319 : 'KW' '2319'; -KW2320 : 'KW' '2320'; -KW2321 : 'KW' '2321'; -KW2322 : 'KW' '2322'; -KW2323 : 'KW' '2323'; -KW2324 : 'KW' '2324'; -KW2325 : 'KW' '2325'; -KW2326 : 'KW' '2326'; -KW2327 : 'KW' '2327'; -KW2328 : 'KW' '2328'; -KW2329 : 'KW' '2329'; -KW2330 : 'KW' '2330'; -KW2331 : 'KW' '2331'; -KW2332 : 'KW' '2332'; -KW2333 : 'KW' '2333'; -KW2334 : 'KW' '2334'; -KW2335 : 'KW' '2335'; -KW2336 : 'KW' '2336'; -KW2337 : 'KW' '2337'; -KW2338 : 'KW' '2338'; -KW2339 : 'KW' '2339'; -KW2340 : 'KW' '2340'; -KW2341 : 'KW' '2341'; -KW2342 : 'KW' '2342'; -KW2343 : 'KW' '2343'; -KW2344 : 'KW' '2344'; -KW2345 : 'KW' '2345'; -KW2346 : 'KW' '2346'; -KW2347 : 'KW' '2347'; -KW2348 : 'KW' '2348'; -KW2349 : 'KW' '2349'; -KW2350 : 'KW' '2350'; -KW2351 : 'KW' '2351'; -KW2352 : 'KW' '2352'; -KW2353 : 'KW' '2353'; -KW2354 : 'KW' '2354'; -KW2355 : 'KW' '2355'; -KW2356 : 'KW' '2356'; -KW2357 : 'KW' '2357'; -KW2358 : 'KW' '2358'; -KW2359 : 'KW' '2359'; -KW2360 : 'KW' '2360'; -KW2361 : 'KW' '2361'; -KW2362 : 'KW' '2362'; -KW2363 : 'KW' '2363'; -KW2364 : 'KW' '2364'; -KW2365 : 'KW' '2365'; -KW2366 : 'KW' '2366'; -KW2367 : 'KW' '2367'; -KW2368 : 'KW' '2368'; -KW2369 : 'KW' '2369'; -KW2370 : 'KW' '2370'; -KW2371 : 'KW' '2371'; -KW2372 : 'KW' '2372'; -KW2373 
: 'KW' '2373'; -KW2374 : 'KW' '2374'; -KW2375 : 'KW' '2375'; -KW2376 : 'KW' '2376'; -KW2377 : 'KW' '2377'; -KW2378 : 'KW' '2378'; -KW2379 : 'KW' '2379'; -KW2380 : 'KW' '2380'; -KW2381 : 'KW' '2381'; -KW2382 : 'KW' '2382'; -KW2383 : 'KW' '2383'; -KW2384 : 'KW' '2384'; -KW2385 : 'KW' '2385'; -KW2386 : 'KW' '2386'; -KW2387 : 'KW' '2387'; -KW2388 : 'KW' '2388'; -KW2389 : 'KW' '2389'; -KW2390 : 'KW' '2390'; -KW2391 : 'KW' '2391'; -KW2392 : 'KW' '2392'; -KW2393 : 'KW' '2393'; -KW2394 : 'KW' '2394'; -KW2395 : 'KW' '2395'; -KW2396 : 'KW' '2396'; -KW2397 : 'KW' '2397'; -KW2398 : 'KW' '2398'; -KW2399 : 'KW' '2399'; -KW2400 : 'KW' '2400'; -KW2401 : 'KW' '2401'; -KW2402 : 'KW' '2402'; -KW2403 : 'KW' '2403'; -KW2404 : 'KW' '2404'; -KW2405 : 'KW' '2405'; -KW2406 : 'KW' '2406'; -KW2407 : 'KW' '2407'; -KW2408 : 'KW' '2408'; -KW2409 : 'KW' '2409'; -KW2410 : 'KW' '2410'; -KW2411 : 'KW' '2411'; -KW2412 : 'KW' '2412'; -KW2413 : 'KW' '2413'; -KW2414 : 'KW' '2414'; -KW2415 : 'KW' '2415'; -KW2416 : 'KW' '2416'; -KW2417 : 'KW' '2417'; -KW2418 : 'KW' '2418'; -KW2419 : 'KW' '2419'; -KW2420 : 'KW' '2420'; -KW2421 : 'KW' '2421'; -KW2422 : 'KW' '2422'; -KW2423 : 'KW' '2423'; -KW2424 : 'KW' '2424'; -KW2425 : 'KW' '2425'; -KW2426 : 'KW' '2426'; -KW2427 : 'KW' '2427'; -KW2428 : 'KW' '2428'; -KW2429 : 'KW' '2429'; -KW2430 : 'KW' '2430'; -KW2431 : 'KW' '2431'; -KW2432 : 'KW' '2432'; -KW2433 : 'KW' '2433'; -KW2434 : 'KW' '2434'; -KW2435 : 'KW' '2435'; -KW2436 : 'KW' '2436'; -KW2437 : 'KW' '2437'; -KW2438 : 'KW' '2438'; -KW2439 : 'KW' '2439'; -KW2440 : 'KW' '2440'; -KW2441 : 'KW' '2441'; -KW2442 : 'KW' '2442'; -KW2443 : 'KW' '2443'; -KW2444 : 'KW' '2444'; -KW2445 : 'KW' '2445'; -KW2446 : 'KW' '2446'; -KW2447 : 'KW' '2447'; -KW2448 : 'KW' '2448'; -KW2449 : 'KW' '2449'; -KW2450 : 'KW' '2450'; -KW2451 : 'KW' '2451'; -KW2452 : 'KW' '2452'; -KW2453 : 'KW' '2453'; -KW2454 : 'KW' '2454'; -KW2455 : 'KW' '2455'; -KW2456 : 'KW' '2456'; -KW2457 : 'KW' '2457'; -KW2458 : 'KW' '2458'; -KW2459 : 'KW' '2459'; 
-KW2460 : 'KW' '2460'; -KW2461 : 'KW' '2461'; -KW2462 : 'KW' '2462'; -KW2463 : 'KW' '2463'; -KW2464 : 'KW' '2464'; -KW2465 : 'KW' '2465'; -KW2466 : 'KW' '2466'; -KW2467 : 'KW' '2467'; -KW2468 : 'KW' '2468'; -KW2469 : 'KW' '2469'; -KW2470 : 'KW' '2470'; -KW2471 : 'KW' '2471'; -KW2472 : 'KW' '2472'; -KW2473 : 'KW' '2473'; -KW2474 : 'KW' '2474'; -KW2475 : 'KW' '2475'; -KW2476 : 'KW' '2476'; -KW2477 : 'KW' '2477'; -KW2478 : 'KW' '2478'; -KW2479 : 'KW' '2479'; -KW2480 : 'KW' '2480'; -KW2481 : 'KW' '2481'; -KW2482 : 'KW' '2482'; -KW2483 : 'KW' '2483'; -KW2484 : 'KW' '2484'; -KW2485 : 'KW' '2485'; -KW2486 : 'KW' '2486'; -KW2487 : 'KW' '2487'; -KW2488 : 'KW' '2488'; -KW2489 : 'KW' '2489'; -KW2490 : 'KW' '2490'; -KW2491 : 'KW' '2491'; -KW2492 : 'KW' '2492'; -KW2493 : 'KW' '2493'; -KW2494 : 'KW' '2494'; -KW2495 : 'KW' '2495'; -KW2496 : 'KW' '2496'; -KW2497 : 'KW' '2497'; -KW2498 : 'KW' '2498'; -KW2499 : 'KW' '2499'; -KW2500 : 'KW' '2500'; -KW2501 : 'KW' '2501'; -KW2502 : 'KW' '2502'; -KW2503 : 'KW' '2503'; -KW2504 : 'KW' '2504'; -KW2505 : 'KW' '2505'; -KW2506 : 'KW' '2506'; -KW2507 : 'KW' '2507'; -KW2508 : 'KW' '2508'; -KW2509 : 'KW' '2509'; -KW2510 : 'KW' '2510'; -KW2511 : 'KW' '2511'; -KW2512 : 'KW' '2512'; -KW2513 : 'KW' '2513'; -KW2514 : 'KW' '2514'; -KW2515 : 'KW' '2515'; -KW2516 : 'KW' '2516'; -KW2517 : 'KW' '2517'; -KW2518 : 'KW' '2518'; -KW2519 : 'KW' '2519'; -KW2520 : 'KW' '2520'; -KW2521 : 'KW' '2521'; -KW2522 : 'KW' '2522'; -KW2523 : 'KW' '2523'; -KW2524 : 'KW' '2524'; -KW2525 : 'KW' '2525'; -KW2526 : 'KW' '2526'; -KW2527 : 'KW' '2527'; -KW2528 : 'KW' '2528'; -KW2529 : 'KW' '2529'; -KW2530 : 'KW' '2530'; -KW2531 : 'KW' '2531'; -KW2532 : 'KW' '2532'; -KW2533 : 'KW' '2533'; -KW2534 : 'KW' '2534'; -KW2535 : 'KW' '2535'; -KW2536 : 'KW' '2536'; -KW2537 : 'KW' '2537'; -KW2538 : 'KW' '2538'; -KW2539 : 'KW' '2539'; -KW2540 : 'KW' '2540'; -KW2541 : 'KW' '2541'; -KW2542 : 'KW' '2542'; -KW2543 : 'KW' '2543'; -KW2544 : 'KW' '2544'; -KW2545 : 'KW' '2545'; -KW2546 : 'KW' 
'2546'; -KW2547 : 'KW' '2547'; -KW2548 : 'KW' '2548'; -KW2549 : 'KW' '2549'; -KW2550 : 'KW' '2550'; -KW2551 : 'KW' '2551'; -KW2552 : 'KW' '2552'; -KW2553 : 'KW' '2553'; -KW2554 : 'KW' '2554'; -KW2555 : 'KW' '2555'; -KW2556 : 'KW' '2556'; -KW2557 : 'KW' '2557'; -KW2558 : 'KW' '2558'; -KW2559 : 'KW' '2559'; -KW2560 : 'KW' '2560'; -KW2561 : 'KW' '2561'; -KW2562 : 'KW' '2562'; -KW2563 : 'KW' '2563'; -KW2564 : 'KW' '2564'; -KW2565 : 'KW' '2565'; -KW2566 : 'KW' '2566'; -KW2567 : 'KW' '2567'; -KW2568 : 'KW' '2568'; -KW2569 : 'KW' '2569'; -KW2570 : 'KW' '2570'; -KW2571 : 'KW' '2571'; -KW2572 : 'KW' '2572'; -KW2573 : 'KW' '2573'; -KW2574 : 'KW' '2574'; -KW2575 : 'KW' '2575'; -KW2576 : 'KW' '2576'; -KW2577 : 'KW' '2577'; -KW2578 : 'KW' '2578'; -KW2579 : 'KW' '2579'; -KW2580 : 'KW' '2580'; -KW2581 : 'KW' '2581'; -KW2582 : 'KW' '2582'; -KW2583 : 'KW' '2583'; -KW2584 : 'KW' '2584'; -KW2585 : 'KW' '2585'; -KW2586 : 'KW' '2586'; -KW2587 : 'KW' '2587'; -KW2588 : 'KW' '2588'; -KW2589 : 'KW' '2589'; -KW2590 : 'KW' '2590'; -KW2591 : 'KW' '2591'; -KW2592 : 'KW' '2592'; -KW2593 : 'KW' '2593'; -KW2594 : 'KW' '2594'; -KW2595 : 'KW' '2595'; -KW2596 : 'KW' '2596'; -KW2597 : 'KW' '2597'; -KW2598 : 'KW' '2598'; -KW2599 : 'KW' '2599'; -KW2600 : 'KW' '2600'; -KW2601 : 'KW' '2601'; -KW2602 : 'KW' '2602'; -KW2603 : 'KW' '2603'; -KW2604 : 'KW' '2604'; -KW2605 : 'KW' '2605'; -KW2606 : 'KW' '2606'; -KW2607 : 'KW' '2607'; -KW2608 : 'KW' '2608'; -KW2609 : 'KW' '2609'; -KW2610 : 'KW' '2610'; -KW2611 : 'KW' '2611'; -KW2612 : 'KW' '2612'; -KW2613 : 'KW' '2613'; -KW2614 : 'KW' '2614'; -KW2615 : 'KW' '2615'; -KW2616 : 'KW' '2616'; -KW2617 : 'KW' '2617'; -KW2618 : 'KW' '2618'; -KW2619 : 'KW' '2619'; -KW2620 : 'KW' '2620'; -KW2621 : 'KW' '2621'; -KW2622 : 'KW' '2622'; -KW2623 : 'KW' '2623'; -KW2624 : 'KW' '2624'; -KW2625 : 'KW' '2625'; -KW2626 : 'KW' '2626'; -KW2627 : 'KW' '2627'; -KW2628 : 'KW' '2628'; -KW2629 : 'KW' '2629'; -KW2630 : 'KW' '2630'; -KW2631 : 'KW' '2631'; -KW2632 : 'KW' '2632'; -KW2633 : 
'KW' '2633'; -KW2634 : 'KW' '2634'; -KW2635 : 'KW' '2635'; -KW2636 : 'KW' '2636'; -KW2637 : 'KW' '2637'; -KW2638 : 'KW' '2638'; -KW2639 : 'KW' '2639'; -KW2640 : 'KW' '2640'; -KW2641 : 'KW' '2641'; -KW2642 : 'KW' '2642'; -KW2643 : 'KW' '2643'; -KW2644 : 'KW' '2644'; -KW2645 : 'KW' '2645'; -KW2646 : 'KW' '2646'; -KW2647 : 'KW' '2647'; -KW2648 : 'KW' '2648'; -KW2649 : 'KW' '2649'; -KW2650 : 'KW' '2650'; -KW2651 : 'KW' '2651'; -KW2652 : 'KW' '2652'; -KW2653 : 'KW' '2653'; -KW2654 : 'KW' '2654'; -KW2655 : 'KW' '2655'; -KW2656 : 'KW' '2656'; -KW2657 : 'KW' '2657'; -KW2658 : 'KW' '2658'; -KW2659 : 'KW' '2659'; -KW2660 : 'KW' '2660'; -KW2661 : 'KW' '2661'; -KW2662 : 'KW' '2662'; -KW2663 : 'KW' '2663'; -KW2664 : 'KW' '2664'; -KW2665 : 'KW' '2665'; -KW2666 : 'KW' '2666'; -KW2667 : 'KW' '2667'; -KW2668 : 'KW' '2668'; -KW2669 : 'KW' '2669'; -KW2670 : 'KW' '2670'; -KW2671 : 'KW' '2671'; -KW2672 : 'KW' '2672'; -KW2673 : 'KW' '2673'; -KW2674 : 'KW' '2674'; -KW2675 : 'KW' '2675'; -KW2676 : 'KW' '2676'; -KW2677 : 'KW' '2677'; -KW2678 : 'KW' '2678'; -KW2679 : 'KW' '2679'; -KW2680 : 'KW' '2680'; -KW2681 : 'KW' '2681'; -KW2682 : 'KW' '2682'; -KW2683 : 'KW' '2683'; -KW2684 : 'KW' '2684'; -KW2685 : 'KW' '2685'; -KW2686 : 'KW' '2686'; -KW2687 : 'KW' '2687'; -KW2688 : 'KW' '2688'; -KW2689 : 'KW' '2689'; -KW2690 : 'KW' '2690'; -KW2691 : 'KW' '2691'; -KW2692 : 'KW' '2692'; -KW2693 : 'KW' '2693'; -KW2694 : 'KW' '2694'; -KW2695 : 'KW' '2695'; -KW2696 : 'KW' '2696'; -KW2697 : 'KW' '2697'; -KW2698 : 'KW' '2698'; -KW2699 : 'KW' '2699'; -KW2700 : 'KW' '2700'; -KW2701 : 'KW' '2701'; -KW2702 : 'KW' '2702'; -KW2703 : 'KW' '2703'; -KW2704 : 'KW' '2704'; -KW2705 : 'KW' '2705'; -KW2706 : 'KW' '2706'; -KW2707 : 'KW' '2707'; -KW2708 : 'KW' '2708'; -KW2709 : 'KW' '2709'; -KW2710 : 'KW' '2710'; -KW2711 : 'KW' '2711'; -KW2712 : 'KW' '2712'; -KW2713 : 'KW' '2713'; -KW2714 : 'KW' '2714'; -KW2715 : 'KW' '2715'; -KW2716 : 'KW' '2716'; -KW2717 : 'KW' '2717'; -KW2718 : 'KW' '2718'; -KW2719 : 'KW' '2719'; -KW2720 
: 'KW' '2720'; -KW2721 : 'KW' '2721'; -KW2722 : 'KW' '2722'; -KW2723 : 'KW' '2723'; -KW2724 : 'KW' '2724'; -KW2725 : 'KW' '2725'; -KW2726 : 'KW' '2726'; -KW2727 : 'KW' '2727'; -KW2728 : 'KW' '2728'; -KW2729 : 'KW' '2729'; -KW2730 : 'KW' '2730'; -KW2731 : 'KW' '2731'; -KW2732 : 'KW' '2732'; -KW2733 : 'KW' '2733'; -KW2734 : 'KW' '2734'; -KW2735 : 'KW' '2735'; -KW2736 : 'KW' '2736'; -KW2737 : 'KW' '2737'; -KW2738 : 'KW' '2738'; -KW2739 : 'KW' '2739'; -KW2740 : 'KW' '2740'; -KW2741 : 'KW' '2741'; -KW2742 : 'KW' '2742'; -KW2743 : 'KW' '2743'; -KW2744 : 'KW' '2744'; -KW2745 : 'KW' '2745'; -KW2746 : 'KW' '2746'; -KW2747 : 'KW' '2747'; -KW2748 : 'KW' '2748'; -KW2749 : 'KW' '2749'; -KW2750 : 'KW' '2750'; -KW2751 : 'KW' '2751'; -KW2752 : 'KW' '2752'; -KW2753 : 'KW' '2753'; -KW2754 : 'KW' '2754'; -KW2755 : 'KW' '2755'; -KW2756 : 'KW' '2756'; -KW2757 : 'KW' '2757'; -KW2758 : 'KW' '2758'; -KW2759 : 'KW' '2759'; -KW2760 : 'KW' '2760'; -KW2761 : 'KW' '2761'; -KW2762 : 'KW' '2762'; -KW2763 : 'KW' '2763'; -KW2764 : 'KW' '2764'; -KW2765 : 'KW' '2765'; -KW2766 : 'KW' '2766'; -KW2767 : 'KW' '2767'; -KW2768 : 'KW' '2768'; -KW2769 : 'KW' '2769'; -KW2770 : 'KW' '2770'; -KW2771 : 'KW' '2771'; -KW2772 : 'KW' '2772'; -KW2773 : 'KW' '2773'; -KW2774 : 'KW' '2774'; -KW2775 : 'KW' '2775'; -KW2776 : 'KW' '2776'; -KW2777 : 'KW' '2777'; -KW2778 : 'KW' '2778'; -KW2779 : 'KW' '2779'; -KW2780 : 'KW' '2780'; -KW2781 : 'KW' '2781'; -KW2782 : 'KW' '2782'; -KW2783 : 'KW' '2783'; -KW2784 : 'KW' '2784'; -KW2785 : 'KW' '2785'; -KW2786 : 'KW' '2786'; -KW2787 : 'KW' '2787'; -KW2788 : 'KW' '2788'; -KW2789 : 'KW' '2789'; -KW2790 : 'KW' '2790'; -KW2791 : 'KW' '2791'; -KW2792 : 'KW' '2792'; -KW2793 : 'KW' '2793'; -KW2794 : 'KW' '2794'; -KW2795 : 'KW' '2795'; -KW2796 : 'KW' '2796'; -KW2797 : 'KW' '2797'; -KW2798 : 'KW' '2798'; -KW2799 : 'KW' '2799'; -KW2800 : 'KW' '2800'; -KW2801 : 'KW' '2801'; -KW2802 : 'KW' '2802'; -KW2803 : 'KW' '2803'; -KW2804 : 'KW' '2804'; -KW2805 : 'KW' '2805'; -KW2806 : 'KW' '2806'; 
-KW2807 : 'KW' '2807'; -KW2808 : 'KW' '2808'; -KW2809 : 'KW' '2809'; -KW2810 : 'KW' '2810'; -KW2811 : 'KW' '2811'; -KW2812 : 'KW' '2812'; -KW2813 : 'KW' '2813'; -KW2814 : 'KW' '2814'; -KW2815 : 'KW' '2815'; -KW2816 : 'KW' '2816'; -KW2817 : 'KW' '2817'; -KW2818 : 'KW' '2818'; -KW2819 : 'KW' '2819'; -KW2820 : 'KW' '2820'; -KW2821 : 'KW' '2821'; -KW2822 : 'KW' '2822'; -KW2823 : 'KW' '2823'; -KW2824 : 'KW' '2824'; -KW2825 : 'KW' '2825'; -KW2826 : 'KW' '2826'; -KW2827 : 'KW' '2827'; -KW2828 : 'KW' '2828'; -KW2829 : 'KW' '2829'; -KW2830 : 'KW' '2830'; -KW2831 : 'KW' '2831'; -KW2832 : 'KW' '2832'; -KW2833 : 'KW' '2833'; -KW2834 : 'KW' '2834'; -KW2835 : 'KW' '2835'; -KW2836 : 'KW' '2836'; -KW2837 : 'KW' '2837'; -KW2838 : 'KW' '2838'; -KW2839 : 'KW' '2839'; -KW2840 : 'KW' '2840'; -KW2841 : 'KW' '2841'; -KW2842 : 'KW' '2842'; -KW2843 : 'KW' '2843'; -KW2844 : 'KW' '2844'; -KW2845 : 'KW' '2845'; -KW2846 : 'KW' '2846'; -KW2847 : 'KW' '2847'; -KW2848 : 'KW' '2848'; -KW2849 : 'KW' '2849'; -KW2850 : 'KW' '2850'; -KW2851 : 'KW' '2851'; -KW2852 : 'KW' '2852'; -KW2853 : 'KW' '2853'; -KW2854 : 'KW' '2854'; -KW2855 : 'KW' '2855'; -KW2856 : 'KW' '2856'; -KW2857 : 'KW' '2857'; -KW2858 : 'KW' '2858'; -KW2859 : 'KW' '2859'; -KW2860 : 'KW' '2860'; -KW2861 : 'KW' '2861'; -KW2862 : 'KW' '2862'; -KW2863 : 'KW' '2863'; -KW2864 : 'KW' '2864'; -KW2865 : 'KW' '2865'; -KW2866 : 'KW' '2866'; -KW2867 : 'KW' '2867'; -KW2868 : 'KW' '2868'; -KW2869 : 'KW' '2869'; -KW2870 : 'KW' '2870'; -KW2871 : 'KW' '2871'; -KW2872 : 'KW' '2872'; -KW2873 : 'KW' '2873'; -KW2874 : 'KW' '2874'; -KW2875 : 'KW' '2875'; -KW2876 : 'KW' '2876'; -KW2877 : 'KW' '2877'; -KW2878 : 'KW' '2878'; -KW2879 : 'KW' '2879'; -KW2880 : 'KW' '2880'; -KW2881 : 'KW' '2881'; -KW2882 : 'KW' '2882'; -KW2883 : 'KW' '2883'; -KW2884 : 'KW' '2884'; -KW2885 : 'KW' '2885'; -KW2886 : 'KW' '2886'; -KW2887 : 'KW' '2887'; -KW2888 : 'KW' '2888'; -KW2889 : 'KW' '2889'; -KW2890 : 'KW' '2890'; -KW2891 : 'KW' '2891'; -KW2892 : 'KW' '2892'; -KW2893 : 'KW' 
'2893'; -KW2894 : 'KW' '2894'; -KW2895 : 'KW' '2895'; -KW2896 : 'KW' '2896'; -KW2897 : 'KW' '2897'; -KW2898 : 'KW' '2898'; -KW2899 : 'KW' '2899'; -KW2900 : 'KW' '2900'; -KW2901 : 'KW' '2901'; -KW2902 : 'KW' '2902'; -KW2903 : 'KW' '2903'; -KW2904 : 'KW' '2904'; -KW2905 : 'KW' '2905'; -KW2906 : 'KW' '2906'; -KW2907 : 'KW' '2907'; -KW2908 : 'KW' '2908'; -KW2909 : 'KW' '2909'; -KW2910 : 'KW' '2910'; -KW2911 : 'KW' '2911'; -KW2912 : 'KW' '2912'; -KW2913 : 'KW' '2913'; -KW2914 : 'KW' '2914'; -KW2915 : 'KW' '2915'; -KW2916 : 'KW' '2916'; -KW2917 : 'KW' '2917'; -KW2918 : 'KW' '2918'; -KW2919 : 'KW' '2919'; -KW2920 : 'KW' '2920'; -KW2921 : 'KW' '2921'; -KW2922 : 'KW' '2922'; -KW2923 : 'KW' '2923'; -KW2924 : 'KW' '2924'; -KW2925 : 'KW' '2925'; -KW2926 : 'KW' '2926'; -KW2927 : 'KW' '2927'; -KW2928 : 'KW' '2928'; -KW2929 : 'KW' '2929'; -KW2930 : 'KW' '2930'; -KW2931 : 'KW' '2931'; -KW2932 : 'KW' '2932'; -KW2933 : 'KW' '2933'; -KW2934 : 'KW' '2934'; -KW2935 : 'KW' '2935'; -KW2936 : 'KW' '2936'; -KW2937 : 'KW' '2937'; -KW2938 : 'KW' '2938'; -KW2939 : 'KW' '2939'; -KW2940 : 'KW' '2940'; -KW2941 : 'KW' '2941'; -KW2942 : 'KW' '2942'; -KW2943 : 'KW' '2943'; -KW2944 : 'KW' '2944'; -KW2945 : 'KW' '2945'; -KW2946 : 'KW' '2946'; -KW2947 : 'KW' '2947'; -KW2948 : 'KW' '2948'; -KW2949 : 'KW' '2949'; -KW2950 : 'KW' '2950'; -KW2951 : 'KW' '2951'; -KW2952 : 'KW' '2952'; -KW2953 : 'KW' '2953'; -KW2954 : 'KW' '2954'; -KW2955 : 'KW' '2955'; -KW2956 : 'KW' '2956'; -KW2957 : 'KW' '2957'; -KW2958 : 'KW' '2958'; -KW2959 : 'KW' '2959'; -KW2960 : 'KW' '2960'; -KW2961 : 'KW' '2961'; -KW2962 : 'KW' '2962'; -KW2963 : 'KW' '2963'; -KW2964 : 'KW' '2964'; -KW2965 : 'KW' '2965'; -KW2966 : 'KW' '2966'; -KW2967 : 'KW' '2967'; -KW2968 : 'KW' '2968'; -KW2969 : 'KW' '2969'; -KW2970 : 'KW' '2970'; -KW2971 : 'KW' '2971'; -KW2972 : 'KW' '2972'; -KW2973 : 'KW' '2973'; -KW2974 : 'KW' '2974'; -KW2975 : 'KW' '2975'; -KW2976 : 'KW' '2976'; -KW2977 : 'KW' '2977'; -KW2978 : 'KW' '2978'; -KW2979 : 'KW' '2979'; -KW2980 : 
'KW' '2980'; -KW2981 : 'KW' '2981'; -KW2982 : 'KW' '2982'; -KW2983 : 'KW' '2983'; -KW2984 : 'KW' '2984'; -KW2985 : 'KW' '2985'; -KW2986 : 'KW' '2986'; -KW2987 : 'KW' '2987'; -KW2988 : 'KW' '2988'; -KW2989 : 'KW' '2989'; -KW2990 : 'KW' '2990'; -KW2991 : 'KW' '2991'; -KW2992 : 'KW' '2992'; -KW2993 : 'KW' '2993'; -KW2994 : 'KW' '2994'; -KW2995 : 'KW' '2995'; -KW2996 : 'KW' '2996'; -KW2997 : 'KW' '2997'; -KW2998 : 'KW' '2998'; -KW2999 : 'KW' '2999'; -KW3000 : 'KW' '3000'; -KW3001 : 'KW' '3001'; -KW3002 : 'KW' '3002'; -KW3003 : 'KW' '3003'; -KW3004 : 'KW' '3004'; -KW3005 : 'KW' '3005'; -KW3006 : 'KW' '3006'; -KW3007 : 'KW' '3007'; -KW3008 : 'KW' '3008'; -KW3009 : 'KW' '3009'; -KW3010 : 'KW' '3010'; -KW3011 : 'KW' '3011'; -KW3012 : 'KW' '3012'; -KW3013 : 'KW' '3013'; -KW3014 : 'KW' '3014'; -KW3015 : 'KW' '3015'; -KW3016 : 'KW' '3016'; -KW3017 : 'KW' '3017'; -KW3018 : 'KW' '3018'; -KW3019 : 'KW' '3019'; -KW3020 : 'KW' '3020'; -KW3021 : 'KW' '3021'; -KW3022 : 'KW' '3022'; -KW3023 : 'KW' '3023'; -KW3024 : 'KW' '3024'; -KW3025 : 'KW' '3025'; -KW3026 : 'KW' '3026'; -KW3027 : 'KW' '3027'; -KW3028 : 'KW' '3028'; -KW3029 : 'KW' '3029'; -KW3030 : 'KW' '3030'; -KW3031 : 'KW' '3031'; -KW3032 : 'KW' '3032'; -KW3033 : 'KW' '3033'; -KW3034 : 'KW' '3034'; -KW3035 : 'KW' '3035'; -KW3036 : 'KW' '3036'; -KW3037 : 'KW' '3037'; -KW3038 : 'KW' '3038'; -KW3039 : 'KW' '3039'; -KW3040 : 'KW' '3040'; -KW3041 : 'KW' '3041'; -KW3042 : 'KW' '3042'; -KW3043 : 'KW' '3043'; -KW3044 : 'KW' '3044'; -KW3045 : 'KW' '3045'; -KW3046 : 'KW' '3046'; -KW3047 : 'KW' '3047'; -KW3048 : 'KW' '3048'; -KW3049 : 'KW' '3049'; -KW3050 : 'KW' '3050'; -KW3051 : 'KW' '3051'; -KW3052 : 'KW' '3052'; -KW3053 : 'KW' '3053'; -KW3054 : 'KW' '3054'; -KW3055 : 'KW' '3055'; -KW3056 : 'KW' '3056'; -KW3057 : 'KW' '3057'; -KW3058 : 'KW' '3058'; -KW3059 : 'KW' '3059'; -KW3060 : 'KW' '3060'; -KW3061 : 'KW' '3061'; -KW3062 : 'KW' '3062'; -KW3063 : 'KW' '3063'; -KW3064 : 'KW' '3064'; -KW3065 : 'KW' '3065'; -KW3066 : 'KW' '3066'; -KW3067 
: 'KW' '3067'; -KW3068 : 'KW' '3068'; -KW3069 : 'KW' '3069'; -KW3070 : 'KW' '3070'; -KW3071 : 'KW' '3071'; -KW3072 : 'KW' '3072'; -KW3073 : 'KW' '3073'; -KW3074 : 'KW' '3074'; -KW3075 : 'KW' '3075'; -KW3076 : 'KW' '3076'; -KW3077 : 'KW' '3077'; -KW3078 : 'KW' '3078'; -KW3079 : 'KW' '3079'; -KW3080 : 'KW' '3080'; -KW3081 : 'KW' '3081'; -KW3082 : 'KW' '3082'; -KW3083 : 'KW' '3083'; -KW3084 : 'KW' '3084'; -KW3085 : 'KW' '3085'; -KW3086 : 'KW' '3086'; -KW3087 : 'KW' '3087'; -KW3088 : 'KW' '3088'; -KW3089 : 'KW' '3089'; -KW3090 : 'KW' '3090'; -KW3091 : 'KW' '3091'; -KW3092 : 'KW' '3092'; -KW3093 : 'KW' '3093'; -KW3094 : 'KW' '3094'; -KW3095 : 'KW' '3095'; -KW3096 : 'KW' '3096'; -KW3097 : 'KW' '3097'; -KW3098 : 'KW' '3098'; -KW3099 : 'KW' '3099'; -KW3100 : 'KW' '3100'; -KW3101 : 'KW' '3101'; -KW3102 : 'KW' '3102'; -KW3103 : 'KW' '3103'; -KW3104 : 'KW' '3104'; -KW3105 : 'KW' '3105'; -KW3106 : 'KW' '3106'; -KW3107 : 'KW' '3107'; -KW3108 : 'KW' '3108'; -KW3109 : 'KW' '3109'; -KW3110 : 'KW' '3110'; -KW3111 : 'KW' '3111'; -KW3112 : 'KW' '3112'; -KW3113 : 'KW' '3113'; -KW3114 : 'KW' '3114'; -KW3115 : 'KW' '3115'; -KW3116 : 'KW' '3116'; -KW3117 : 'KW' '3117'; -KW3118 : 'KW' '3118'; -KW3119 : 'KW' '3119'; -KW3120 : 'KW' '3120'; -KW3121 : 'KW' '3121'; -KW3122 : 'KW' '3122'; -KW3123 : 'KW' '3123'; -KW3124 : 'KW' '3124'; -KW3125 : 'KW' '3125'; -KW3126 : 'KW' '3126'; -KW3127 : 'KW' '3127'; -KW3128 : 'KW' '3128'; -KW3129 : 'KW' '3129'; -KW3130 : 'KW' '3130'; -KW3131 : 'KW' '3131'; -KW3132 : 'KW' '3132'; -KW3133 : 'KW' '3133'; -KW3134 : 'KW' '3134'; -KW3135 : 'KW' '3135'; -KW3136 : 'KW' '3136'; -KW3137 : 'KW' '3137'; -KW3138 : 'KW' '3138'; -KW3139 : 'KW' '3139'; -KW3140 : 'KW' '3140'; -KW3141 : 'KW' '3141'; -KW3142 : 'KW' '3142'; -KW3143 : 'KW' '3143'; -KW3144 : 'KW' '3144'; -KW3145 : 'KW' '3145'; -KW3146 : 'KW' '3146'; -KW3147 : 'KW' '3147'; -KW3148 : 'KW' '3148'; -KW3149 : 'KW' '3149'; -KW3150 : 'KW' '3150'; -KW3151 : 'KW' '3151'; -KW3152 : 'KW' '3152'; -KW3153 : 'KW' '3153'; 
-KW3154 : 'KW' '3154'; -KW3155 : 'KW' '3155'; -KW3156 : 'KW' '3156'; -KW3157 : 'KW' '3157'; -KW3158 : 'KW' '3158'; -KW3159 : 'KW' '3159'; -KW3160 : 'KW' '3160'; -KW3161 : 'KW' '3161'; -KW3162 : 'KW' '3162'; -KW3163 : 'KW' '3163'; -KW3164 : 'KW' '3164'; -KW3165 : 'KW' '3165'; -KW3166 : 'KW' '3166'; -KW3167 : 'KW' '3167'; -KW3168 : 'KW' '3168'; -KW3169 : 'KW' '3169'; -KW3170 : 'KW' '3170'; -KW3171 : 'KW' '3171'; -KW3172 : 'KW' '3172'; -KW3173 : 'KW' '3173'; -KW3174 : 'KW' '3174'; -KW3175 : 'KW' '3175'; -KW3176 : 'KW' '3176'; -KW3177 : 'KW' '3177'; -KW3178 : 'KW' '3178'; -KW3179 : 'KW' '3179'; -KW3180 : 'KW' '3180'; -KW3181 : 'KW' '3181'; -KW3182 : 'KW' '3182'; -KW3183 : 'KW' '3183'; -KW3184 : 'KW' '3184'; -KW3185 : 'KW' '3185'; -KW3186 : 'KW' '3186'; -KW3187 : 'KW' '3187'; -KW3188 : 'KW' '3188'; -KW3189 : 'KW' '3189'; -KW3190 : 'KW' '3190'; -KW3191 : 'KW' '3191'; -KW3192 : 'KW' '3192'; -KW3193 : 'KW' '3193'; -KW3194 : 'KW' '3194'; -KW3195 : 'KW' '3195'; -KW3196 : 'KW' '3196'; -KW3197 : 'KW' '3197'; -KW3198 : 'KW' '3198'; -KW3199 : 'KW' '3199'; -KW3200 : 'KW' '3200'; -KW3201 : 'KW' '3201'; -KW3202 : 'KW' '3202'; -KW3203 : 'KW' '3203'; -KW3204 : 'KW' '3204'; -KW3205 : 'KW' '3205'; -KW3206 : 'KW' '3206'; -KW3207 : 'KW' '3207'; -KW3208 : 'KW' '3208'; -KW3209 : 'KW' '3209'; -KW3210 : 'KW' '3210'; -KW3211 : 'KW' '3211'; -KW3212 : 'KW' '3212'; -KW3213 : 'KW' '3213'; -KW3214 : 'KW' '3214'; -KW3215 : 'KW' '3215'; -KW3216 : 'KW' '3216'; -KW3217 : 'KW' '3217'; -KW3218 : 'KW' '3218'; -KW3219 : 'KW' '3219'; -KW3220 : 'KW' '3220'; -KW3221 : 'KW' '3221'; -KW3222 : 'KW' '3222'; -KW3223 : 'KW' '3223'; -KW3224 : 'KW' '3224'; -KW3225 : 'KW' '3225'; -KW3226 : 'KW' '3226'; -KW3227 : 'KW' '3227'; -KW3228 : 'KW' '3228'; -KW3229 : 'KW' '3229'; -KW3230 : 'KW' '3230'; -KW3231 : 'KW' '3231'; -KW3232 : 'KW' '3232'; -KW3233 : 'KW' '3233'; -KW3234 : 'KW' '3234'; -KW3235 : 'KW' '3235'; -KW3236 : 'KW' '3236'; -KW3237 : 'KW' '3237'; -KW3238 : 'KW' '3238'; -KW3239 : 'KW' '3239'; -KW3240 : 'KW' 
'3240'; -KW3241 : 'KW' '3241'; -KW3242 : 'KW' '3242'; -KW3243 : 'KW' '3243'; -KW3244 : 'KW' '3244'; -KW3245 : 'KW' '3245'; -KW3246 : 'KW' '3246'; -KW3247 : 'KW' '3247'; -KW3248 : 'KW' '3248'; -KW3249 : 'KW' '3249'; -KW3250 : 'KW' '3250'; -KW3251 : 'KW' '3251'; -KW3252 : 'KW' '3252'; -KW3253 : 'KW' '3253'; -KW3254 : 'KW' '3254'; -KW3255 : 'KW' '3255'; -KW3256 : 'KW' '3256'; -KW3257 : 'KW' '3257'; -KW3258 : 'KW' '3258'; -KW3259 : 'KW' '3259'; -KW3260 : 'KW' '3260'; -KW3261 : 'KW' '3261'; -KW3262 : 'KW' '3262'; -KW3263 : 'KW' '3263'; -KW3264 : 'KW' '3264'; -KW3265 : 'KW' '3265'; -KW3266 : 'KW' '3266'; -KW3267 : 'KW' '3267'; -KW3268 : 'KW' '3268'; -KW3269 : 'KW' '3269'; -KW3270 : 'KW' '3270'; -KW3271 : 'KW' '3271'; -KW3272 : 'KW' '3272'; -KW3273 : 'KW' '3273'; -KW3274 : 'KW' '3274'; -KW3275 : 'KW' '3275'; -KW3276 : 'KW' '3276'; -KW3277 : 'KW' '3277'; -KW3278 : 'KW' '3278'; -KW3279 : 'KW' '3279'; -KW3280 : 'KW' '3280'; -KW3281 : 'KW' '3281'; -KW3282 : 'KW' '3282'; -KW3283 : 'KW' '3283'; -KW3284 : 'KW' '3284'; -KW3285 : 'KW' '3285'; -KW3286 : 'KW' '3286'; -KW3287 : 'KW' '3287'; -KW3288 : 'KW' '3288'; -KW3289 : 'KW' '3289'; -KW3290 : 'KW' '3290'; -KW3291 : 'KW' '3291'; -KW3292 : 'KW' '3292'; -KW3293 : 'KW' '3293'; -KW3294 : 'KW' '3294'; -KW3295 : 'KW' '3295'; -KW3296 : 'KW' '3296'; -KW3297 : 'KW' '3297'; -KW3298 : 'KW' '3298'; -KW3299 : 'KW' '3299'; -KW3300 : 'KW' '3300'; -KW3301 : 'KW' '3301'; -KW3302 : 'KW' '3302'; -KW3303 : 'KW' '3303'; -KW3304 : 'KW' '3304'; -KW3305 : 'KW' '3305'; -KW3306 : 'KW' '3306'; -KW3307 : 'KW' '3307'; -KW3308 : 'KW' '3308'; -KW3309 : 'KW' '3309'; -KW3310 : 'KW' '3310'; -KW3311 : 'KW' '3311'; -KW3312 : 'KW' '3312'; -KW3313 : 'KW' '3313'; -KW3314 : 'KW' '3314'; -KW3315 : 'KW' '3315'; -KW3316 : 'KW' '3316'; -KW3317 : 'KW' '3317'; -KW3318 : 'KW' '3318'; -KW3319 : 'KW' '3319'; -KW3320 : 'KW' '3320'; -KW3321 : 'KW' '3321'; -KW3322 : 'KW' '3322'; -KW3323 : 'KW' '3323'; -KW3324 : 'KW' '3324'; -KW3325 : 'KW' '3325'; -KW3326 : 'KW' '3326'; -KW3327 : 
'KW' '3327'; -KW3328 : 'KW' '3328'; -KW3329 : 'KW' '3329'; -KW3330 : 'KW' '3330'; -KW3331 : 'KW' '3331'; -KW3332 : 'KW' '3332'; -KW3333 : 'KW' '3333'; -KW3334 : 'KW' '3334'; -KW3335 : 'KW' '3335'; -KW3336 : 'KW' '3336'; -KW3337 : 'KW' '3337'; -KW3338 : 'KW' '3338'; -KW3339 : 'KW' '3339'; -KW3340 : 'KW' '3340'; -KW3341 : 'KW' '3341'; -KW3342 : 'KW' '3342'; -KW3343 : 'KW' '3343'; -KW3344 : 'KW' '3344'; -KW3345 : 'KW' '3345'; -KW3346 : 'KW' '3346'; -KW3347 : 'KW' '3347'; -KW3348 : 'KW' '3348'; -KW3349 : 'KW' '3349'; -KW3350 : 'KW' '3350'; -KW3351 : 'KW' '3351'; -KW3352 : 'KW' '3352'; -KW3353 : 'KW' '3353'; -KW3354 : 'KW' '3354'; -KW3355 : 'KW' '3355'; -KW3356 : 'KW' '3356'; -KW3357 : 'KW' '3357'; -KW3358 : 'KW' '3358'; -KW3359 : 'KW' '3359'; -KW3360 : 'KW' '3360'; -KW3361 : 'KW' '3361'; -KW3362 : 'KW' '3362'; -KW3363 : 'KW' '3363'; -KW3364 : 'KW' '3364'; -KW3365 : 'KW' '3365'; -KW3366 : 'KW' '3366'; -KW3367 : 'KW' '3367'; -KW3368 : 'KW' '3368'; -KW3369 : 'KW' '3369'; -KW3370 : 'KW' '3370'; -KW3371 : 'KW' '3371'; -KW3372 : 'KW' '3372'; -KW3373 : 'KW' '3373'; -KW3374 : 'KW' '3374'; -KW3375 : 'KW' '3375'; -KW3376 : 'KW' '3376'; -KW3377 : 'KW' '3377'; -KW3378 : 'KW' '3378'; -KW3379 : 'KW' '3379'; -KW3380 : 'KW' '3380'; -KW3381 : 'KW' '3381'; -KW3382 : 'KW' '3382'; -KW3383 : 'KW' '3383'; -KW3384 : 'KW' '3384'; -KW3385 : 'KW' '3385'; -KW3386 : 'KW' '3386'; -KW3387 : 'KW' '3387'; -KW3388 : 'KW' '3388'; -KW3389 : 'KW' '3389'; -KW3390 : 'KW' '3390'; -KW3391 : 'KW' '3391'; -KW3392 : 'KW' '3392'; -KW3393 : 'KW' '3393'; -KW3394 : 'KW' '3394'; -KW3395 : 'KW' '3395'; -KW3396 : 'KW' '3396'; -KW3397 : 'KW' '3397'; -KW3398 : 'KW' '3398'; -KW3399 : 'KW' '3399'; -KW3400 : 'KW' '3400'; -KW3401 : 'KW' '3401'; -KW3402 : 'KW' '3402'; -KW3403 : 'KW' '3403'; -KW3404 : 'KW' '3404'; -KW3405 : 'KW' '3405'; -KW3406 : 'KW' '3406'; -KW3407 : 'KW' '3407'; -KW3408 : 'KW' '3408'; -KW3409 : 'KW' '3409'; -KW3410 : 'KW' '3410'; -KW3411 : 'KW' '3411'; -KW3412 : 'KW' '3412'; -KW3413 : 'KW' '3413'; -KW3414 
: 'KW' '3414'; -KW3415 : 'KW' '3415'; -KW3416 : 'KW' '3416'; -KW3417 : 'KW' '3417'; -KW3418 : 'KW' '3418'; -KW3419 : 'KW' '3419'; -KW3420 : 'KW' '3420'; -KW3421 : 'KW' '3421'; -KW3422 : 'KW' '3422'; -KW3423 : 'KW' '3423'; -KW3424 : 'KW' '3424'; -KW3425 : 'KW' '3425'; -KW3426 : 'KW' '3426'; -KW3427 : 'KW' '3427'; -KW3428 : 'KW' '3428'; -KW3429 : 'KW' '3429'; -KW3430 : 'KW' '3430'; -KW3431 : 'KW' '3431'; -KW3432 : 'KW' '3432'; -KW3433 : 'KW' '3433'; -KW3434 : 'KW' '3434'; -KW3435 : 'KW' '3435'; -KW3436 : 'KW' '3436'; -KW3437 : 'KW' '3437'; -KW3438 : 'KW' '3438'; -KW3439 : 'KW' '3439'; -KW3440 : 'KW' '3440'; -KW3441 : 'KW' '3441'; -KW3442 : 'KW' '3442'; -KW3443 : 'KW' '3443'; -KW3444 : 'KW' '3444'; -KW3445 : 'KW' '3445'; -KW3446 : 'KW' '3446'; -KW3447 : 'KW' '3447'; -KW3448 : 'KW' '3448'; -KW3449 : 'KW' '3449'; -KW3450 : 'KW' '3450'; -KW3451 : 'KW' '3451'; -KW3452 : 'KW' '3452'; -KW3453 : 'KW' '3453'; -KW3454 : 'KW' '3454'; -KW3455 : 'KW' '3455'; -KW3456 : 'KW' '3456'; -KW3457 : 'KW' '3457'; -KW3458 : 'KW' '3458'; -KW3459 : 'KW' '3459'; -KW3460 : 'KW' '3460'; -KW3461 : 'KW' '3461'; -KW3462 : 'KW' '3462'; -KW3463 : 'KW' '3463'; -KW3464 : 'KW' '3464'; -KW3465 : 'KW' '3465'; -KW3466 : 'KW' '3466'; -KW3467 : 'KW' '3467'; -KW3468 : 'KW' '3468'; -KW3469 : 'KW' '3469'; -KW3470 : 'KW' '3470'; -KW3471 : 'KW' '3471'; -KW3472 : 'KW' '3472'; -KW3473 : 'KW' '3473'; -KW3474 : 'KW' '3474'; -KW3475 : 'KW' '3475'; -KW3476 : 'KW' '3476'; -KW3477 : 'KW' '3477'; -KW3478 : 'KW' '3478'; -KW3479 : 'KW' '3479'; -KW3480 : 'KW' '3480'; -KW3481 : 'KW' '3481'; -KW3482 : 'KW' '3482'; -KW3483 : 'KW' '3483'; -KW3484 : 'KW' '3484'; -KW3485 : 'KW' '3485'; -KW3486 : 'KW' '3486'; -KW3487 : 'KW' '3487'; -KW3488 : 'KW' '3488'; -KW3489 : 'KW' '3489'; -KW3490 : 'KW' '3490'; -KW3491 : 'KW' '3491'; -KW3492 : 'KW' '3492'; -KW3493 : 'KW' '3493'; -KW3494 : 'KW' '3494'; -KW3495 : 'KW' '3495'; -KW3496 : 'KW' '3496'; -KW3497 : 'KW' '3497'; -KW3498 : 'KW' '3498'; -KW3499 : 'KW' '3499'; -KW3500 : 'KW' '3500'; 
-KW3501 : 'KW' '3501'; -KW3502 : 'KW' '3502'; -KW3503 : 'KW' '3503'; -KW3504 : 'KW' '3504'; -KW3505 : 'KW' '3505'; -KW3506 : 'KW' '3506'; -KW3507 : 'KW' '3507'; -KW3508 : 'KW' '3508'; -KW3509 : 'KW' '3509'; -KW3510 : 'KW' '3510'; -KW3511 : 'KW' '3511'; -KW3512 : 'KW' '3512'; -KW3513 : 'KW' '3513'; -KW3514 : 'KW' '3514'; -KW3515 : 'KW' '3515'; -KW3516 : 'KW' '3516'; -KW3517 : 'KW' '3517'; -KW3518 : 'KW' '3518'; -KW3519 : 'KW' '3519'; -KW3520 : 'KW' '3520'; -KW3521 : 'KW' '3521'; -KW3522 : 'KW' '3522'; -KW3523 : 'KW' '3523'; -KW3524 : 'KW' '3524'; -KW3525 : 'KW' '3525'; -KW3526 : 'KW' '3526'; -KW3527 : 'KW' '3527'; -KW3528 : 'KW' '3528'; -KW3529 : 'KW' '3529'; -KW3530 : 'KW' '3530'; -KW3531 : 'KW' '3531'; -KW3532 : 'KW' '3532'; -KW3533 : 'KW' '3533'; -KW3534 : 'KW' '3534'; -KW3535 : 'KW' '3535'; -KW3536 : 'KW' '3536'; -KW3537 : 'KW' '3537'; -KW3538 : 'KW' '3538'; -KW3539 : 'KW' '3539'; -KW3540 : 'KW' '3540'; -KW3541 : 'KW' '3541'; -KW3542 : 'KW' '3542'; -KW3543 : 'KW' '3543'; -KW3544 : 'KW' '3544'; -KW3545 : 'KW' '3545'; -KW3546 : 'KW' '3546'; -KW3547 : 'KW' '3547'; -KW3548 : 'KW' '3548'; -KW3549 : 'KW' '3549'; -KW3550 : 'KW' '3550'; -KW3551 : 'KW' '3551'; -KW3552 : 'KW' '3552'; -KW3553 : 'KW' '3553'; -KW3554 : 'KW' '3554'; -KW3555 : 'KW' '3555'; -KW3556 : 'KW' '3556'; -KW3557 : 'KW' '3557'; -KW3558 : 'KW' '3558'; -KW3559 : 'KW' '3559'; -KW3560 : 'KW' '3560'; -KW3561 : 'KW' '3561'; -KW3562 : 'KW' '3562'; -KW3563 : 'KW' '3563'; -KW3564 : 'KW' '3564'; -KW3565 : 'KW' '3565'; -KW3566 : 'KW' '3566'; -KW3567 : 'KW' '3567'; -KW3568 : 'KW' '3568'; -KW3569 : 'KW' '3569'; -KW3570 : 'KW' '3570'; -KW3571 : 'KW' '3571'; -KW3572 : 'KW' '3572'; -KW3573 : 'KW' '3573'; -KW3574 : 'KW' '3574'; -KW3575 : 'KW' '3575'; -KW3576 : 'KW' '3576'; -KW3577 : 'KW' '3577'; -KW3578 : 'KW' '3578'; -KW3579 : 'KW' '3579'; -KW3580 : 'KW' '3580'; -KW3581 : 'KW' '3581'; -KW3582 : 'KW' '3582'; -KW3583 : 'KW' '3583'; -KW3584 : 'KW' '3584'; -KW3585 : 'KW' '3585'; -KW3586 : 'KW' '3586'; -KW3587 : 'KW' 
'3587'; -KW3588 : 'KW' '3588'; -KW3589 : 'KW' '3589'; -KW3590 : 'KW' '3590'; -KW3591 : 'KW' '3591'; -KW3592 : 'KW' '3592'; -KW3593 : 'KW' '3593'; -KW3594 : 'KW' '3594'; -KW3595 : 'KW' '3595'; -KW3596 : 'KW' '3596'; -KW3597 : 'KW' '3597'; -KW3598 : 'KW' '3598'; -KW3599 : 'KW' '3599'; -KW3600 : 'KW' '3600'; -KW3601 : 'KW' '3601'; -KW3602 : 'KW' '3602'; -KW3603 : 'KW' '3603'; -KW3604 : 'KW' '3604'; -KW3605 : 'KW' '3605'; -KW3606 : 'KW' '3606'; -KW3607 : 'KW' '3607'; -KW3608 : 'KW' '3608'; -KW3609 : 'KW' '3609'; -KW3610 : 'KW' '3610'; -KW3611 : 'KW' '3611'; -KW3612 : 'KW' '3612'; -KW3613 : 'KW' '3613'; -KW3614 : 'KW' '3614'; -KW3615 : 'KW' '3615'; -KW3616 : 'KW' '3616'; -KW3617 : 'KW' '3617'; -KW3618 : 'KW' '3618'; -KW3619 : 'KW' '3619'; -KW3620 : 'KW' '3620'; -KW3621 : 'KW' '3621'; -KW3622 : 'KW' '3622'; -KW3623 : 'KW' '3623'; -KW3624 : 'KW' '3624'; -KW3625 : 'KW' '3625'; -KW3626 : 'KW' '3626'; -KW3627 : 'KW' '3627'; -KW3628 : 'KW' '3628'; -KW3629 : 'KW' '3629'; -KW3630 : 'KW' '3630'; -KW3631 : 'KW' '3631'; -KW3632 : 'KW' '3632'; -KW3633 : 'KW' '3633'; -KW3634 : 'KW' '3634'; -KW3635 : 'KW' '3635'; -KW3636 : 'KW' '3636'; -KW3637 : 'KW' '3637'; -KW3638 : 'KW' '3638'; -KW3639 : 'KW' '3639'; -KW3640 : 'KW' '3640'; -KW3641 : 'KW' '3641'; -KW3642 : 'KW' '3642'; -KW3643 : 'KW' '3643'; -KW3644 : 'KW' '3644'; -KW3645 : 'KW' '3645'; -KW3646 : 'KW' '3646'; -KW3647 : 'KW' '3647'; -KW3648 : 'KW' '3648'; -KW3649 : 'KW' '3649'; -KW3650 : 'KW' '3650'; -KW3651 : 'KW' '3651'; -KW3652 : 'KW' '3652'; -KW3653 : 'KW' '3653'; -KW3654 : 'KW' '3654'; -KW3655 : 'KW' '3655'; -KW3656 : 'KW' '3656'; -KW3657 : 'KW' '3657'; -KW3658 : 'KW' '3658'; -KW3659 : 'KW' '3659'; -KW3660 : 'KW' '3660'; -KW3661 : 'KW' '3661'; -KW3662 : 'KW' '3662'; -KW3663 : 'KW' '3663'; -KW3664 : 'KW' '3664'; -KW3665 : 'KW' '3665'; -KW3666 : 'KW' '3666'; -KW3667 : 'KW' '3667'; -KW3668 : 'KW' '3668'; -KW3669 : 'KW' '3669'; -KW3670 : 'KW' '3670'; -KW3671 : 'KW' '3671'; -KW3672 : 'KW' '3672'; -KW3673 : 'KW' '3673'; -KW3674 : 
'KW' '3674'; -KW3675 : 'KW' '3675'; -KW3676 : 'KW' '3676'; -KW3677 : 'KW' '3677'; -KW3678 : 'KW' '3678'; -KW3679 : 'KW' '3679'; -KW3680 : 'KW' '3680'; -KW3681 : 'KW' '3681'; -KW3682 : 'KW' '3682'; -KW3683 : 'KW' '3683'; -KW3684 : 'KW' '3684'; -KW3685 : 'KW' '3685'; -KW3686 : 'KW' '3686'; -KW3687 : 'KW' '3687'; -KW3688 : 'KW' '3688'; -KW3689 : 'KW' '3689'; -KW3690 : 'KW' '3690'; -KW3691 : 'KW' '3691'; -KW3692 : 'KW' '3692'; -KW3693 : 'KW' '3693'; -KW3694 : 'KW' '3694'; -KW3695 : 'KW' '3695'; -KW3696 : 'KW' '3696'; -KW3697 : 'KW' '3697'; -KW3698 : 'KW' '3698'; -KW3699 : 'KW' '3699'; -KW3700 : 'KW' '3700'; -KW3701 : 'KW' '3701'; -KW3702 : 'KW' '3702'; -KW3703 : 'KW' '3703'; -KW3704 : 'KW' '3704'; -KW3705 : 'KW' '3705'; -KW3706 : 'KW' '3706'; -KW3707 : 'KW' '3707'; -KW3708 : 'KW' '3708'; -KW3709 : 'KW' '3709'; -KW3710 : 'KW' '3710'; -KW3711 : 'KW' '3711'; -KW3712 : 'KW' '3712'; -KW3713 : 'KW' '3713'; -KW3714 : 'KW' '3714'; -KW3715 : 'KW' '3715'; -KW3716 : 'KW' '3716'; -KW3717 : 'KW' '3717'; -KW3718 : 'KW' '3718'; -KW3719 : 'KW' '3719'; -KW3720 : 'KW' '3720'; -KW3721 : 'KW' '3721'; -KW3722 : 'KW' '3722'; -KW3723 : 'KW' '3723'; -KW3724 : 'KW' '3724'; -KW3725 : 'KW' '3725'; -KW3726 : 'KW' '3726'; -KW3727 : 'KW' '3727'; -KW3728 : 'KW' '3728'; -KW3729 : 'KW' '3729'; -KW3730 : 'KW' '3730'; -KW3731 : 'KW' '3731'; -KW3732 : 'KW' '3732'; -KW3733 : 'KW' '3733'; -KW3734 : 'KW' '3734'; -KW3735 : 'KW' '3735'; -KW3736 : 'KW' '3736'; -KW3737 : 'KW' '3737'; -KW3738 : 'KW' '3738'; -KW3739 : 'KW' '3739'; -KW3740 : 'KW' '3740'; -KW3741 : 'KW' '3741'; -KW3742 : 'KW' '3742'; -KW3743 : 'KW' '3743'; -KW3744 : 'KW' '3744'; -KW3745 : 'KW' '3745'; -KW3746 : 'KW' '3746'; -KW3747 : 'KW' '3747'; -KW3748 : 'KW' '3748'; -KW3749 : 'KW' '3749'; -KW3750 : 'KW' '3750'; -KW3751 : 'KW' '3751'; -KW3752 : 'KW' '3752'; -KW3753 : 'KW' '3753'; -KW3754 : 'KW' '3754'; -KW3755 : 'KW' '3755'; -KW3756 : 'KW' '3756'; -KW3757 : 'KW' '3757'; -KW3758 : 'KW' '3758'; -KW3759 : 'KW' '3759'; -KW3760 : 'KW' '3760'; -KW3761 
: 'KW' '3761'; -KW3762 : 'KW' '3762'; -KW3763 : 'KW' '3763'; -KW3764 : 'KW' '3764'; -KW3765 : 'KW' '3765'; -KW3766 : 'KW' '3766'; -KW3767 : 'KW' '3767'; -KW3768 : 'KW' '3768'; -KW3769 : 'KW' '3769'; -KW3770 : 'KW' '3770'; -KW3771 : 'KW' '3771'; -KW3772 : 'KW' '3772'; -KW3773 : 'KW' '3773'; -KW3774 : 'KW' '3774'; -KW3775 : 'KW' '3775'; -KW3776 : 'KW' '3776'; -KW3777 : 'KW' '3777'; -KW3778 : 'KW' '3778'; -KW3779 : 'KW' '3779'; -KW3780 : 'KW' '3780'; -KW3781 : 'KW' '3781'; -KW3782 : 'KW' '3782'; -KW3783 : 'KW' '3783'; -KW3784 : 'KW' '3784'; -KW3785 : 'KW' '3785'; -KW3786 : 'KW' '3786'; -KW3787 : 'KW' '3787'; -KW3788 : 'KW' '3788'; -KW3789 : 'KW' '3789'; -KW3790 : 'KW' '3790'; -KW3791 : 'KW' '3791'; -KW3792 : 'KW' '3792'; -KW3793 : 'KW' '3793'; -KW3794 : 'KW' '3794'; -KW3795 : 'KW' '3795'; -KW3796 : 'KW' '3796'; -KW3797 : 'KW' '3797'; -KW3798 : 'KW' '3798'; -KW3799 : 'KW' '3799'; -KW3800 : 'KW' '3800'; -KW3801 : 'KW' '3801'; -KW3802 : 'KW' '3802'; -KW3803 : 'KW' '3803'; -KW3804 : 'KW' '3804'; -KW3805 : 'KW' '3805'; -KW3806 : 'KW' '3806'; -KW3807 : 'KW' '3807'; -KW3808 : 'KW' '3808'; -KW3809 : 'KW' '3809'; -KW3810 : 'KW' '3810'; -KW3811 : 'KW' '3811'; -KW3812 : 'KW' '3812'; -KW3813 : 'KW' '3813'; -KW3814 : 'KW' '3814'; -KW3815 : 'KW' '3815'; -KW3816 : 'KW' '3816'; -KW3817 : 'KW' '3817'; -KW3818 : 'KW' '3818'; -KW3819 : 'KW' '3819'; -KW3820 : 'KW' '3820'; -KW3821 : 'KW' '3821'; -KW3822 : 'KW' '3822'; -KW3823 : 'KW' '3823'; -KW3824 : 'KW' '3824'; -KW3825 : 'KW' '3825'; -KW3826 : 'KW' '3826'; -KW3827 : 'KW' '3827'; -KW3828 : 'KW' '3828'; -KW3829 : 'KW' '3829'; -KW3830 : 'KW' '3830'; -KW3831 : 'KW' '3831'; -KW3832 : 'KW' '3832'; -KW3833 : 'KW' '3833'; -KW3834 : 'KW' '3834'; -KW3835 : 'KW' '3835'; -KW3836 : 'KW' '3836'; -KW3837 : 'KW' '3837'; -KW3838 : 'KW' '3838'; -KW3839 : 'KW' '3839'; -KW3840 : 'KW' '3840'; -KW3841 : 'KW' '3841'; -KW3842 : 'KW' '3842'; -KW3843 : 'KW' '3843'; -KW3844 : 'KW' '3844'; -KW3845 : 'KW' '3845'; -KW3846 : 'KW' '3846'; -KW3847 : 'KW' '3847'; 
-KW3848 : 'KW' '3848'; -KW3849 : 'KW' '3849'; -KW3850 : 'KW' '3850'; -KW3851 : 'KW' '3851'; -KW3852 : 'KW' '3852'; -KW3853 : 'KW' '3853'; -KW3854 : 'KW' '3854'; -KW3855 : 'KW' '3855'; -KW3856 : 'KW' '3856'; -KW3857 : 'KW' '3857'; -KW3858 : 'KW' '3858'; -KW3859 : 'KW' '3859'; -KW3860 : 'KW' '3860'; -KW3861 : 'KW' '3861'; -KW3862 : 'KW' '3862'; -KW3863 : 'KW' '3863'; -KW3864 : 'KW' '3864'; -KW3865 : 'KW' '3865'; -KW3866 : 'KW' '3866'; -KW3867 : 'KW' '3867'; -KW3868 : 'KW' '3868'; -KW3869 : 'KW' '3869'; -KW3870 : 'KW' '3870'; -KW3871 : 'KW' '3871'; -KW3872 : 'KW' '3872'; -KW3873 : 'KW' '3873'; -KW3874 : 'KW' '3874'; -KW3875 : 'KW' '3875'; -KW3876 : 'KW' '3876'; -KW3877 : 'KW' '3877'; -KW3878 : 'KW' '3878'; -KW3879 : 'KW' '3879'; -KW3880 : 'KW' '3880'; -KW3881 : 'KW' '3881'; -KW3882 : 'KW' '3882'; -KW3883 : 'KW' '3883'; -KW3884 : 'KW' '3884'; -KW3885 : 'KW' '3885'; -KW3886 : 'KW' '3886'; -KW3887 : 'KW' '3887'; -KW3888 : 'KW' '3888'; -KW3889 : 'KW' '3889'; -KW3890 : 'KW' '3890'; -KW3891 : 'KW' '3891'; -KW3892 : 'KW' '3892'; -KW3893 : 'KW' '3893'; -KW3894 : 'KW' '3894'; -KW3895 : 'KW' '3895'; -KW3896 : 'KW' '3896'; -KW3897 : 'KW' '3897'; -KW3898 : 'KW' '3898'; -KW3899 : 'KW' '3899'; -KW3900 : 'KW' '3900'; -KW3901 : 'KW' '3901'; -KW3902 : 'KW' '3902'; -KW3903 : 'KW' '3903'; -KW3904 : 'KW' '3904'; -KW3905 : 'KW' '3905'; -KW3906 : 'KW' '3906'; -KW3907 : 'KW' '3907'; -KW3908 : 'KW' '3908'; -KW3909 : 'KW' '3909'; -KW3910 : 'KW' '3910'; -KW3911 : 'KW' '3911'; -KW3912 : 'KW' '3912'; -KW3913 : 'KW' '3913'; -KW3914 : 'KW' '3914'; -KW3915 : 'KW' '3915'; -KW3916 : 'KW' '3916'; -KW3917 : 'KW' '3917'; -KW3918 : 'KW' '3918'; -KW3919 : 'KW' '3919'; -KW3920 : 'KW' '3920'; -KW3921 : 'KW' '3921'; -KW3922 : 'KW' '3922'; -KW3923 : 'KW' '3923'; -KW3924 : 'KW' '3924'; -KW3925 : 'KW' '3925'; -KW3926 : 'KW' '3926'; -KW3927 : 'KW' '3927'; -KW3928 : 'KW' '3928'; -KW3929 : 'KW' '3929'; -KW3930 : 'KW' '3930'; -KW3931 : 'KW' '3931'; -KW3932 : 'KW' '3932'; -KW3933 : 'KW' '3933'; -KW3934 : 'KW' 
'3934'; -KW3935 : 'KW' '3935'; -KW3936 : 'KW' '3936'; -KW3937 : 'KW' '3937'; -KW3938 : 'KW' '3938'; -KW3939 : 'KW' '3939'; -KW3940 : 'KW' '3940'; -KW3941 : 'KW' '3941'; -KW3942 : 'KW' '3942'; -KW3943 : 'KW' '3943'; -KW3944 : 'KW' '3944'; -KW3945 : 'KW' '3945'; -KW3946 : 'KW' '3946'; -KW3947 : 'KW' '3947'; -KW3948 : 'KW' '3948'; -KW3949 : 'KW' '3949'; -KW3950 : 'KW' '3950'; -KW3951 : 'KW' '3951'; -KW3952 : 'KW' '3952'; -KW3953 : 'KW' '3953'; -KW3954 : 'KW' '3954'; -KW3955 : 'KW' '3955'; -KW3956 : 'KW' '3956'; -KW3957 : 'KW' '3957'; -KW3958 : 'KW' '3958'; -KW3959 : 'KW' '3959'; -KW3960 : 'KW' '3960'; -KW3961 : 'KW' '3961'; -KW3962 : 'KW' '3962'; -KW3963 : 'KW' '3963'; -KW3964 : 'KW' '3964'; -KW3965 : 'KW' '3965'; -KW3966 : 'KW' '3966'; -KW3967 : 'KW' '3967'; -KW3968 : 'KW' '3968'; -KW3969 : 'KW' '3969'; -KW3970 : 'KW' '3970'; -KW3971 : 'KW' '3971'; -KW3972 : 'KW' '3972'; -KW3973 : 'KW' '3973'; -KW3974 : 'KW' '3974'; -KW3975 : 'KW' '3975'; -KW3976 : 'KW' '3976'; -KW3977 : 'KW' '3977'; -KW3978 : 'KW' '3978'; -KW3979 : 'KW' '3979'; -KW3980 : 'KW' '3980'; -KW3981 : 'KW' '3981'; -KW3982 : 'KW' '3982'; -KW3983 : 'KW' '3983'; -KW3984 : 'KW' '3984'; -KW3985 : 'KW' '3985'; -KW3986 : 'KW' '3986'; -KW3987 : 'KW' '3987'; -KW3988 : 'KW' '3988'; -KW3989 : 'KW' '3989'; -KW3990 : 'KW' '3990'; -KW3991 : 'KW' '3991'; -KW3992 : 'KW' '3992'; -KW3993 : 'KW' '3993'; -KW3994 : 'KW' '3994'; -KW3995 : 'KW' '3995'; -KW3996 : 'KW' '3996'; -KW3997 : 'KW' '3997'; -KW3998 : 'KW' '3998'; -KW3999 : 'KW' '3999'; \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeLexers/LexerDelegatorInvokesDelegateRule.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeLexers/LexerDelegatorInvokesDelegateRule.txt new file mode 100644 index 0000000000..2113c646f6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeLexers/LexerDelegatorInvokesDelegateRule.txt @@ -0,0 +1,24 @@ 
+[type] +CompositeLexer + +[grammar] +lexer grammar M; +import S; +B : 'b'; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +lexer grammar S; +A : 'a' {}; +C : 'c' ; + +[input] +abc + +[output] +S.A +[@0,0:0='a',<3>,1:0] +[@1,1:1='b',<1>,1:1] +[@2,2:2='c',<4>,1:2] +[@3,3:2='',<-1>,1:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeLexers/LexerDelegatorRuleOverridesDelegate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeLexers/LexerDelegatorRuleOverridesDelegate.txt new file mode 100644 index 0000000000..74d6098a7c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeLexers/LexerDelegatorRuleOverridesDelegate.txt @@ -0,0 +1,22 @@ +[type] +CompositeLexer + +[grammar] +lexer grammar M; +import S; +A : 'a' B {} ; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +lexer grammar S; +A : 'a' {} ; +B : 'b' {} ; + +[input] +ab + +[output] +M.A +[@0,0:1='ab',<1>,1:0] +[@1,2:1='',<-1>,1:2] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/BringInLiteralsFromDelegate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/BringInLiteralsFromDelegate.txt new file mode 100644 index 0000000000..fa198f3065 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/BringInLiteralsFromDelegate.txt @@ -0,0 +1,21 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +s : a ; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +a : '=' 'a' {}; + +[start] +s + +[input] +=a + +[output] +"""S.a""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/CombinedImportsCombined.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/CombinedImportsCombined.txt new file mode 100644 index 0000000000..80a446ac32 --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/CombinedImportsCombined.txt @@ -0,0 +1,25 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +s : x INT; + +[slaveGrammar] +parser grammar S; +tokens { A, B, C } +x : 'x' INT {}; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x 34 9 + +[output] +"""S.x +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatesSeeSameTokenType.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatesSeeSameTokenType.txt new file mode 100644 index 0000000000..5e01124c22 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatesSeeSameTokenType.txt @@ -0,0 +1,44 @@ +[notes] +The lexer will create rules to match letters a, b, c. +The associated token types A, B, C must have the same value +and all import'd parsers. Since ANTLR regenerates all imports +for use with the delegator M, it can generate the same token type +mapping in each parser: +public static final int C=6; +public static final int EOF=-1; +public static final int B=5; +public static final int WS=7; +public static final int A=4; + +[type] +CompositeParser + +[grammar] +grammar M; +import S,T; +s : x y ; // matches AA, which should be 'aa' +B : 'b' ; // another order: B, A, C +A : 'a' ; +C : 'c' ; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar T; +tokens { C, B, A } // reverse order +y : A {}; + +[slaveGrammar] +parser grammar S; +tokens { A, B, C } +x : A {}; + +[start] +s + +[input] +aa + +[output] +S.x +T.y + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorAccessesDelegateMembers.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorAccessesDelegateMembers.txt new file mode 100644 index 0000000000..d2dafa4117 --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorAccessesDelegateMembers.txt @@ -0,0 +1,26 @@ +[type] +CompositeParser + +[grammar] +grammar M; // uses no rules from the import +import S; +s : 'b' {} ; // gS is import pointer +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +@parser::members { + +} +a : B; + +[start] +s + +[input] +b + +[output] +"""foo +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRule.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRule.txt new file mode 100644 index 0000000000..5b4ad9c2a1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRule.txt @@ -0,0 +1,24 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +s : a ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +a : B {}; + +[start] +s + +[input] +b + +[output] +"""S.a +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.txt new file mode 100644 index 0000000000..96fa718070 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.txt @@ -0,0 +1,24 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +s : label=a[3] {} ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +a[int x] returns [int y] : B {} {$y=1000;} ; + +[start] +s + +[input] +b + +[output] +"""S.a1000 +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.txt new file mode 100644 index 0000000000..339a3f4740 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.txt @@ -0,0 +1,23 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +s : a {} ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +a : B {} ; + +[start] +s + +[input] +b + +[output] +"""S.ab""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.txt new file mode 100644 index 0000000000..a0bab0812d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.txt @@ -0,0 +1,29 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S,T; +s : a ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar T; +a : B {}; + +[slaveGrammar] +parser grammar S; +a : b {}; +b : B; + +[start] +s + +[input] +b + +[output] +"""S.a +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegate.txt new file mode 100644 index 0000000000..d25532f56e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegate.txt @@ -0,0 +1,22 @@ 
+[type] +CompositeParser + +[grammar] +grammar M; +import S; +b : 'b'|'c'; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +a : b {}; +b : B ; + +[start] +a + +[input] +c + +[output] +"""S.a""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegates.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegates.txt new file mode 100644 index 0000000000..af5da57869 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegates.txt @@ -0,0 +1,29 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S, T; +b : 'b'|'c' {}|B|A; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar T; +tokens { A } +b : 'b' {}; + +[slaveGrammar] +parser grammar S; +a : b {}; +b : 'b' ; + +[start] +a + +[input] +c + +[output] +M.b +S.a + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.txt new file mode 100644 index 0000000000..36725be26d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.txt @@ -0,0 +1,29 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +prog : decl ; +type_ : 'int' | 'float' ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; + +[slaveGrammar] +parser grammar S; +type_ : 'int' ; +decl : type_ ID ';' + | type_ ID init_ ';' {}; +init_ : '=' INT; + +[start] +prog + +[input] +float x = 3; + +[output] +"""JavaDecl: floatx=3; +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportLexerWithOnlyFragmentRules.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportLexerWithOnlyFragmentRules.txt new file mode 100644 index 0000000000..fc1d47f2f2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportLexerWithOnlyFragmentRules.txt @@ -0,0 +1,32 @@ +[notes] +This is a regression test for antlr/antlr4#248 "Including grammar with only +fragments breaks generated lexer". https://github.com/antlr/antlr4/issues/248 + +[type] +CompositeParser + +[grammar] +grammar Test; +import Unicode; + +program : 'test' 'test'; + +WS : (UNICODE_CLASS_Zs)+ -> skip; + +[slaveGrammar] +"""lexer grammar Unicode; + +fragment +UNICODE_CLASS_Zs : ' ' | ' ' | ' ' | '᠎' + | ' '..' ' + | ' ' | ' ' | ' ' + ; + +""" + +[start] +program + +[input] +test test + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportedGrammarWithEmptyOptions.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportedGrammarWithEmptyOptions.txt new file mode 100644 index 0000000000..00c1065461 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportedGrammarWithEmptyOptions.txt @@ -0,0 +1,21 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +s : a ; +B : 'b' ; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +options {} +a : B ; + +[start] +s + +[input] +b + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportedRuleWithAction.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportedRuleWithAction.txt new file mode 100644 index 0000000000..2fff024817 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/ImportedRuleWithAction.txt @@ -0,0 +1,23 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +s : a; +B : 'b'; +WS : (' 
'|'\n') -> skip ; + +[slaveGrammar] +parser grammar S; +a @after {} : B; + +[start] +s + +[input] +b + +[skip] +Go + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/KeywordVSIDOrder.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/KeywordVSIDOrder.txt new file mode 100644 index 0000000000..80794c81fd --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/KeywordVSIDOrder.txt @@ -0,0 +1,24 @@ +[type] +CompositeParser + +[grammar] +grammar M; +import S; +a : A {}; +A : 'abc' {}; +WS : (' '|'\n') -> skip ; + +[slaveGrammar] +lexer grammar S; +ID : 'a'..'z'+; + +[start] +a + +[input] +abc + +[output] +M.A +M.a: [@0,0:2='abc',<1>,1:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/AmbigYieldsCtxSensitiveDFA.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/AmbigYieldsCtxSensitiveDFA.txt new file mode 100644 index 0000000000..d9ab124449 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/AmbigYieldsCtxSensitiveDFA.txt @@ -0,0 +1,27 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} + : ID | ID {} ; +ID : 'a'..'z'+; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +abc + +[output] +Decision 0: +s0-ID->:s1^=>1 + +[errors] +"""line 1:0 reportAttemptingFullContext d=0 (s), input='abc' +""" + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/AmbiguityNoLoop.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/AmbiguityNoLoop.txt new file mode 100644 index 0000000000..b2c0304a41 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/AmbiguityNoLoop.txt @@ -0,0 +1,36 @@ +[type] +Parser + +[grammar] +grammar T; +prog 
+@init {} + : expr expr {} + | expr + ; +expr: '@' + | ID '@' + | ID + ; +ID : [a-z]+ ; +WS : [ \r\n\t]+ -> skip ; + +[start] +prog + +[input] +a@ + +[output] +"""alt 1 +""" + +[errors] +line 1:2 reportAttemptingFullContext d=0 (prog), input='a@' +line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@' +line 1:2 reportAttemptingFullContext d=1 (expr), input='a@' +line 1:2 reportContextSensitivity d=1 (expr), input='a@' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFATwoDiffInput.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFATwoDiffInput.txt new file mode 100644 index 0000000000..b07cc5e717 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFATwoDiffInput.txt @@ -0,0 +1,34 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} + : ('$' a | '@' b)+ ; +a : e ID ; +b : e INT ID ; +e : INT | ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +$ 34 abc @ 34 abc + +[output] +Decision 2: +s0-INT->s1 +s1-ID->:s2^=>1 + +[errors] +line 1:5 reportAttemptingFullContext d=2 (e), input='34abc' +line 1:2 reportContextSensitivity d=2 (e), input='34' +line 1:14 reportAttemptingFullContext d=2 (e), input='34abc' +line 1:14 reportContextSensitivity d=2 (e), input='34abc' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFA_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFA_1.txt new file mode 100644 index 0000000000..7f4b51755f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFA_1.txt @@ -0,0 +1,32 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} + : '$' a | '@' b ; +a : e ID ; 
+b : e INT ID ; +e : INT | ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +$ 34 abc + +[output] +Decision 1: +s0-INT->s1 +s1-ID->:s2^=>1 + +[errors] +line 1:5 reportAttemptingFullContext d=1 (e), input='34abc' +line 1:2 reportContextSensitivity d=1 (e), input='34' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFA_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFA_2.txt new file mode 100644 index 0000000000..f2cfc226b9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/CtxSensitiveDFA_2.txt @@ -0,0 +1,32 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} + : '$' a | '@' b ; +a : e ID ; +b : e INT ID ; +e : INT | ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +@ 34 abc + +[output] +Decision 1: +s0-INT->s1 +s1-ID->:s2^=>1 + +[errors] +line 1:5 reportAttemptingFullContext d=1 (e), input='34abc' +line 1:5 reportContextSensitivity d=1 (e), input='34abc' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/ExprAmbiguity_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/ExprAmbiguity_1.txt new file mode 100644 index 0000000000..291be3b0c2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/ExprAmbiguity_1.txt @@ -0,0 +1,35 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +: expr[0] {}; + expr[int _p] + : ID + ( + {5 >= $_p}? '*' expr[6] + | {4 >= $_p}? 
'+' expr[5] + )* + ; +ID : [a-zA-Z]+ ; +WS : [ \r\n\t]+ -> skip ; + +[start] +s + +[input] +a+b + +[output] +"""(expr a + (expr b)) +""" + +[errors] +line 1:1 reportAttemptingFullContext d=1 (expr), input='+' +line 1:2 reportContextSensitivity d=1 (expr), input='+b' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/ExprAmbiguity_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/ExprAmbiguity_2.txt new file mode 100644 index 0000000000..fd26dfa50b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/ExprAmbiguity_2.txt @@ -0,0 +1,37 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +: expr[0] {}; + expr[int _p] + : ID + ( + {5 >= $_p}? '*' expr[6] + | {4 >= $_p}? '+' expr[5] + )* + ; +ID : [a-zA-Z]+ ; +WS : [ \r\n\t]+ -> skip ; + +[start] +s + +[input] +a+b*c + +[output] +"""(expr a + (expr b * (expr c))) +""" + +[errors] +line 1:1 reportAttemptingFullContext d=1 (expr), input='+' +line 1:2 reportContextSensitivity d=1 (expr), input='+b' +line 1:3 reportAttemptingFullContext d=1 (expr), input='*' +line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_1.txt new file mode 100644 index 0000000000..ee5a5ff28a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_1.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +@after {} + : '{' stat* '}' ; +stat: 'if' ID 'then' stat ('else' ID)? 
+ | 'return' + ; +ID : 'a'..'z'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +{ if x then return } + +[output] +Decision 1: +s0-'}'->:s1=>2 + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_2.txt new file mode 100644 index 0000000000..741ab3be3a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_2.txt @@ -0,0 +1,32 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +@after {} + : '{' stat* '}' ; +stat: 'if' ID 'then' stat ('else' ID)? + | 'return' + ; +ID : 'a'..'z'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +{ if x then return else foo } + +[output] +Decision 1: +s0-'else'->:s1^=>1 + +[errors] +line 1:19 reportAttemptingFullContext d=1 (stat), input='else' +line 1:19 reportContextSensitivity d=1 (stat), input='else' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_3.txt new file mode 100644 index 0000000000..481a446624 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_3.txt @@ -0,0 +1,33 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +@after {} + : '{' stat* '}' ; +stat: 'if' ID 'then' stat ('else' ID)? 
+ | 'return' + ; +ID : 'a'..'z'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +{ if x then if y then return else foo } + +[output] +Decision 1: +s0-'}'->:s2=>2 +s0-'else'->:s1^=>1 + +[errors] +line 1:29 reportAttemptingFullContext d=1 (stat), input='else' +line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_4.txt new file mode 100644 index 0000000000..53278e4fc3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_4.txt @@ -0,0 +1,34 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +@after {} + : '{' stat* '}' ; +stat: 'if' ID 'then' stat ('else' ID)? + | 'return' + ; +ID : 'a'..'z'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +{ if x then if y then return else foo else bar } + +[output] +Decision 1: +s0-'else'->:s1^=>1 + +[errors] +line 1:29 reportAttemptingFullContext d=1 (stat), input='else' +line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse' +line 1:38 reportAttemptingFullContext d=1 (stat), input='else' +line 1:38 reportContextSensitivity d=1 (stat), input='else' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_5.txt new file mode 100644 index 0000000000..7a140928fa --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_5.txt @@ -0,0 +1,36 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +@after {} + : '{' stat* 
'}' ; +stat: 'if' ID 'then' stat ('else' ID)? + | 'return' + ; +ID : 'a'..'z'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +{ if x then return else foo +if x then if y then return else foo } + +[output] +Decision 1: +s0-'}'->:s2=>2 +s0-'else'->:s1^=>1 + +[errors] +line 1:19 reportAttemptingFullContext d=1 (stat), input='else' +line 1:19 reportContextSensitivity d=1 (stat), input='else' +line 2:27 reportAttemptingFullContext d=1 (stat), input='else' +line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_6.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_6.txt new file mode 100644 index 0000000000..7a140928fa --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/FullContextIF_THEN_ELSEParse_6.txt @@ -0,0 +1,36 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init {} +@after {} + : '{' stat* '}' ; +stat: 'if' ID 'then' stat ('else' ID)? 
+ | 'return' + ; +ID : 'a'..'z'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +{ if x then return else foo +if x then if y then return else foo } + +[output] +Decision 1: +s0-'}'->:s2=>2 +s0-'else'->:s1^=>1 + +[errors] +line 1:19 reportAttemptingFullContext d=1 (stat), input='else' +line 1:19 reportContextSensitivity d=1 (stat), input='else' +line 2:27 reportAttemptingFullContext d=1 (stat), input='else' +line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/LoopsSimulateTailRecursion.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/LoopsSimulateTailRecursion.txt new file mode 100644 index 0000000000..d708221f72 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/LoopsSimulateTailRecursion.txt @@ -0,0 +1,41 @@ +[notes] +Tests predictions for the following case involving closures. 
+http://www.antlr.org/wiki/display/~admin/2011/12/29/Flaw+in+ANTLR+v3+LL(*)+analysis+algorithm + +[type] +Parser + +[grammar] +grammar T; +prog +@init {} + : expr_or_assign*; +expr_or_assign + : expr '++' {} + | expr {} + ; +expr: expr_primary ('\<-' ID)?; +expr_primary + : '(' ID ')' + | ID '(' ID ')' + | ID + ; +ID : [a-z]+ ; + +[start] +prog + +[input] +a(i)<-x + +[output] +"""pass: a(i)<-x +""" + +[errors] +line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)' +line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/SLLSeesEOFInLLGrammar.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/SLLSeesEOFInLLGrammar.txt new file mode 100644 index 0000000000..0908de123d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/FullContextParsing/SLLSeesEOFInLLGrammar.txt @@ -0,0 +1,32 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} + : a; +a : e ID ; +b : e INT ID ; +e : INT | ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; + +[start] +s + +[input] +34 abc + +[output] +Decision 0: +s0-INT->s1 +s1-ID->:s2^=>1 + +[errors] +line 1:3 reportAttemptingFullContext d=0 (e), input='34abc' +line 1:0 reportContextSensitivity d=0 (e), input='34' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_1.txt new file mode 100644 index 0000000000..8dd1e63929 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_1.txt @@ -0,0 +1,33 @@ +[type] +Parser + +[grammar] +grammar Expr; +prog: stat ; +stat: expr NEWLINE # printExpr + | ID '=' expr NEWLINE# assign + | NEWLINE # blank + ; +expr: expr 
('*'|'/') expr # MulDiv + | expr ('+'|'-') expr # AddSub + | INT # int + | ID # id + | '(' expr ')' # parens + ; + +MUL : '*' ; // assigns token name to '*' used above in grammar +DIV : '/' ; +ADD : '+' ; +SUB : '-' ; +ID : [a-zA-Z]+ ; // match identifiers +INT : [0-9]+ ;// match integers +NEWLINE:'\r'? '\n' ; // return newlines to parser (is end-statement signal) +WS : [ \t]+ -> skip ; // toss out whitespace + +[start] +prog + +[input] +"""1 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_2.txt new file mode 100644 index 0000000000..a017592e4a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_2.txt @@ -0,0 +1,33 @@ +[type] +Parser + +[grammar] +grammar Expr; +prog: stat ; +stat: expr NEWLINE # printExpr + | ID '=' expr NEWLINE# assign + | NEWLINE # blank + ; +expr: expr ('*'|'/') expr # MulDiv + | expr ('+'|'-') expr # AddSub + | INT # int + | ID # id + | '(' expr ')' # parens + ; + +MUL : '*' ; // assigns token name to '*' used above in grammar +DIV : '/' ; +ADD : '+' ; +SUB : '-' ; +ID : [a-zA-Z]+ ; // match identifiers +INT : [0-9]+ ;// match integers +NEWLINE:'\r'? 
'\n' ; // return newlines to parser (is end-statement signal) +WS : [ \t]+ -> skip ; // toss out whitespace + +[start] +prog + +[input] +"""a = 5 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_3.txt new file mode 100644 index 0000000000..4fc53bfe85 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_3.txt @@ -0,0 +1,33 @@ +[type] +Parser + +[grammar] +grammar Expr; +prog: stat ; +stat: expr NEWLINE # printExpr + | ID '=' expr NEWLINE# assign + | NEWLINE # blank + ; +expr: expr ('*'|'/') expr # MulDiv + | expr ('+'|'-') expr # AddSub + | INT # int + | ID # id + | '(' expr ')' # parens + ; + +MUL : '*' ; // assigns token name to '*' used above in grammar +DIV : '/' ; +ADD : '+' ; +SUB : '-' ; +ID : [a-zA-Z]+ ; // match identifiers +INT : [0-9]+ ;// match integers +NEWLINE:'\r'? '\n' ; // return newlines to parser (is end-statement signal) +WS : [ \t]+ -> skip ; // toss out whitespace + +[start] +prog + +[input] +"""b = 6 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_4.txt new file mode 100644 index 0000000000..30ee4ed389 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_4.txt @@ -0,0 +1,33 @@ +[type] +Parser + +[grammar] +grammar Expr; +prog: stat ; +stat: expr NEWLINE # printExpr + | ID '=' expr NEWLINE# assign + | NEWLINE # blank + ; +expr: expr ('*'|'/') expr # MulDiv + | expr ('+'|'-') expr # AddSub + | INT # int + | ID # id + | '(' expr ')' # parens + ; + +MUL : '*' ; // assigns token name to '*' used above in grammar +DIV : '/' ; +ADD : '+' ; +SUB : '-' ; +ID : [a-zA-Z]+ ; // match identifiers +INT : [0-9]+ ;// match integers +NEWLINE:'\r'? 
'\n' ; // return newlines to parser (is end-statement signal) +WS : [ \t]+ -> skip ; // toss out whitespace + +[start] +prog + +[input] +"""a+b*2 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_5.txt new file mode 100644 index 0000000000..98fba28371 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/AmbigLR_5.txt @@ -0,0 +1,33 @@ +[type] +Parser + +[grammar] +grammar Expr; +prog: stat ; +stat: expr NEWLINE # printExpr + | ID '=' expr NEWLINE# assign + | NEWLINE # blank + ; +expr: expr ('*'|'/') expr # MulDiv + | expr ('+'|'-') expr # AddSub + | INT # int + | ID # id + | '(' expr ')' # parens + ; + +MUL : '*' ; // assigns token name to '*' used above in grammar +DIV : '/' ; +ADD : '+' ; +SUB : '-' ; +ID : [a-zA-Z]+ ; // match identifiers +INT : [0-9]+ ;// match integers +NEWLINE:'\r'? '\n' ; // return newlines to parser (is end-statement signal) +WS : [ \t]+ -> skip ; // toss out whitespace + +[start] +prog + +[input] +"""(1+2)*3 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_1.txt new file mode 100644 index 0000000000..ff41bcf1ad --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_1.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a + +[output] +"""(s (declarator a) ) +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_10.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_10.txt new file mode 100644 index 0000000000..de50a048c6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_10.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(*a)[] + +[output] +"""(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_2.txt new file mode 100644 index 0000000000..c1baa1504f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_2.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +*a + +[output] +"""(s (declarator * (declarator a)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_3.txt new file mode 100644 index 0000000000..3b6d324616 --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_3.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +**a + +[output] +"""(s (declarator * (declarator * (declarator a))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_4.txt new file mode 100644 index 0000000000..1f642b4d39 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_4.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a[3] + +[output] +"""(s (declarator (declarator a) [ (e 3) ]) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_5.txt new file mode 100644 index 0000000000..a64c3234ec --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_5.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' 
')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +b[] + +[output] +"""(s (declarator (declarator b) [ ]) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_6.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_6.txt new file mode 100644 index 0000000000..afd017fce5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_6.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(a) + +[output] +"""(s (declarator ( (declarator a) )) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_7.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_7.txt new file mode 100644 index 0000000000..d62df17019 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_7.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a[]() + +[output] +"""(s (declarator (declarator (declarator a) [ ]) ( )) ) +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_8.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_8.txt new file mode 100644 index 0000000000..b76e2fe79b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_8.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a[][] + +[output] +"""(s (declarator (declarator (declarator a) [ ]) [ ]) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_9.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_9.txt new file mode 100644 index 0000000000..9f9a8e40ad --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Declarations_9.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +*a[] + +[output] +"""(s (declarator * (declarator (declarator a) [ ])) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_1.txt new file mode 100644 index 0000000000..9ccf4c3108 
--- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_1.txt @@ -0,0 +1,25 @@ +[notes] +This is a regression test for "Support direct calls to left-recursive +rules". https://github.com/antlr/antlr4/issues/161 + +[type] +Parser + +[grammar] +grammar T; +a @after {} : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] +x + +[output] +"""(a x) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_2.txt new file mode 100644 index 0000000000..2dccce6352 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_2.txt @@ -0,0 +1,25 @@ +[notes] +This is a regression test for "Support direct calls to left-recursive +rules". https://github.com/antlr/antlr4/issues/161 + +[type] +Parser + +[grammar] +grammar T; +a @after {} : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] +x y + +[output] +"""(a (a x) y) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_3.txt new file mode 100644 index 0000000000..bc67fab7e8 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/DirectCallToLeftRecursiveRule_3.txt @@ -0,0 +1,25 @@ +[notes] +This is a regression test for "Support direct calls to left-recursive +rules". 
https://github.com/antlr/antlr4/issues/161 + +[type] +Parser + +[grammar] +grammar T; +a @after {} : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] +x y z + +[output] +"""(a (a (a x) y) z) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_1.txt new file mode 100644 index 0000000000..c65e2a0265 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_1.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a + +[output] +"""(s (e a) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_2.txt new file mode 100644 index 0000000000..61490cf815 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_2.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 
'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +1 + +[output] +"""(s (e 1) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_3.txt new file mode 100644 index 0000000000..355cf4ffc3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_3.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a-1 + +[output] +"""(s (e (e a) - (e 1)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_4.txt new file mode 100644 index 0000000000..82823bc802 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_4.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a.b + +[output] +"""(s (e (e a) . 
b) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_5.txt new file mode 100644 index 0000000000..2fecad1eac --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_5.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a.this + +[output] +"""(s (e (e a) . this) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_6.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_6.txt new file mode 100644 index 0000000000..f36e03d1f6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_6.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +-a + +[output] +"""(s (e - (e a)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_7.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_7.txt new file mode 100644 index 0000000000..69c9a28d60 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Expressions_7.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 
'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +-a+b + +[output] +"""(s (e (e - (e a)) + (e b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_1.txt new file mode 100644 index 0000000000..f770f3cbfb --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_1.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a|b&c + +[output] +"""(s (e (e a) | (e (e b) & (e c))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_10.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_10.txt new file mode 100644 index 0000000000..05bf8eb6fc --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_10.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a.f(x)==T.c + +[output] +"""(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . 
c)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_11.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_11.txt new file mode 100644 index 0000000000..d6ff972f3f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_11.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a.f().g(x,1) + +[output] +"""(s (e (e (e (e (e a) . f) ( )) . 
g) ( (expressionList (e x) , (e 1)) )) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_12.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_12.txt new file mode 100644 index 0000000000..4fd312b0fd --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_12.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +new T[((n-1) * x) + 1] + +[output] +"""(s (e new (typespec T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_2.txt new file mode 100644 index 0000000000..ae082d450e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_2.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(a|b)&c + +[output] +"""(s (e (e ( (e (e a) | (e b)) )) & (e c)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_3.txt new file mode 100644 index 0000000000..da0aba1d89 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_3.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a > b + +[output] +"""(s (e (e a) > (e b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_4.txt new file mode 100644 index 0000000000..f0b826c0d5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_4.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a >> b + +[output] +"""(s (e (e a) >> (e b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_5.txt new file mode 100644 index 0000000000..c6b0b80d6e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_5.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a=b=c + +[output] +"""(s (e (e a) = (e (e b) = (e c))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_6.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_6.txt new file mode 100644 index 0000000000..bff6065322 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_6.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a^b^c + +[output] +"""(s (e (e a) ^ (e (e b) ^ (e c))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_7.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_7.txt new file mode 100644 index 0000000000..4728aa4b50 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_7.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(T)x + +[output] +"""(s (e ( (typespec T) ) (e x)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_8.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_8.txt new file mode 100644 index 0000000000..0a5860edee --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_8.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +new A().b + +[output] +"""(s (e (e new (typespec A) ( )) . 
b) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_9.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_9.txt new file mode 100644 index 0000000000..1d85d91e86 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/JavaExpressions_9.txt @@ -0,0 +1,72 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | typespec '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' typespec ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +typespec + : ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(T)t.f() + +[output] +"""(s (e (e ( (typespec T) ) (e (e t) . 
f)) ( )) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_1.txt new file mode 100644 index 0000000000..d7fe95127c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_1.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e; +e : a=e op=('*'|'/') b=e {} + | INT {} + | '(' x=e ')' {} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +4 + +[output] +"""(s (e 4)) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_2.txt new file mode 100644 index 0000000000..0ca7d3fd9c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_2.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e; +e : a=e op=('*'|'/') b=e {} + | INT {} + | '(' x=e ')' {} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +1*2/3 + +[output] +"""(s (e (e (e 1) * (e 2)) / (e 3))) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_3.txt new file mode 100644 index 0000000000..20238f95a5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/LabelsOnOpSubrule_3.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e; +e : a=e op=('*'|'/') b=e {} + | INT {} + | '(' x=e ')' {} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(1/2)*3 + +[output] +"""(s (e (e ( (e (e 1) / (e 2)) )) * 
(e 3))) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_1.txt new file mode 100644 index 0000000000..c67586699b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_1.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#625 "Duplicate action breaks +operator precedence" https://github.com/antlr/antlr4/issues/625 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{}? + | a=e op=('+'|'-') b=e {}\{}?\ + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +4 + +[output] +"""(s (e 4)) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_2.txt new file mode 100644 index 0000000000..61fabe9d85 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_2.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#625 "Duplicate action breaks +operator precedence" https://github.com/antlr/antlr4/issues/625 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{}? 
+ | a=e op=('+'|'-') b=e {}\{}?\ + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +1*2/3 + +[output] +"""(s (e (e (e 1) * (e 2)) / (e 3))) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_3.txt new file mode 100644 index 0000000000..689a65f174 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActionsPredicatesOptions_3.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#625 "Duplicate action breaks +operator precedence" https://github.com/antlr/antlr4/issues/625 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{}? + | a=e op=('+'|'-') b=e {}\{}?\ + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +(1/2)*3 + +[output] +"""(s (e (e ( (e (e 1) / (e 2)) )) * (e 3))) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_1.txt new file mode 100644 index 0000000000..671b11ffda --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_1.txt @@ -0,0 +1,27 @@ +[notes] +This is a regression test for antlr/antlr4#625 "Duplicate action breaks +operator precedence" https://github.com/antlr/antlr4/issues/625 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{} + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +4 + +[output] +"""(s (e 4)) +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_2.txt new file mode 100644 index 0000000000..752d000ccc --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_2.txt @@ -0,0 +1,27 @@ +[notes] +This is a regression test for antlr/antlr4#625 "Duplicate action breaks +operator precedence" https://github.com/antlr/antlr4/issues/625 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{} + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +1*2/3 + +[output] +"""(s (e (e (e 1) * (e 2)) / (e 3))) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_3.txt new file mode 100644 index 0000000000..6fc361bce3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleActions_3.txt @@ -0,0 +1,27 @@ +[notes] +This is a regression test for antlr/antlr4#625 "Duplicate action breaks +operator precedence" https://github.com/antlr/antlr4/issues/625 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{} + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(1/2)*3 + +[output] +"""(s (e (e ( (e (e 1) / (e 2)) )) * (e 3))) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_1.txt new file mode 100644 index 0000000000..629311487f --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_1.txt @@ -0,0 +1,36 @@ +[notes] +This is a regression test for antlr/antlr4#433 "Not all context accessor +methods are generated when an alternative rule label is used for multiple +alternatives". https://github.com/antlr/antlr4/issues/433 + +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v] + : e '*' e {$v = (0)}, {})> * (1)}, {})>;} # binary + | e '+' e {$v = (0)}, {})> + (1)}, {})>;} # binary + | INT{$v = $INT.int;} # anInt + | '(' e ')' {$v = $e.v;} # parens + | left=e INC {$v = $left.v + 1;} # unary + | left=e DEC {$v = $left.v - 1;} # unary + | ID {} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +INC : '++' ; +DEC : '--' ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +4 + +[output] +"""4 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_2.txt new file mode 100644 index 0000000000..31ee95f747 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_2.txt @@ -0,0 +1,36 @@ +[notes] +This is a regression test for antlr/antlr4#433 "Not all context accessor +methods are generated when an alternative rule label is used for multiple +alternatives". 
https://github.com/antlr/antlr4/issues/433 + +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v] + : e '*' e {$v = (0)}, {})> * (1)}, {})>;} # binary + | e '+' e {$v = (0)}, {})> + (1)}, {})>;} # binary + | INT{$v = $INT.int;} # anInt + | '(' e ')' {$v = $e.v;} # parens + | left=e INC {$v = $left.v + 1;} # unary + | left=e DEC {$v = $left.v - 1;} # unary + | ID {} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +INC : '++' ; +DEC : '--' ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +1+2 + +[output] +"""3 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_3.txt new file mode 100644 index 0000000000..3e2de3e588 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_3.txt @@ -0,0 +1,36 @@ +[notes] +This is a regression test for antlr/antlr4#433 "Not all context accessor +methods are generated when an alternative rule label is used for multiple +alternatives". 
https://github.com/antlr/antlr4/issues/433 + +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v] + : e '*' e {$v = (0)}, {})> * (1)}, {})>;} # binary + | e '+' e {$v = (0)}, {})> + (1)}, {})>;} # binary + | INT{$v = $INT.int;} # anInt + | '(' e ')' {$v = $e.v;} # parens + | left=e INC {$v = $left.v + 1;} # unary + | left=e DEC {$v = $left.v - 1;} # unary + | ID {} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +INC : '++' ; +DEC : '--' ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +1+2*3 + +[output] +"""7 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_4.txt new file mode 100644 index 0000000000..d3a5726520 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_4.txt @@ -0,0 +1,36 @@ +[notes] +This is a regression test for antlr/antlr4#433 "Not all context accessor +methods are generated when an alternative rule label is used for multiple +alternatives". 
https://github.com/antlr/antlr4/issues/433 + +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v] + : e '*' e {$v = (0)}, {})> * (1)}, {})>;} # binary + | e '+' e {$v = (0)}, {})> + (1)}, {})>;} # binary + | INT{$v = $INT.int;} # anInt + | '(' e ')' {$v = $e.v;} # parens + | left=e INC {$v = $left.v + 1;} # unary + | left=e DEC {$v = $left.v - 1;} # unary + | ID {} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +INC : '++' ; +DEC : '--' ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +i++*3 + +[output] +"""12 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_5.txt new file mode 100644 index 0000000000..4a313df433 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/MultipleAlternativesWithCommonLabel_5.txt @@ -0,0 +1,36 @@ +[notes] +This is a regression test for antlr/antlr4#433 "Not all context accessor +methods are generated when an alternative rule label is used for multiple +alternatives". 
https://github.com/antlr/antlr4/issues/433 + +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v] + : e '*' e {$v = (0)}, {})> * (1)}, {})>;} # binary + | e '+' e {$v = (0)}, {})> + (1)}, {})>;} # binary + | INT{$v = $INT.int;} # anInt + | '(' e ')' {$v = $e.v;} # parens + | left=e INC {$v = $left.v + 1;} # unary + | left=e DEC {$v = $left.v - 1;} # unary + | ID {} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +INC : '++' ; +DEC : '--' ; +WS : (' '|'\n') -> skip; + +[start] +s + +[input] +(99)+3 + +[output] +"""102 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrecedenceFilterConsidersContext.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrecedenceFilterConsidersContext.txt new file mode 100644 index 0000000000..67d1bdde33 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrecedenceFilterConsidersContext.txt @@ -0,0 +1,25 @@ +[notes] +This is a regression test for antlr/antlr4#509 "Incorrect rule chosen in +unambiguous grammar". https://github.com/antlr/antlr4/issues/509 + +[type] +Parser + +[grammar] +grammar T; +prog +@after {} +: statement* EOF {}; +statement: letterA | statement letterA 'b' ; +letterA: 'a'; + +[start] +prog + +[input] +aa + +[output] +"""(prog (statement (letterA a)) (statement (letterA a)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixAndOtherAlt_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixAndOtherAlt_1.txt new file mode 100644 index 0000000000..30a38f6447 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixAndOtherAlt_1.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF ; +expr : literal + | op expr + | expr op expr + ; +literal : '-'? 
Integer ; +op : '+' | '-' ; +Integer : [0-9]+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +-1 + +[output] +"""(s (expr (literal - 1)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixAndOtherAlt_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixAndOtherAlt_2.txt new file mode 100644 index 0000000000..8002cdabb9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixAndOtherAlt_2.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF ; +expr : literal + | op expr + | expr op expr + ; +literal : '-'? Integer ; +op : '+' | '-' ; +Integer : [0-9]+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +-1 + -1 + +[output] +"""(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_1.txt new file mode 100644 index 0000000000..1395ce3060 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_1.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : e {} ; +e returns [ result] + : ID '=' e1=e {$result = ;} + | ID {$result = $ID.text;} + | e1=e '+' e2=e {$result = ;} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a + +[output] +"""a +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_2.txt new file mode 100644 index 0000000000..54e8d888ff --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_2.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : e {} ; +e returns [ result] + : ID '=' e1=e {$result = ;} + | ID {$result = $ID.text;} + | e1=e '+' e2=e {$result = ;} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a+b + +[output] +"""(a+b) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_3.txt new file mode 100644 index 0000000000..034019f7bc --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/PrefixOpWithActionAndLabel_3.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : e {} ; +e returns [ result] + : ID '=' e1=e {$result = ;} + | ID {$result = $ID.text;} + | e1=e '+' e2=e {$result = ;} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a=b+c + +[output] +"""((a=b)+c) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_1.txt new file mode 100644 index 0000000000..9cf6bb44ee --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_1.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s : q=e {}; +e returns [int v] + : a=e op='*' b=e {$v = $a.v * $b.v;} # mult + | a=e '+' b=e {$v = $a.v + $b.v;} # add + | INT{$v = $INT.int;} # anInt + | '(' x=e ')' {$v = $x.v;} # parens + | x=e '++' {$v = $x.v+1;} # inc + | e '--' # dec + | ID {$v = 3;} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + 
+[start] +s + +[input] +4 + +[output] +"""4 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_2.txt new file mode 100644 index 0000000000..ba490abc13 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_2.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s : q=e {}; +e returns [int v] + : a=e op='*' b=e {$v = $a.v * $b.v;} # mult + | a=e '+' b=e {$v = $a.v + $b.v;} # add + | INT{$v = $INT.int;} # anInt + | '(' x=e ')' {$v = $x.v;} # parens + | x=e '++' {$v = $x.v+1;} # inc + | e '--' # dec + | ID {$v = 3;} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +1+2 + +[output] +"""3 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_3.txt new file mode 100644 index 0000000000..10b39b825b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_3.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s : q=e {}; +e returns [int v] + : a=e op='*' b=e {$v = $a.v * $b.v;} # mult + | a=e '+' b=e {$v = $a.v + $b.v;} # add + | INT{$v = $INT.int;} # anInt + | '(' x=e ')' {$v = $x.v;} # parens + | x=e '++' {$v = $x.v+1;} # inc + | e '--' # dec + | ID {$v = 3;} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +1+2*3 + +[output] +"""7 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_4.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_4.txt new file mode 100644 index 0000000000..0d7f923820 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsAndLabels_4.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +s : q=e {}; +e returns [int v] + : a=e op='*' b=e {$v = $a.v * $b.v;} # mult + | a=e '+' b=e {$v = $a.v + $b.v;} # add + | INT{$v = $INT.int;} # anInt + | '(' x=e ')' {$v = $x.v;} # parens + | x=e '++' {$v = $x.v+1;} # inc + | e '--' # dec + | ID {$v = 3;} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +i++*3 + +[output] +"""12 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_1.txt new file mode 100644 index 0000000000..1cfb886d7d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_1.txt @@ -0,0 +1,34 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). 
+ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr (',' b+=expr)* '>>' c=expr #Send + | ID #JustId //semantic check on modifiers +; + +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; + +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +a*b + +[output] +"""(s (expr (expr a) * (expr b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_2.txt new file mode 100644 index 0000000000..06dc777133 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_2.txt @@ -0,0 +1,34 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). + +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr (',' b+=expr)* '>>' c=expr #Send + | ID #JustId //semantic check on modifiers +; + +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; + +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +a,c>>x + +[output] +"""(s (expr (expr a) , (expr c) >> (expr x)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_3.txt new file mode 100644 index 0000000000..55fc5f9d47 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_3.txt @@ -0,0 +1,34 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". 
https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). + +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr (',' b+=expr)* '>>' c=expr #Send + | ID #JustId //semantic check on modifiers +; + +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; + +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +x + +[output] +"""(s (expr x) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_4.txt new file mode 100644 index 0000000000..ac7eddea64 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList1_4.txt @@ -0,0 +1,34 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). 
+ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr (',' b+=expr)* '>>' c=expr #Send + | ID #JustId //semantic check on modifiers +; + +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; + +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +a*b,c,x*y>>r + +[output] +"""(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_1.txt new file mode 100644 index 0000000000..83ca371af9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_1.txt @@ -0,0 +1,33 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). 
+ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr ',' b+=expr #Comma + | b+=expr '>>' c=expr #Send + | ID #JustId //semantic check on modifiers + ; +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +a*b + +[output] +"""(s (expr (expr a) * (expr b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_2.txt new file mode 100644 index 0000000000..e72fc249df --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_2.txt @@ -0,0 +1,33 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). 
+ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr ',' b+=expr #Comma + | b+=expr '>>' c=expr #Send + | ID #JustId //semantic check on modifiers + ; +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +a,c>>x + +[output] +"""(s (expr (expr (expr a) , (expr c)) >> (expr x)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_3.txt new file mode 100644 index 0000000000..0f60f72ea4 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_3.txt @@ -0,0 +1,33 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). 
+ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr ',' b+=expr #Comma + | b+=expr '>>' c=expr #Send + | ID #JustId //semantic check on modifiers + ; +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +x + +[output] +"""(s (expr x) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_4.txt new file mode 100644 index 0000000000..235577bbc7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActionsList2_4.txt @@ -0,0 +1,33 @@ +[notes] +This is a regression test for antlr/antlr4#677 "labels not working in grammar +file". https://github.com/antlr/antlr4/issues/677 +This test treats `,` and `>>` as part of a single compound operator (similar +to a ternary operator). 
+ +[type] +Parser + +[grammar] +grammar T; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr ',' b+=expr #Comma + | b+=expr '>>' c=expr #Send + | ID #JustId //semantic check on modifiers + ; +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +a*b,c,x*y>>r + +[output] +"""(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_1.txt new file mode 100644 index 0000000000..2bf914ac7d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_1.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v, ignored] + : a=e '*' b=e {$v = $a.v * $b.v;} + | a=e '+' b=e {$v = $a.v + $b.v;} + | INT {$v = $INT.int;} + | '(' x=e ')' {$v = $x.v;} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +4 + +[output] +"""4 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_2.txt new file mode 100644 index 0000000000..268582148a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_2.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v, ignored] + : a=e '*' b=e {$v = $a.v * $b.v;} + | a=e '+' b=e {$v = $a.v + $b.v;} + | INT {$v = $INT.int;} + | '(' x=e ')' {$v = $x.v;} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +1+2 + +[output] +"""3 +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_3.txt new file mode 100644 index 0000000000..67b00890d7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_3.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v, ignored] + : a=e '*' b=e {$v = $a.v * $b.v;} + | a=e '+' b=e {$v = $a.v + $b.v;} + | INT {$v = $INT.int;} + | '(' x=e ')' {$v = $x.v;} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +1+2*3 + +[output] +"""7 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_4.txt new file mode 100644 index 0000000000..9f9fb785e0 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/ReturnValueAndActions_4.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : e {}; +e returns [int v, ignored] + : a=e '*' b=e {$v = $a.v * $b.v;} + | a=e '+' b=e {$v = $a.v + $b.v;} + | INT {$v = $INT.int;} + | '(' x=e ')' {$v = $x.v;} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +(1+2)*3 + +[output] +"""9 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/SemPred.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/SemPred.txt new file mode 100644 index 0000000000..f932b83d85 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/SemPred.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : a ; +a : a {}? 
ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x y z + +[output] +"""(s (a (a (a x) y) z)) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/SemPredFailOption.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/SemPredFailOption.txt new file mode 100644 index 0000000000..5d3008e855 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/SemPredFailOption.txt @@ -0,0 +1,26 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : a ; +a : a ID {}?\ + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x y z + +[output] +"""(s (a (a x) y z)) +""" + +[errors] +"""line 1:4 rule a custom message +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_1.txt new file mode 100644 index 0000000000..16aa2d233d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_1.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : a ; +a : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x + +[output] +"""(s (a x)) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_2.txt new file mode 100644 index 0000000000..bf9ec64e90 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_2.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : a ; +a : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x y + +[output] +"""(s (a (a x) y)) +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_3.txt new file mode 100644 index 0000000000..3f95b714d9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/Simple_3.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : a ; +a : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x y z + +[output] +"""(s (a (a (a x) y) z)) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_1.txt new file mode 100644 index 0000000000..d2e0701e9a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_1.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' 
e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a + +[output] +"""(s (e a) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_2.txt new file mode 100644 index 0000000000..58d81f3c2f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_2.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a+b + +[output] +"""(s (e (e a) + (e b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_3.txt new file mode 100644 index 0000000000..bdb50285a4 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_3.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' 
e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a*b + +[output] +"""(s (e (e a) * (e b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_4.txt new file mode 100644 index 0000000000..390e126179 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_4.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b:c + +[output] +"""(s (e (e a) ? (e b) : (e c)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_5.txt new file mode 100644 index 0000000000..b9fe9d5aeb --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_5.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' 
e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a=b=c + +[output] +"""(s (e (e a) = (e (e b) = (e c))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_6.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_6.txt new file mode 100644 index 0000000000..a6ccc3e3ed --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_6.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b+c:d + +[output] +"""(s (e (e a) ? (e (e b) + (e c)) : (e d)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_7.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_7.txt new file mode 100644 index 0000000000..d1c0d16850 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_7.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' 
e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b=c:d + +[output] +"""(s (e (e a) ? (e (e b) = (e c)) : (e d)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_8.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_8.txt new file mode 100644 index 0000000000..37e9e3d824 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_8.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a? b?c:d : e + +[output] +"""(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_9.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_9.txt new file mode 100644 index 0000000000..3f6796a856 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExprExplicitAssociativity_9.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#542 "First alternative cannot +be right-associative". https://github.com/antlr/antlr4/issues/542 + +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' 
e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b: c?d:e + +[output] +"""(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_1.txt new file mode 100644 index 0000000000..2a6f3017a1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_1.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a + +[output] +"""(s (e a) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_2.txt new file mode 100644 index 0000000000..7922cdb982 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_2.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' 
e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a+b + +[output] +"""(s (e (e a) + (e b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_3.txt new file mode 100644 index 0000000000..484e8baee3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_3.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a*b + +[output] +"""(s (e (e a) * (e b)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_4.txt new file mode 100644 index 0000000000..0bc4b86d15 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_4.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b:c + +[output] +"""(s (e (e a) ? 
(e b) : (e c)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_5.txt new file mode 100644 index 0000000000..1912cc1c2d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_5.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a=b=c + +[output] +"""(s (e (e a) = (e (e b) = (e c))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_6.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_6.txt new file mode 100644 index 0000000000..a4af8ecbd5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_6.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b+c:d + +[output] +"""(s (e (e a) ? 
(e (e b) + (e c)) : (e d)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_7.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_7.txt new file mode 100644 index 0000000000..673f87858a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_7.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b=c:d + +[output] +"""(s (e (e a) ? (e (e b) = (e c)) : (e d)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_8.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_8.txt new file mode 100644 index 0000000000..84fe9d2b84 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_8.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a? b?c:d : e + +[output] +"""(s (e (e a) ? (e (e b) ? 
(e c) : (e d)) : (e e)) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_9.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_9.txt new file mode 100644 index 0000000000..661bdefd90 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/TernaryExpr_9.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a?b: c?d:e + +[output] +"""(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/WhitespaceInfluence_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/WhitespaceInfluence_1.txt new file mode 100644 index 0000000000..afbd927b97 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/WhitespaceInfluence_1.txt @@ -0,0 +1,64 @@ +[notes] +This is a regression test for #239 "recoursive parser using implicit tokens +ignore white space lexer rule". https://github.com/antlr/antlr4/issues/239 + +[type] +Parser + +[grammar] +grammar Expr; +prog : expression EOF; +expression + : ID '(' expression (',' expression)* ')' # doFunction + | '(' expression ')'# doParenthesis + | '!' 
expression # doNot + | '-' expression # doNegate + | '+' expression # doPositiv + | expression '^' expression # doPower + | expression '*' expression # doMultipy + | expression '/' expression # doDivide + | expression '%' expression # doModulo + | expression '-' expression # doMinus + | expression '+' expression # doPlus + | expression '=' expression # doEqual + | expression '!=' expression # doNotEqual + | expression '>' expression # doGreather + | expression '>=' expression # doGreatherEqual + | expression '\<' expression # doLesser + | expression '\<=' expression # doLesserEqual + | expression K_IN '(' expression (',' expression)* ')' # doIn + | expression ( '&' | K_AND) expression# doAnd + | expression ( '|' | K_OR) expression # doOr + | '[' expression (',' expression)* ']'# newArray + | K_TRUE # newTrueBoolean + | K_FALSE # newFalseBoolean + | NUMBER # newNumber + | DATE # newDateTime + | ID # newIdentifier + | SQ_STRING# newString + | K_NULL # newNull + ; + +// Fragments +fragment DIGIT : '0' .. '9'; +fragment UPPER : 'A' .. 'Z'; +fragment LOWER : 'a' .. 'z'; +fragment LETTER : LOWER | UPPER; +fragment WORD : LETTER | '_' | '$' | '#' | '.'; +fragment ALPHANUM : WORD | DIGIT; + +// Tokens +ID : LETTER ALPHANUM*; +NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? DIGIT+)?; +DATE : '\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\''; +SQ_STRING : '\'' ('\'\'' | ~'\'')* '\''; +DQ_STRING : '"' ('\\\\"' | ~'"')* '"'; +WS : [ \t\n\r]+ -> skip ; +COMMENTS : ('/*' .*? 
'*' '/' | '//' ~'\n'* '\n' ) -> skip; + +[start] +prog + +[input] +Test(1,3) + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/WhitespaceInfluence_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/WhitespaceInfluence_2.txt new file mode 100644 index 0000000000..345ae30982 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LeftRecursion/WhitespaceInfluence_2.txt @@ -0,0 +1,64 @@ +[notes] +This is a regression test for #239 "recoursive parser using implicit tokens +ignore white space lexer rule". https://github.com/antlr/antlr4/issues/239 + +[type] +Parser + +[grammar] +grammar Expr; +prog : expression EOF; +expression + : ID '(' expression (',' expression)* ')' # doFunction + | '(' expression ')'# doParenthesis + | '!' expression # doNot + | '-' expression # doNegate + | '+' expression # doPositiv + | expression '^' expression # doPower + | expression '*' expression # doMultipy + | expression '/' expression # doDivide + | expression '%' expression # doModulo + | expression '-' expression # doMinus + | expression '+' expression # doPlus + | expression '=' expression # doEqual + | expression '!=' expression # doNotEqual + | expression '>' expression # doGreather + | expression '>=' expression # doGreatherEqual + | expression '\<' expression # doLesser + | expression '\<=' expression # doLesserEqual + | expression K_IN '(' expression (',' expression)* ')' # doIn + | expression ( '&' | K_AND) expression# doAnd + | expression ( '|' | K_OR) expression # doOr + | '[' expression (',' expression)* ']'# newArray + | K_TRUE # newTrueBoolean + | K_FALSE # newFalseBoolean + | NUMBER # newNumber + | DATE # newDateTime + | ID # newIdentifier + | SQ_STRING# newString + | K_NULL # newNull + ; + +// Fragments +fragment DIGIT : '0' .. '9'; +fragment UPPER : 'A' .. 'Z'; +fragment LOWER : 'a' .. 
'z'; +fragment LETTER : LOWER | UPPER; +fragment WORD : LETTER | '_' | '$' | '#' | '.'; +fragment ALPHANUM : WORD | DIGIT; + +// Tokens +ID : LETTER ALPHANUM*; +NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? DIGIT+)?; +DATE : '\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\''; +SQ_STRING : '\'' ('\'\'' | ~'\'')* '\''; +DQ_STRING : '"' ('\\\\"' | ~'"')* '"'; +WS : [ \t\n\r]+ -> skip ; +COMMENTS : ('/*' .*? '*' '/' | '//' ~'\n'* '\n' ) -> skip; + +[start] +prog + +[input] +Test(1, 3) + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/DFAToATNThatFailsBackToDFA.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/DFAToATNThatFailsBackToDFA.txt new file mode 100644 index 0000000000..ad24098e29 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/DFAToATNThatFailsBackToDFA.txt @@ -0,0 +1,20 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'ab' ; +B : 'abc' ; + +[input] +ababx + +[output] +[@0,0:1='ab',<1>,1:0] +[@1,2:3='ab',<1>,1:2] +[@2,5:4='',<-1>,1:5] + +[errors] +"""line 1:4 token recognition error at: 'x' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/DFAToATNThatMatchesThenFailsInATN.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/DFAToATNThatMatchesThenFailsInATN.txt new file mode 100644 index 0000000000..817f7d4cdc --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/DFAToATNThatMatchesThenFailsInATN.txt @@ -0,0 +1,21 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'ab' ; +B : 'abc' ; +C : 'abcd' ; + +[input] +ababcx + +[output] +[@0,0:1='ab',<1>,1:0] +[@1,2:4='abc',<2>,1:2] +[@2,6:5='',<-1>,1:6] + +[errors] +"""line 1:5 token recognition error at: 'x' +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/EnforcedGreedyNestedBraces_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/EnforcedGreedyNestedBraces_1.txt new file mode 100644 index 0000000000..d5edc22ed0 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/EnforcedGreedyNestedBraces_1.txt @@ -0,0 +1,15 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ACTION : '{' (ACTION | ~[{}])* '}'; +WS : [ \r\n\t]+ -> skip; + +[input] +{ { } } + +[output] +[@0,0:6='{ { } }',<1>,1:0] +[@1,7:6='',<-1>,1:7] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/EnforcedGreedyNestedBraces_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/EnforcedGreedyNestedBraces_2.txt new file mode 100644 index 0000000000..aa251242af --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/EnforcedGreedyNestedBraces_2.txt @@ -0,0 +1,19 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ACTION : '{' (ACTION | ~[{}])* '}'; +WS : [ \r\n\t]+ -> skip; + +[input] +{ { } + +[output] +"""[@0,5:4='',<-1>,1:5] +""" + +[errors] +"""line 1:0 token recognition error at: '{ { }' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/ErrorInMiddle.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/ErrorInMiddle.txt new file mode 100644 index 0000000000..e70c91b1a1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/ErrorInMiddle.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'abc' ; + +[input] +abx + +[output] +"""[@0,3:2='',<-1>,1:3] +""" + +[errors] +"""line 1:0 token recognition error at: 'abx' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharAtStart.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharAtStart.txt new file mode 100644 index 0000000000..b8291b8191 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharAtStart.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'a' 'b' ; + +[input] +x + +[output] +"""[@0,1:0='',<-1>,1:1] +""" + +[errors] +"""line 1:0 token recognition error at: 'x' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharAtStartAfterDFACache.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharAtStartAfterDFACache.txt new file mode 100644 index 0000000000..ae21517319 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharAtStartAfterDFACache.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'a' 'b' ; + +[input] +abx + +[output] +[@0,0:1='ab',<1>,1:0] +[@1,3:2='',<-1>,1:3] + +[errors] +"""line 1:2 token recognition error at: 'x' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharInToken.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharInToken.txt new file mode 100644 index 0000000000..3e0b509f07 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharInToken.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'a' 'b' ; + +[input] +ax + +[output] +"""[@0,2:1='',<-1>,1:2] +""" + +[errors] +"""line 1:0 token recognition error at: 'ax' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharInTokenAfterDFACache.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharInTokenAfterDFACache.txt new file mode 100644 index 0000000000..9d014e8d9d --- 
/dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/InvalidCharInTokenAfterDFACache.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'a' 'b' ; + +[input] +abax + +[output] +[@0,0:1='ab',<1>,1:0] +[@1,4:3='',<-1>,1:4] + +[errors] +"""line 1:2 token recognition error at: 'ax' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/LexerExecDFA.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/LexerExecDFA.txt new file mode 100644 index 0000000000..16aa4fdbd2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/LexerExecDFA.txt @@ -0,0 +1,25 @@ +[notes] +This is a regression test for #45 "NullPointerException in LexerATNSimulator.execDFA". https://github.com/antlr/antlr4/issues/46 + +[type] +Lexer + +[grammar] +lexer grammar L; +COLON : ':' ; +PTR : '->' ; +ID : [a-z]+; + +[input] +x : x + +[output] +[@0,0:0='x',<3>,1:0] +[@1,2:2=':',<1>,1:2] +[@2,4:4='x',<3>,1:4] +[@3,5:4='',<-1>,1:5] + +[errors] +line 1:1 token recognition error at: ' ' +line 1:3 token recognition error at: ' ' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/StringsEmbeddedInActions_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/StringsEmbeddedInActions_1.txt new file mode 100644 index 0000000000..1c2b65214b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/StringsEmbeddedInActions_1.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ACTION2 : '[' (STRING | ~'"')*? ']'; +STRING : '"' ('\\\\' '"' | .)*? 
'"'; +WS : [ \t\r\n]+ -> skip; + +[input] +["foo"] + +[output] +[@0,0:6='["foo"]',<1>,1:0] +[@1,7:6='',<-1>,1:7] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/StringsEmbeddedInActions_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/StringsEmbeddedInActions_2.txt new file mode 100644 index 0000000000..2ce4047472 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerErrors/StringsEmbeddedInActions_2.txt @@ -0,0 +1,20 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ACTION2 : '[' (STRING | ~'"')*? ']'; +STRING : '"' ('\\\\' '"' | .)*? '"'; +WS : [ \t\r\n]+ -> skip; + +[input] +["foo] + +[output] +"""[@0,6:5='',<-1>,1:6] +""" + +[errors] +"""line 1:0 token recognition error at: '["foo]' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ActionPlacement.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ActionPlacement.txt new file mode 100644 index 0000000000..8ec87f8f5a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ActionPlacement.txt @@ -0,0 +1,24 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : ({} 'a' +| {} + 'a' {} + 'b' {}) + {} ; +WS : (' '|'\n') -> skip ; +J : .; + +[input] +ab + +[output] +stuff0: +stuff1: a +stuff2: ab +ab +[@0,0:1='ab',<1>,1:0] +[@1,2:1='',<-1>,1:2] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSet.txt new file mode 100644 index 0000000000..d4ba73ee7c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSet.txt @@ -0,0 +1,19 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : '0'..'9'+ {} ; +WS : [ \n\\u000D] -> skip ; + +[input] +"""34 + 34""" + +[output] +I +I +[@0,0:1='34',<1>,1:0] +[@1,4:5='34',<1>,2:1] 
+[@2,6:5='',<-1>,2:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetInSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetInSet.txt new file mode 100644 index 0000000000..84c2bd9fa1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetInSet.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : (~[ab \\n]|'a') {} ; +WS : [ \n\\u000D]+ -> skip ; + +[input] +a x + +[output] +I +I +[@0,0:0='a',<1>,1:0] +[@1,2:2='x',<1>,1:2] +[@2,3:2='',<-1>,1:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetNot.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetNot.txt new file mode 100644 index 0000000000..a68132b4c7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetNot.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : ~[ab \n] ~[ \ncd]* {} ; +WS : [ \n\\u000D]+ -> skip ; + +[input] +xaf + +[output] +I +[@0,0:2='xaf',<1>,1:0] +[@1,3:2='',<-1>,1:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetPlus.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetPlus.txt new file mode 100644 index 0000000000..24f4585a67 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetPlus.txt @@ -0,0 +1,19 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : '0'..'9'+ {} ; +WS : [ \n\\u000D]+ -> skip ; + +[input] +"""34 + 34""" + +[output] +I +I +[@0,0:1='34',<1>,1:0] +[@1,4:5='34',<1>,2:1] +[@2,6:5='',<-1>,2:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetRange.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetRange.txt new file mode 100644 index 
0000000000..b65a50ce60 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetRange.txt @@ -0,0 +1,25 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : [0-9]+ {} ; +ID : [a-zA-Z] [a-zA-Z0-9]* {} ; +WS : [ \n\\u0009\r]+ -> skip ; + +[input] +"""34 + 34 a2 abc + """ + +[output] +I +I +ID +ID +[@0,0:1='34',<1>,1:0] +[@1,4:5='34',<1>,2:1] +[@2,7:8='a2',<2>,2:4] +[@3,10:12='abc',<2>,2:7] +[@4,18:17='',<-1>,3:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithEscapedChar.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithEscapedChar.txt new file mode 100644 index 0000000000..3ed621601f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithEscapedChar.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +DASHBRACK : [\\-\]]+ {} ; +WS : [ \n]+ -> skip ; + +[input] +"""- ] """ + +[output] +DASHBRACK +DASHBRACK +[@0,0:0='-',<1>,1:0] +[@1,2:2=']',<1>,1:2] +[@2,4:3='',<-1>,1:4] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithMissingEscapeChar.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithMissingEscapeChar.txt new file mode 100644 index 0000000000..48ef9bc3f7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithMissingEscapeChar.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : [0-9]+ {} ; +WS : [ \n]+ -> skip ; + +[input] +"""34 """ + +[output] +I +[@0,0:1='34',<1>,1:0] +[@1,3:2='',<-1>,1:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithQuote1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithQuote1.txt new file mode 100644 index 0000000000..82e1f42a6a --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithQuote1.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : ["a-z]+ {} ; +WS : [ \n\t]+ -> skip ; + +[input] +b"a + +[output] +A +[@0,0:2='b"a',<1>,1:0] +[@1,3:2='',<-1>,1:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithQuote2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithQuote2.txt new file mode 100644 index 0000000000..9af4d641d2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/CharSetWithQuote2.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : ["\\\\ab]+ {} ; +WS : [ \n\t]+ -> skip ; + +[input] +b"\a + +[output] +A +[@0,0:3='b"\a',<1>,1:0] +[@1,4:3='',<-1>,1:4] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFByItself.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFByItself.txt new file mode 100644 index 0000000000..08232bd435 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFByItself.txt @@ -0,0 +1,15 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +DONE : EOF ; +A : 'a'; + +[input] + + +[output] +[@0,0:-1='',<1>,1:0] +[@1,0:-1='',<-1>,1:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFSuffixInFirstRule_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFSuffixInFirstRule_1.txt new file mode 100644 index 0000000000..8ad081813e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFSuffixInFirstRule_1.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'a' EOF ; +B : 'a'; +C : 'c'; + +[input] + + +[output] +"""[@0,0:-1='',<-1>,1:0] +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFSuffixInFirstRule_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFSuffixInFirstRule_2.txt new file mode 100644 index 0000000000..cc9383f7e5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EOFSuffixInFirstRule_2.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : 'a' EOF ; +B : 'a'; +C : 'c'; + +[input] +a + +[output] +[@0,0:0='a',<1>,1:0] +[@1,1:0='',<-1>,1:1] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EscapeTargetStringLiteral.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EscapeTargetStringLiteral.txt new file mode 100644 index 0000000000..33e7de1c43 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EscapeTargetStringLiteral.txt @@ -0,0 +1,15 @@ +[notes] +This is a regression test for antlr/antlr4#2709 "PHP target generates +invalid output when $ is used as part of the literal in lexer rule" +https://github.com/antlr/antlr4/issues/2709 + +[type] +Lexer + +[grammar] +lexer grammar L; +ACTION_WITH_DOLLAR: '$ACTION'; + +[output] +"""[@0,0:-1='',<-1>,1:0] +""" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EscapedCharacters.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EscapedCharacters.txt new file mode 100644 index 0000000000..5f62cc8ede --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/EscapedCharacters.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +LF : '\\u000A'; +X : 'x'; + +[input] +"""x +""" + +[output] +[@0,0:0='x',<2>,1:0] +[@1,1:1='\n',<1>,1:1] +[@2,2:1='',<-1>,2:0] diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyClosure.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyClosure.txt new file mode 100644 index 0000000000..960b66ca62 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyClosure.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '//' .*? '\n' CMT*; +WS : (' '|'\t')+; + +[input] +//blah +//blah + +[output] +[@0,0:13='//blah\n//blah\n',<1>,1:0] +[@1,14:13='',<-1>,3:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyConfigs.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyConfigs.txt new file mode 100644 index 0000000000..300d97bbcc --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyConfigs.txt @@ -0,0 +1,17 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : ('a' | 'ab') {} ; +WS : (' '|'\n') -> skip ; +J : .; + +[input] +ab + +[output] +ab +[@0,0:1='ab',<1>,1:0] +[@1,2:1='',<-1>,1:2] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyOptional.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyOptional.txt new file mode 100644 index 0000000000..5ece832975 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyOptional.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '//' .*? 
'\n' CMT?; +WS : (' '|'\t')+; + +[input] +//blah +//blah + +[output] +[@0,0:13='//blah\n//blah\n',<1>,1:0] +[@1,14:13='',<-1>,3:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyPositiveClosure.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyPositiveClosure.txt new file mode 100644 index 0000000000..f031174d83 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/GreedyPositiveClosure.txt @@ -0,0 +1,16 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : ('//' .*? '\n')+; +WS : (' '|'\t')+; + +[input] +//blah +//blah + +[output] +[@0,0:13='//blah\n//blah\n',<1>,1:0] +[@1,14:13='',<-1>,3:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/HexVsID.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/HexVsID.txt new file mode 100644 index 0000000000..024bd4c043 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/HexVsID.txt @@ -0,0 +1,32 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +HexLiteral : '0' ('x'|'X') HexDigit+ ; +DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ; +FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ; +DOT : '.' 
; +ID : 'a'..'z'+ ; +fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ; +WS : (' '|'\n')+; + +[input] +x 0 1 a.b a.l + +[output] +[@0,0:0='x',<5>,1:0] +[@1,1:1=' ',<6>,1:1] +[@2,2:2='0',<2>,1:2] +[@3,3:3=' ',<6>,1:3] +[@4,4:4='1',<2>,1:4] +[@5,5:5=' ',<6>,1:5] +[@6,6:6='a',<5>,1:6] +[@7,7:7='.',<4>,1:7] +[@8,8:8='b',<5>,1:8] +[@9,9:9=' ',<6>,1:9] +[@10,10:10='a',<5>,1:10] +[@11,11:11='.',<4>,1:11] +[@12,12:12='l',<5>,1:12] +[@13,13:12='',<-1>,1:13] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/KeywordID.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/KeywordID.txt new file mode 100644 index 0000000000..d5dba55a83 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/KeywordID.txt @@ -0,0 +1,22 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +KEND : 'end' ; // has priority +ID : 'a'..'z'+ ; +WS : (' '|'\n')+; + +[input] +end eend ending a + +[output] +[@0,0:2='end',<1>,1:0] +[@1,3:3=' ',<3>,1:3] +[@2,4:7='eend',<2>,1:4] +[@3,8:8=' ',<3>,1:8] +[@4,9:14='ending',<2>,1:9] +[@5,15:15=' ',<3>,1:15] +[@6,16:16='a',<2>,1:16] +[@7,17:16='',<-1>,1:17] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyClosure.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyClosure.txt new file mode 100644 index 0000000000..11a30c874f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyClosure.txt @@ -0,0 +1,17 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '//' .*? 
'\n' CMT*?; +WS : (' '|'\t')+; + +[input] +//blah +//blah + +[output] +[@0,0:6='//blah\n',<1>,1:0] +[@1,7:13='//blah\n',<1>,2:0] +[@2,14:13='',<-1>,3:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyConfigs.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyConfigs.txt new file mode 100644 index 0000000000..316c5e9c25 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyConfigs.txt @@ -0,0 +1,19 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +I : .*? ('a' | 'ab') {} ; +WS : (' '|'\n') -> skip ; +J : . {}; + +[input] +ab + +[output] +a +b +[@0,0:0='a',<1>,1:0] +[@1,1:1='b',<3>,1:1] +[@2,2:1='',<-1>,1:2] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyOptional.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyOptional.txt new file mode 100644 index 0000000000..21dd16b944 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyOptional.txt @@ -0,0 +1,17 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '//' .*? '\n' CMT??; +WS : (' '|'\t')+; + +[input] +//blah +//blah + +[output] +[@0,0:6='//blah\n',<1>,1:0] +[@1,7:13='//blah\n',<1>,2:0] +[@2,14:13='',<-1>,3:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyPositiveClosure.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyPositiveClosure.txt new file mode 100644 index 0000000000..670dc33e7f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyPositiveClosure.txt @@ -0,0 +1,17 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : ('//' .*? 
'\n')+?; +WS : (' '|'\t')+; + +[input] +//blah +//blah + +[output] +[@0,0:6='//blah\n',<1>,1:0] +[@1,7:13='//blah\n',<1>,2:0] +[@2,14:13='',<-1>,3:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyTermination1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyTermination1.txt new file mode 100644 index 0000000000..1791cdaca2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyTermination1.txt @@ -0,0 +1,15 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +STRING : '!' ('!!' | .)*? '!'; + +[input] +!hi!!mom! + +[output] +[@0,0:3='!hi!',<1>,1:0] +[@1,4:8='!mom!',<1>,1:4] +[@2,9:8='',<-1>,1:9] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyTermination2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyTermination2.txt new file mode 100644 index 0000000000..23aa2ad78c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/NonGreedyTermination2.txt @@ -0,0 +1,14 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +STRING : '!' ('!!' | .)+? '!'; + +[input] +!!!mom! + +[output] +[@0,0:6='!!!mom!',<1>,1:0] +[@1,7:6='',<-1>,1:7] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/Parentheses.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/Parentheses.txt new file mode 100644 index 0000000000..be6a7f00ac --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/Parentheses.txt @@ -0,0 +1,25 @@ +[notes] +This is a regression test for antlr/antlr4#224: "Parentheses without +quantifier in lexer rules have unclear effect". 
+https://github.com/antlr/antlr4/issues/224 + +[type] +Lexer + +[grammar] +lexer grammar L; +START_BLOCK: '-.-.-'; +ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+; +fragment LETTER: L_A|L_K; +fragment L_A: '.-'; +fragment L_K: '-.-'; +SEPARATOR: '!'; + +[input] +-.-.-! + +[output] +[@0,0:4='-.-.-',<1>,1:0] +[@1,5:5='!',<3>,1:5] +[@2,6:5='',<-1>,1:6] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/PositionAdjustingLexer.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/PositionAdjustingLexer.txt new file mode 100644 index 0000000000..f6fa740232 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/PositionAdjustingLexer.txt @@ -0,0 +1,63 @@ +[type] +Lexer + +[grammar] +lexer grammar PositionAdjustingLexer; + +@definitions { + +} + +@members { + +} + +ASSIGN : '=' ; +PLUS_ASSIGN : '+=' ; +LCURLY: '{'; + +// 'tokens' followed by '{' +TOKENS : 'tokens' IGNORED '{'; + +// IDENTIFIER followed by '+=' or '=' +LABEL + : IDENTIFIER IGNORED '+'? 
'=' + ; + +IDENTIFIER + : [a-zA-Z_] [a-zA-Z0-9_]* + ; + +fragment +IGNORED + : [ \t\r\n]* + ; + +NEWLINE + : [\r\n]+ -> skip + ; + +WS + : [ \t]+ -> skip + ; + +[input] +tokens +tokens { +notLabel +label1 = +label2 += +notLabel + +[output] +[@0,0:5='tokens',<6>,1:0] +[@1,7:12='tokens',<4>,2:0] +[@2,14:14='{',<3>,2:7] +[@3,16:23='notLabel',<6>,3:0] +[@4,25:30='label1',<5>,4:0] +[@5,32:32='=',<1>,4:7] +[@6,34:39='label2',<5>,5:0] +[@7,41:42='+=',<2>,5:7] +[@8,44:51='notLabel',<6>,6:0] +[@9,53:52='',<-1>,7:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/QuoteTranslation.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/QuoteTranslation.txt new file mode 100644 index 0000000000..7053bfe34b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/QuoteTranslation.txt @@ -0,0 +1,14 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +QUOTE : '"' ; // make sure this compiles + +[input] +" + +[output] +[@0,0:0='"',<1>,1:0] +[@1,1:0='',<-1>,1:1] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardPlus_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardPlus_1.txt new file mode 100644 index 0000000000..e617727d6a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardPlus_1.txt @@ -0,0 +1,20 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '/*' (CMT | .)+? 
'*' '/' ; +WS : (' '|'\n')+; + +[input] +/* ick */ +/* /* */ +/* /*nested*/ */ + +[output] +[@0,0:8='/* ick */',<1>,1:0] +[@1,9:9='\n',<2>,1:9] +[@2,10:34='/* /* */\n/* /*nested*/ */',<1>,2:0] +[@3,35:35='\n',<2>,3:16] +[@4,36:35='',<-1>,4:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardPlus_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardPlus_2.txt new file mode 100644 index 0000000000..cdcab0efd6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardPlus_2.txt @@ -0,0 +1,24 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '/*' (CMT | .)+? '*' '/' ; +WS : (' '|'\n')+; + +[input] +/* ick */x +/* /* */x +/* /*nested*/ */x + +[output] +[@0,0:8='/* ick */',<1>,1:0] +[@1,10:10='\n',<2>,1:10] +[@2,11:36='/* /* */x\n/* /*nested*/ */',<1>,2:0] +[@3,38:38='\n',<2>,3:17] +[@4,39:38='',<-1>,4:0] + +[errors] +line 1:9 token recognition error at: 'x' +line 3:16 token recognition error at: 'x' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardStar_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardStar_1.txt new file mode 100644 index 0000000000..c3dfca374f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardStar_1.txt @@ -0,0 +1,20 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '/*' (CMT | .)*? 
'*' '/' ; +WS : (' '|'\n')+; + +[input] +/* ick */ +/* /* */ +/* /*nested*/ */ + +[output] +[@0,0:8='/* ick */',<1>,1:0] +[@1,9:9='\n',<2>,1:9] +[@2,10:34='/* /* */\n/* /*nested*/ */',<1>,2:0] +[@3,35:35='\n',<2>,3:16] +[@4,36:35='',<-1>,4:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardStar_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardStar_2.txt new file mode 100644 index 0000000000..7e218562f5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RecursiveLexerRuleRefWithWildcardStar_2.txt @@ -0,0 +1,24 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +CMT : '/*' (CMT | .)*? '*' '/' ; +WS : (' '|'\n')+; + +[input] +/* ick */x +/* /* */x +/* /*nested*/ */x + +[output] +[@0,0:8='/* ick */',<1>,1:0] +[@1,10:10='\n',<2>,1:10] +[@2,11:36='/* /* */x\n/* /*nested*/ */',<1>,2:0] +[@3,38:38='\n',<2>,3:17] +[@4,39:38='',<-1>,4:0] + +[errors] +line 1:9 token recognition error at: 'x' +line 3:16 token recognition error at: 'x' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.txt new file mode 100644 index 0000000000..ce8e67392f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.txt @@ -0,0 +1,18 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +A : '-' I ; +I : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + +[input] +34 -21 3 + +[output] +[@0,0:1='34',<2>,1:0] +[@1,3:5='-21',<1>,1:3] +[@2,7:7='3',<2>,1:7] +[@3,8:7='',<-1>,1:8] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ReservedWordsEscaping.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ReservedWordsEscaping.txt new file mode 100644 index 0000000000..785f1644e3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ReservedWordsEscaping.txt @@ -0,0 +1,23 @@ +[notes] +https://github.com/antlr/antlr4/issues/1070 + +[type] +Lexer + +[grammar] +lexer grammar L; + +channels { break } + +A: 'a' -> mode(for); + +mode for; +B: 'b' -> channel(break); + +[input] +ab + +[output] +[@0,0:0='a',<1>,1:0] +[@1,1:1='b',<2>,channel=2,1:1] +[@2,2:1='',<-1>,1:2] diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ReservedWordsEscaping_NULL.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ReservedWordsEscaping_NULL.txt new file mode 100644 index 0000000000..cd09169580 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ReservedWordsEscaping_NULL.txt @@ -0,0 +1,17 @@ +[notes] +https://github.com/antlr/antlr4/pull/3889 + +[type] +Lexer + +[grammar] +lexer grammar L; + +NULL : ('N' | 'n')('U' | 'u')('L' | 'l')('L' | 'l') ; + +[input] +NULL + +[output] +[@0,0:3='NULL',<1>,1:0] +[@1,4:3='',<-1>,1:4] diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/Slashes.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/Slashes.txt new file mode 100644 index 0000000000..c11711b96c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/Slashes.txt @@ -0,0 +1,21 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +Backslash : '\\\\'; +Slash : '/'; +Vee : '\\\\/'; +Wedge : '/\\\\'; +WS : [ \t] -> skip; + +[input] +\ / \/ /\ + +[output] +[@0,0:0='\',<1>,1:0] +[@1,2:2='/',<2>,1:2] +[@2,4:5='\/',<3>,1:4] +[@3,7:8='/\',<4>,1:7] +[@4,9:8='',<-1>,1:9] + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/StackoverflowDueToNotEscapedHyphen.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/StackoverflowDueToNotEscapedHyphen.txt new file mode 100644 index 0000000000..56f7e5e1a0 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/StackoverflowDueToNotEscapedHyphen.txt @@ -0,0 +1,17 @@ +[notes] +https://github.com/antlr/antlr4/issues/1943 + +[type] +Lexer + +[grammar] +lexer grammar L; +WORD : [a-z-+]+; + +[input] +word + +[output] +[@0,0:3='word',<1>,1:0] +[@1,4:3='',<-1>,1:4] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/TokenType0xFFFF.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/TokenType0xFFFF.txt new file mode 100644 index 0000000000..9018111fe7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/TokenType0xFFFF.txt @@ -0,0 +1,13 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +T_FFFF: 'FFFF' -> type(65535); + +[input] +FFFF + +[output] +[@0,0:3='FFFF',<65535>,1:0] +[@1,4:3='',<-1>,1:4] diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/UnicodeCharSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/UnicodeCharSet.txt new file mode 100644 index 0000000000..705920df8b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/UnicodeCharSet.txt @@ -0,0 +1,19 @@ +[notes] +regression test for antlr/antlr4#1925 + +[type] +Lexer + +[grammar] +lexer grammar L; +ID : ([A-Z_]|'Ā'..'\uFFFC') ([A-Z_0-9]|'Ā'..'\uFFFC')*; // FFFD+ are not valid char + +[input] +均 + +[output] +[@0,0:0='均',<1>,1:0] +[@1,1:0='',<-1>,1:1] + +[skip] +Rust \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ZeroLengthToken.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ZeroLengthToken.txt new file mode 100644 index 0000000000..f83d14a4e6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/LexerExec/ZeroLengthToken.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#687 "Empty zero-length tokens +cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match +zero-length tokens" +https://github.com/antlr/antlr4/issues/687 +https://github.com/antlr/antlr4/issues/688 + +[type] +Lexer + +[grammar] +lexer grammar L; +BeginString + : '\'' -> more, pushMode(StringMode) + ; +mode StringMode; + StringMode_X : 'x' -> more; + StringMode_Done : -> more, mode(EndStringMode); +mode EndStringMode; + EndString : '\'' -> popMode; + +[input] +'xxx' + +[output] +[@0,0:4=''xxx'',<1>,1:0] +[@1,5:4='',<-1>,1:5] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/Basic.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/Basic.txt new file mode 100644 index 0000000000..e1e8eae221 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/Basic.txt @@ -0,0 +1,35 @@ +[type] +Parser + +[grammar] +grammar T; + + + + +s +@after { + + +} + : r=a ; +a : INT INT + | ID + ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +1 2 + +[output] +(a 1 2) +1 +2 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/LR.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/LR.txt new file mode 100644 index 0000000000..9f6467db99 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/LR.txt @@ -0,0 +1,39 @@ +[type] +Parser + +[grammar] +grammar T; + + + + +s +@after { + + +} + : r=e ; +e : e op='*' e + | e op='+' e + | INT + ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID 
: [a-z]+ ; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +1+2*3 + +[output] +(e (e 1) + (e (e 2) * (e 3))) +1 +2 +3 +2 3 2 +1 2 1 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/LRWithLabels.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/LRWithLabels.txt new file mode 100644 index 0000000000..4f6acc08e2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/LRWithLabels.txt @@ -0,0 +1,38 @@ +[type] +Parser + +[grammar] +grammar T; + + + + +s +@after { + + +} + : r=e ; +e : e '(' eList ')' # Call + | INT # Int + ; +eList : e (',' e)* ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +1(2,3) + +[output] +(e (e 1) ( (eList (e 2) , (e 3)) )) +1 +2 +3 +1 [13 6] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/RuleGetters_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/RuleGetters_1.txt new file mode 100644 index 0000000000..693bb5c745 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/RuleGetters_1.txt @@ -0,0 +1,35 @@ +[type] +Parser + +[grammar] +grammar T; + + + + +s +@after { + + +} + : r=a ; +a : b b // forces list + | b // a list still + ; +b : ID | INT; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +1 2 + +[output] +(a (b 1) (b 2)) +1 2 1 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/RuleGetters_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/RuleGetters_2.txt new file mode 100644 index 0000000000..88770876bf --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/RuleGetters_2.txt @@ -0,0 +1,35 @@ +[type] +Parser + +[grammar] +grammar T; + + + + +s +@after { + + +} + : r=a ; +a : b b 
// forces list + | b // a list still + ; +b : ID | INT; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +abc + +[output] +(a (b abc)) +abc + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/TokenGetters_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/TokenGetters_1.txt new file mode 100644 index 0000000000..98e161f2f0 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/TokenGetters_1.txt @@ -0,0 +1,34 @@ +[type] +Parser + +[grammar] +grammar T; + + + + +s +@after { + + +} + : r=a ; +a : INT INT + | ID + ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +1 2 + +[output] +(a 1 2) +1 2 [1, 2] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/TokenGetters_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/TokenGetters_2.txt new file mode 100644 index 0000000000..5816aa3c35 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Listeners/TokenGetters_2.txt @@ -0,0 +1,34 @@ +[type] +Parser + +[grammar] +grammar T; + + + + +s +@after { + + +} + : r=a ; +a : INT INT + | ID + ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; + +[start] +s + +[input] +abc + +[output] +(a abc) +[@0,0:2='abc',<4>,1:0] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/AltNum.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/AltNum.txt new file mode 100644 index 0000000000..bf590889be --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/AltNum.txt @@ -0,0 +1,40 @@ +[type] +Parser + +[grammar] +grammar T; + +options { contextSuperClass=MyRuleNode; } + + + + +s +@init { + +} +@after { + +} + : r=a ; + +a : 'f' 
+ | 'g' + | 'x' b 'z' + ; +b : 'e' {} | 'y' + ; + +[start] +s + +[input] +xyz + +[output] +"""(a:3 x (b:2 y) z) +""" + +[skip] +Go +Rust \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/ExtraToken.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/ExtraToken.txt new file mode 100644 index 0000000000..172d14a692 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/ExtraToken.txt @@ -0,0 +1,32 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' 'y' + ; +Z : 'z' + ; + +[start] +s + +[input] +xzy + +[output] +"""(a x z y) +""" + +[errors] +"""line 1:1 extraneous input 'z' expecting 'y' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/ExtraTokensAndAltLabels.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/ExtraTokensAndAltLabels.txt new file mode 100644 index 0000000000..75a49d3333 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/ExtraTokensAndAltLabels.txt @@ -0,0 +1,46 @@ +[type] +Parser + +[grammar] +grammar T; + +s +@init { + +} +@after { + +} + : '${' v '}' + ; + +v : A #altA + | B #altB + ; + +A : 'a' ; +B : 'b' ; + +WHITESPACE : [ \n\t\r]+ -> channel(HIDDEN) ; + +ERROR : . ; + +[start] +s + +[input] +${ ? a ?} + +[output] +"""(s ${ (v ? a) ? }) +""" + +[errors] +line 1:3 extraneous input '?' expecting {'a', 'b'} +line 1:7 extraneous input '?' 
expecting '}' + +[skip] +Cpp +Go +PHP + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/NoViableAlt.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/NoViableAlt.txt new file mode 100644 index 0000000000..24932bcab4 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/NoViableAlt.txt @@ -0,0 +1,32 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' | 'y' + ; +Z : 'z' + ; + +[start] +s + +[input] +z + +[output] +"""(a z) +""" + +[errors] +"""line 1:0 mismatched input 'z' expecting {'x', 'y'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/RuleRef.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/RuleRef.txt new file mode 100644 index 0000000000..880749f7b1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/RuleRef.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init { + +} +@after { + +} + : r=a ; +a : b 'x' + ; +b : 'y' + ; + +[start] +s + +[input] +yx + +[output] +"""(a (b y) x) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/Sync.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/Sync.txt new file mode 100644 index 0000000000..e64edff226 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/Sync.txt @@ -0,0 +1,32 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' 'y'* '!' + ; +Z : 'z' + ; + +[start] +s + +[input] +xzyy! + +[output] +"""(a x z y y !) 
+""" + +[errors] +"""line 1:1 extraneous input 'z' expecting {'y', '!'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/Token2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/Token2.txt new file mode 100644 index 0000000000..53eb9fa3ca --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/Token2.txt @@ -0,0 +1,26 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' 'y' + ; + +[start] +s + +[input] +xy + +[output] +"""(a x y) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TokenAndRuleContextString.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TokenAndRuleContextString.txt new file mode 100644 index 0000000000..6f2a7068e8 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TokenAndRuleContextString.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; + + +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' { + +} ; + +[start] +s + +[input] +x + +[output] +[a, s] +(a x) + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TwoAltLoop.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TwoAltLoop.txt new file mode 100644 index 0000000000..0c71e39420 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TwoAltLoop.txt @@ -0,0 +1,26 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init { + +} +@after { + +} + : r=a ; +a : ('x' | 'y')* 'z' + ; + +[start] +s + +[input] +xyyxyxz + +[output] +"""(a x y y x y x z) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TwoAlts.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TwoAlts.txt new file mode 100644 index 0000000000..a91cf76ba2 --- 
/dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParseTrees/TwoAlts.txt @@ -0,0 +1,26 @@ +[type] +Parser + +[grammar] +grammar T; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' | 'y' + ; + +[start] +s + +[input] +y + +[output] +"""(a y) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ConjuringUpToken.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ConjuringUpToken.txt new file mode 100644 index 0000000000..ec932d59b0 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ConjuringUpToken.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' x='b' {} 'c' ; + +[start] +a + +[input] +ac + +[output] +"""conjured=[@-1,-1:-1='',<2>,1:1] +""" + +[errors] +"""line 1:1 missing 'b' at 'c' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ConjuringUpTokenFromSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ConjuringUpTokenFromSet.txt new file mode 100644 index 0000000000..59e9286204 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ConjuringUpTokenFromSet.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' x=('b'|'c') {} 'd' ; + +[start] +a + +[input] +ad + +[output] +"""conjured=[@-1,-1:-1='',<2>,1:1] +""" + +[errors] +"""line 1:1 missing {'b', 'c'} at 'd' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ContextListGetters.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ContextListGetters.txt new file mode 100644 index 0000000000..1db5baad56 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ContextListGetters.txt @@ -0,0 +1,24 @@ +[notes] +Regression test for "Getter for context is not a list when it 
should be". +https://github.com/antlr/antlr4/issues/19 + +[type] +Parser + +[grammar] +grammar T; +@parser::members{ + +} +s : (a | b)+; +a : 'a' {}; +b : 'b' {}; + +[start] +s + +[input] +abab + +[output] +"""abab""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_1.txt new file mode 100644 index 0000000000..c1ec872a5b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_1.txt @@ -0,0 +1,16 @@ +[type] +Parser + +[grammar] +grammar T; +start : expr EOF; +expr : 'x' + | expr expr + ; + +[start] +start + +[input] +x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_2.txt new file mode 100644 index 0000000000..a6b4a7a4f7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_2.txt @@ -0,0 +1,16 @@ +[type] +Parser + +[grammar] +grammar T; +start : expr EOF; +expr : 'x' + | expr expr + ; + +[start] +start + +[input] +xx + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_3.txt new file mode 100644 index 0000000000..46738d3e24 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_3.txt @@ -0,0 +1,16 @@ +[type] +Parser + +[grammar] +grammar T; +start : expr EOF; +expr : 'x' + | expr expr + ; + +[start] +start + +[input] +xxx + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_4.txt new file mode 100644 index 0000000000..e5b8bddc5c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/DuplicatedLeftRecursiveCall_4.txt @@ -0,0 +1,16 @@ +[type] +Parser + +[grammar] +grammar T; +start : expr EOF; +expr : 'x' + | expr expr + ; + +[start] +start + +[input] +xxxx + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ExtraneousInput.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ExtraneousInput.txt new file mode 100644 index 0000000000..3d76cf6d5e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ExtraneousInput.txt @@ -0,0 +1,31 @@ +[type] +Parser + +[grammar] +grammar T; + +member : 'a'; +body : member*; +file : body EOF; +B : 'b'; + +[start] +file + +[input] +baa + +[output] + +[errors] +"""line 1:0 mismatched input 'b' expecting {, 'a'} +""" + +[skip] +Cpp +CSharp +Go +JavaScript +TypeScript +PHP +Python3 diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/InvalidATNStateRemoval.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/InvalidATNStateRemoval.txt new file mode 100644 index 0000000000..f843359231 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/InvalidATNStateRemoval.txt @@ -0,0 +1,23 @@ +[notes] +This is a regression test for #45 "NullPointerException in ATNConfig.hashCode". 
+https://github.com/antlr/antlr4/issues/45 +The original cause of this issue was an error in the tool's ATN state optimization, +which is now detected early in {@link ATNSerializer} by ensuring that all +serialized transitions point to states which were not removed. + +[type] +Parser + +[grammar] +grammar T; +start : ID ':' expr; +expr : primary expr? {} | expr '->' ID; +primary : ID; +ID : [a-z]+; + +[start] +start + +[input] +x:x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/InvalidEmptyInput.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/InvalidEmptyInput.txt new file mode 100644 index 0000000000..4a149b2d3b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/InvalidEmptyInput.txt @@ -0,0 +1,22 @@ +[notes] +This is a regression test for #6 "NullPointerException in getMissingSymbol". +https://github.com/antlr/antlr4/issues/6 + +[type] +Parser + +[grammar] +grammar T; +start : ID+; +ID : [a-z]+; + +[start] +start + +[input] + + +[errors] +"""line 1:0 mismatched input '' expecting ID +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL1ErrorInfo.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL1ErrorInfo.txt new file mode 100644 index 0000000000..958cabc892 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL1ErrorInfo.txt @@ -0,0 +1,29 @@ +[type] +Parser + +[grammar] +grammar T; +start : animal (AND acClass)? 
service EOF; +animal : (DOG | CAT ); +service : (HARDWARE | SOFTWARE) ; +AND : 'and'; +DOG : 'dog'; +CAT : 'cat'; +HARDWARE: 'hardware'; +SOFTWARE: 'software'; +WS : ' ' -> skip ; +acClass +@init +{} + : ; + +[start] +start + +[input] +dog and software + +[output] +"""{'hardware', 'software'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL2.txt new file mode 100644 index 0000000000..a75c2c9322 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL2.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b' + | 'a' 'c' +; +q : 'e' ; + +[start] +a + +[input] +ae + +[errors] +"""line 1:1 no viable alternative at input 'ae' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL3.txt new file mode 100644 index 0000000000..0d41bb0ce1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LL3.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b'* 'c' + | 'a' 'b' 'd' +; +q : 'e' ; + +[start] +a + +[input] +abe + +[errors] +"""line 1:2 no viable alternative at input 'abe' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LLStar.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LLStar.txt new file mode 100644 index 0000000000..65ebf91fea --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/LLStar.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a'+ 'b' + | 'a'+ 'c' +; +q : 'e' ; + +[start] +a + +[input] +aaae + +[errors] +"""line 1:3 no viable alternative at input 'aaae' +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionBeforeLoop.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionBeforeLoop.txt new file mode 100644 index 0000000000..18791af00b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionBeforeLoop.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b'* 'c'; + +[start] +a + +[input] +aacabc + +[errors] +"""line 1:1 extraneous input 'a' expecting {'b', 'c'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionBeforeLoop2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionBeforeLoop2.txt new file mode 100644 index 0000000000..85cded2dfe --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionBeforeLoop2.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' ('b'|'z'{})* 'c'; + +[start] +a + +[input] +aacabc + +[errors] +"""line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionDuringLoop.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionDuringLoop.txt new file mode 100644 index 0000000000..faba53c978 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionDuringLoop.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b'* 'c' ; + +[start] +a + +[input] +abaaababc + +[errors] +line 1:2 extraneous input 'a' expecting {'b', 'c'} +line 1:6 extraneous input 'a' expecting {'b', 'c'} + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionDuringLoop2.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionDuringLoop2.txt new file mode 100644 index 0000000000..cf08e3c996 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/MultiTokenDeletionDuringLoop2.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' ('b'|'z'{})* 'c' ; + +[start] +a + +[input] +abaaababc + +[errors] +line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'} +line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'} + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/NoViableAltAvoidance.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/NoViableAltAvoidance.txt new file mode 100644 index 0000000000..3347ec820c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/NoViableAltAvoidance.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +s : e '!' ; +e : 'a' 'b' + | 'a' + ; +DOT : '.' ; +WS : [ \t\r\n]+ -> skip; + +[start] +s + +[input] +a. + +[errors] +"""line 1:1 mismatched input '.' expecting '!' 
+""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleSetInsertion.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleSetInsertion.txt new file mode 100644 index 0000000000..ce1b18a09f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleSetInsertion.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' ('b'|'c') 'd' ; + +[start] +a + +[input] +ad + +[errors] +"""line 1:1 missing {'b', 'c'} at 'd' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleSetInsertionConsumption.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleSetInsertionConsumption.txt new file mode 100644 index 0000000000..8c93a644e3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleSetInsertionConsumption.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +myset: ('b'|'c') ; +a: 'a' myset 'd' {} ; + +[start] +a + +[input] +ad + +[output] +"""[@0,0:0='a',<3>,1:0] +""" + +[errors] +"""line 1:1 missing {'b', 'c'} at 'd' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletion.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletion.txt new file mode 100644 index 0000000000..8ec847bf61 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletion.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b' ; + +[start] +a + +[input] +aab + +[errors] +"""line 1:1 extraneous input 'a' expecting 'b' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeAlt.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeAlt.txt new file mode 100644 index 0000000000..900ac4d8b9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeAlt.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : ('b' | 'c') +; +q : 'a' +; + +[start] +a + +[input] +ac + +[errors] +"""line 1:0 extraneous input 'a' expecting {'b', 'c'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeLoop.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeLoop.txt new file mode 100644 index 0000000000..5d6b7958bb --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeLoop.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b'* EOF ; + +[start] +a + +[input] +aabc + +[errors] +line 1:1 extraneous input 'a' expecting {, 'b'} +line 1:3 token recognition error at: 'c' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeLoop2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeLoop2.txt new file mode 100644 index 0000000000..aef8200492 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforeLoop2.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' ('b'|'z'{})* EOF ; + +[start] +a + +[input] +aabc + +[errors] +line 1:1 extraneous input 'a' expecting {, 'b', 'z'} +line 1:3 token recognition error at: 'c' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforePredict.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforePredict.txt new file mode 100644 index 0000000000..826697e877 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionBeforePredict.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a'+ 'b' + | 'a'+ 'c' +; +q : 'e' ; + +[start] +a + +[input] +caaab + +[errors] +"""line 1:0 extraneous input 'c' expecting 'a' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionConsumption.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionConsumption.txt new file mode 100644 index 0000000000..c9f1b4a1db --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionConsumption.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +myset: ('b'|'c') ; +a: 'a' myset 'd' {} ; + +[start] +a + +[input] +aabd + +[output] +"""[@2,2:2='b',<1>,1:2] +""" + +[errors] +"""line 1:1 extraneous input 'a' expecting {'b', 'c'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionDuringLoop.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionDuringLoop.txt new file mode 100644 index 0000000000..c0d99d5aa2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionDuringLoop.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b'* 'c' ; + +[start] +a + +[input] +ababbc + +[errors] +"""line 1:2 extraneous input 'a' expecting {'b', 'c'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionDuringLoop2.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionDuringLoop2.txt new file mode 100644 index 0000000000..aad9f70524 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionDuringLoop2.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' ('b'|'z'{})* 'c' ; + +[start] +a + +[input] +ababbc + +[errors] +"""line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionExpectingSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionExpectingSet.txt new file mode 100644 index 0000000000..6715ab68c8 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenDeletionExpectingSet.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' ('b'|'c') ; + +[start] +a + +[input] +aab + +[errors] +"""line 1:1 extraneous input 'a' expecting {'b', 'c'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenInsertion.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenInsertion.txt new file mode 100644 index 0000000000..4e40faf8d3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/SingleTokenInsertion.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b' 'c' ; + +[start] +a + +[input] +ac + +[errors] +"""line 1:1 missing 'b' at 'c' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch.txt new file mode 100644 index 0000000000..2469359247 --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : 'a' 'b' ; + +[start] +a + +[input] +aa + +[errors] +"""line 1:1 mismatched input 'a' expecting 'b' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch2.txt new file mode 100644 index 0000000000..7163ec7cef --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch2.txt @@ -0,0 +1,24 @@ +[type] +Parser + +[grammar] +grammar T; + +stat: ( '(' expr? ')' )? EOF ; +expr: ID '=' STR ; + +ERR : '~FORCE_ERROR~' ; +ID : [a-zA-Z]+ ; +STR : '"' ~["]* '"' ; +WS : [ \t\r\n]+ -> skip ; + +[start] +stat + +[input] +"""( ~FORCE_ERROR~ """ + +[errors] +"""line 1:2 mismatched input '~FORCE_ERROR~' expecting {')', ID} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch3.txt new file mode 100644 index 0000000000..afe6fdfe72 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/TokenMismatch3.txt @@ -0,0 +1,38 @@ +[type] +Parser + +[grammar] +grammar T; + +expression +: value +| expression op=AND expression +| expression op=OR expression +; +value +: BOOLEAN_LITERAL +| ID +| ID1 +| '(' expression ')' +; + +AND : '&&'; +OR : '||'; + +BOOLEAN_LITERAL : 'true' | 'false'; + +ID : [a-z]+; +ID1 : '$'; + +WS : [ \t\r\n]+ -> skip ; + +[start] +expression + +[input] + + +[errors] +"""line 1:0 mismatched input '' expecting {'(', BOOLEAN_LITERAL, ID, '$'} +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/APlus.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/APlus.txt new file mode 100644 index 0000000000..58941a2780 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/APlus.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : ID+ { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +a b c + +[output] +"""abc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AStar_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AStar_1.txt new file mode 100644 index 0000000000..5a398756c3 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AStar_1.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : ID* { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] + + +[output] +""" +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AStar_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AStar_2.txt new file mode 100644 index 0000000000..a6f3796568 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AStar_2.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : ID* { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +a b c + +[output] +"""abc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAPlus.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAPlus.txt new file mode 100644 index 0000000000..1b5ceddc25 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAPlus.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|ID)+ { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +a b c + +[output] 
+"""abc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAStar_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAStar_1.txt new file mode 100644 index 0000000000..4f325d695f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAStar_1.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|ID)* { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] + + +[output] +""" +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAStar_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAStar_2.txt new file mode 100644 index 0000000000..24d74cb079 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorAStar_2.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|ID)* { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +a b c + +[output] +"""abc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorB.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorB.txt new file mode 100644 index 0000000000..f32b5e6ccd --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorB.txt @@ -0,0 +1,24 @@ +[type] +Parser + +[grammar] +grammar T; +a : ID { + +} | INT { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\\n') -> skip ; + +[start] +a + +[input] +34 + +[output] +"""alt 2 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBPlus.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBPlus.txt new file mode 100644 index 0000000000..3837de3b49 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBPlus.txt 
@@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|INT{ +})+ { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\\n') -> skip ; + +[start] +a + +[input] +a 34 c + +[output] +"""a34c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBStar_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBStar_1.txt new file mode 100644 index 0000000000..3fc94004c6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBStar_1.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|INT{ +})* { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\\n') -> skip ; + +[start] +a + +[input] + + +[output] +""" +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBStar_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBStar_2.txt new file mode 100644 index 0000000000..4d854cb691 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/AorBStar_2.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|INT{ +})* { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\\n') -> skip ; + +[start] +a + +[input] +a 34 c + +[output] +"""a34c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Basic.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Basic.txt new file mode 100644 index 0000000000..3683a22f4c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Basic.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +a : ID INT { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +abc 34 + +[output] +"""abc34 +""" + diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/BuildParseTree_FALSE.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/BuildParseTree_FALSE.txt new file mode 100644 index 0000000000..8253b1055e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/BuildParseTree_FALSE.txt @@ -0,0 +1,31 @@ +[type] +Parser + +[grammar] +grammar T; + +r + : a b {} + ; +a + : A + ; +b + : B + ; +A : 'A'; +B : 'B'; +WS : [ \r\n\t]+ -> skip ; + +[start] +r + +[input] +A B + +[output] +"""r +""" + +[flags] +notBuildParseTree diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/BuildParseTree_TRUE.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/BuildParseTree_TRUE.txt new file mode 100644 index 0000000000..45c48d6991 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/BuildParseTree_TRUE.txt @@ -0,0 +1,28 @@ +[type] +Parser + +[grammar] +grammar T; + +r + : a b {} + ; +a + : A + ; +b + : B + ; +A : 'A'; +B : 'B'; +WS : [ \r\n\t]+ -> skip ; + +[start] +r + +[input] +A B + +[output] +"""(r (a A) (b B)) +""" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseGreedyBinding1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseGreedyBinding1.txt new file mode 100644 index 0000000000..b29138d048 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseGreedyBinding1.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement ('else' statement)? 
{ + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); + +[start] +start + +[input] +if y if y x else x + +[output] +if y x else x +if y if y x else x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseGreedyBinding2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseGreedyBinding2.txt new file mode 100644 index 0000000000..3d5b342fbb --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseGreedyBinding2.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement ('else' statement|) { + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); + +[start] +start + +[input] +if y if y x else x + +[output] +if y x else x +if y if y x else x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseNonGreedyBinding1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseNonGreedyBinding1.txt new file mode 100644 index 0000000000..da1583410c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseNonGreedyBinding1.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement ('else' statement)?? 
{ + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); + +[start] +start + +[input] +if y if y x else x + +[output] +if y x +if y if y x else x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseNonGreedyBinding2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseNonGreedyBinding2.txt new file mode 100644 index 0000000000..ca8d7826a5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/IfIfElseNonGreedyBinding2.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement (|'else' statement) { + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); + +[start] +start + +[input] +if y if y x else x + +[output] +if y x +if y if y x else x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_1.txt new file mode 100644 index 0000000000..8495e96775 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_1.txt @@ -0,0 +1,19 @@ +[type] +Parser + +[grammar] +grammar T; + +program : state*{} EOF ; +state: 'break;' | 'continue;' | 'return;' ; + +[start] +program + +[input] +break;continue;return; + +[output] +"""break;continue;return; +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_2.txt new file mode 100644 index 0000000000..a753c96544 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_2.txt @@ -0,0 +1,19 @@ +[type] +Parser + +[grammar] +grammar T; + +program : sempred*{} EOF ; +sempred: 'break;' | 'continue;' | 'return;' ; + +[start] +program + +[input] 
+break;continue;return; + +[output] +"""break;continue;return; +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_3.txt new file mode 100644 index 0000000000..22e528b7bf --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_3.txt @@ -0,0 +1,19 @@ +[type] +Parser + +[grammar] +grammar T; + +program : action*{} EOF ; +action: 'break;' | 'continue;' | 'return;' ; + +[start] +program + +[input] +break;continue;return; + +[output] +"""break;continue;return; +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_4.txt new file mode 100644 index 0000000000..86566f8d4d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_4.txt @@ -0,0 +1,19 @@ +[type] +Parser + +[grammar] +grammar T; + +program : ruleIndexMap*{} EOF ; +ruleIndexMap: 'break;' | 'continue;' | 'return;' ; + +[start] +program + +[input] +break;continue;return; + +[output] +"""break;continue;return; +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_5.txt new file mode 100644 index 0000000000..47ac451705 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_5.txt @@ -0,0 +1,19 @@ +[type] +Parser + +[grammar] +grammar T; + +program : addErrorListener*{} EOF ; +addErrorListener: 'break;' | 'continue;' | 'return;' ; + +[start] +program + +[input] +break;continue;return; + +[output] +"""break;continue;return; +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_6.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_6.txt new file mode 100644 index 0000000000..0ec82792ef --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Keyword_6.txt @@ -0,0 +1,19 @@ +[type] +Parser + +[grammar] +grammar T; + +program : reset*{} EOF ; +reset: 'break;' | 'continue;' | 'return;' ; + +[start] +program + +[input] +break;continue;return; + +[output] +"""break;continue;return; +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LL1OptionalBlock_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LL1OptionalBlock_1.txt new file mode 100644 index 0000000000..e33efa3aba --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LL1OptionalBlock_1.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|{}INT)? { + +}; +ID : 'a'..'z'+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] + + +[output] +""" +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LL1OptionalBlock_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LL1OptionalBlock_2.txt new file mode 100644 index 0000000000..319abe22d4 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LL1OptionalBlock_2.txt @@ -0,0 +1,22 @@ +[type] +Parser + +[grammar] +grammar T; +a : (ID|{}INT)? 
{ + +}; +ID : 'a'..'z'+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +a + +[output] +"""a +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LabelAliasingAcrossLabeledAlternatives.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LabelAliasingAcrossLabeledAlternatives.txt new file mode 100644 index 0000000000..88145142c4 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/LabelAliasingAcrossLabeledAlternatives.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#195 "label 'label' type +mismatch with previous definition: TOKEN_LABEL!=RULE_LABEL" +https://github.com/antlr/antlr4/issues/195 + +[type] +Parser + +[grammar] +grammar T; +start : a* EOF; +a + : label=subrule {} #One + | label='y' {} #Two + ; +subrule : 'x'; +WS : (' '|'\n') -> skip ; + +[start] +start + +[input] +xy + +[output] +x +y + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Labels.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Labels.txt new file mode 100644 index 0000000000..3b4a534a43 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Labels.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : b1=b b2+=b* b3+=';' ; +b : id_=ID val+=INT*; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] +abc 34; + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelForClosureContext.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelForClosureContext.txt new file mode 100644 index 0000000000..ea0fc109f4 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelForClosureContext.txt @@ -0,0 +1,37 @@ +[notes] +This is a regression test for 
antlr/antlr4#299 "Repeating subtree not +accessible in visitor". https://github.com/antlr/antlr4/issues/299 + +[type] +Parser + +[grammar] +grammar T; +ifStatement +@after { +})> +} + : 'if' expression + ( ( 'then' + executableStatement* + elseIfStatement* // \<--- problem is here; should yield a list not node + elseStatement? + 'end' 'if' + ) | executableStatement ) + ; + +elseIfStatement + : 'else' 'if' expression 'then' executableStatement* + ; +expression : 'a' ; +executableStatement : 'a' ; +elseStatement : 'a' ; + +[start] +expression + +[input] +a + +[skip] +Go \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnRuleRefStartOfAlt.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnRuleRefStartOfAlt.txt new file mode 100644 index 0000000000..a2e436daf2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnRuleRefStartOfAlt.txt @@ -0,0 +1,33 @@ +[notes] +Checks that this compiles; see https://github.com/antlr/antlr4/issues/2016 + +[type] +Parser + +[grammar] +grammar Test; + +expression +@after { + +} + : op=NOT args+=expression + | args+=expression (op=AND args+=expression)+ + | args+=expression (op=OR args+=expression)+ + | IDENTIFIER + ; + +AND : 'and' ; +OR : 'or' ; +NOT : 'not' ; +IDENTIFIER : [a-zA-Z_][a-zA-Z0-9_]* ; +WS : [ \t\r\n]+ -> skip ; + +[start] +expression + +[input] +a and b + +[skip] +PHP \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnSet.txt new file mode 100644 index 0000000000..cf4d8dbced --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnSet.txt @@ -0,0 +1,22 @@ +[notes] +This is a regression test for #270 "Fix operator += applied 
to a set of +tokens". https://github.com/antlr/antlr4/issues/270 + +[type] +Parser + +[grammar] +grammar T; +a : b b* ';' ; +b : ID val+=(INT | FLOAT)*; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +FLOAT : [0-9]+ '.' [0-9]+; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] +abc 34; + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/MultipleEOFHandling.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/MultipleEOFHandling.txt new file mode 100644 index 0000000000..3d400f415a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/MultipleEOFHandling.txt @@ -0,0 +1,18 @@ +[notes] +This test ensures that {@link ParserATNSimulator} produces a correct +result when the grammar contains multiple explicit references to +{@code EOF} inside of parser rules. + +[type] +Parser + +[grammar] +grammar T; +prog : ('x' | 'x' 'y') EOF EOF; + +[start] +prog + +[input] +x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case1.txt new file mode 100644 index 0000000000..f7d7f92713 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case1.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#1545, case 1. + +[type] +Parser + +[grammar] +grammar OpenDeviceStatement; +program : statement+ '.' ; + +statement : 'OPEN' ( 'DEVICE' ( OPT1 | OPT2 | OPT3 )? 
)+ {} ; + +OPT1 : 'OPT-1'; +OPT2 : 'OPT-2'; +OPT3 : 'OPT-3'; + +WS : (' '|'\n')+ -> channel(HIDDEN); + +[start] +statement + +[input] +OPEN DEVICE DEVICE + +[output] +"""OPEN DEVICE DEVICE +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case2.txt new file mode 100644 index 0000000000..ee46983ec9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case2.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#1545, case 2. + +[type] +Parser + +[grammar] +grammar OpenDeviceStatement; +program : statement+ '.' ; + +statement : 'OPEN' ( 'DEVICE' ( (OPT1) | OPT2 | OPT3 )? )+ {} ; + +OPT1 : 'OPT-1'; +OPT2 : 'OPT-2'; +OPT3 : 'OPT-3'; + +WS : (' '|'\n')+ -> channel(HIDDEN); + +[start] +statement + +[input] +OPEN DEVICE DEVICE + +[output] +"""OPEN DEVICE DEVICE +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case3.txt new file mode 100644 index 0000000000..bb64b10973 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OpenDeviceStatement_Case3.txt @@ -0,0 +1,28 @@ +[notes] +This is a regression test for antlr/antlr4#1545, case 3. + +[type] +Parser + +[grammar] +grammar OpenDeviceStatement; +program : statement+ '.' ; + +statement : 'OPEN' ( 'DEVICE' ( (OPT1) | OPT2 | OPT3 )? )+ {} ; + +OPT1 : 'OPT-1'; +OPT2 : 'OPT-2'; +OPT3 : 'OPT-3'; + +WS : (' '|'\n')+ -> channel(HIDDEN); + +[start] +statement + +[input] +OPEN DEVICE DEVICE. 
+ +[output] +"""OPEN DEVICE DEVICE +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_1.txt new file mode 100644 index 0000000000..15bc560a4c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_1.txt @@ -0,0 +1,19 @@ +[notes] +This test is meant to detect regressions of bug antlr/antlr4#41. +https://github.com/antlr/antlr4/issues/41 + +[type] +Parser + +[grammar] +grammar T; +stat : ifstat | 'x'; +ifstat : 'if' stat ('else' stat)?; +WS : [ \n\t]+ -> skip ; + +[start] +stat + +[input] +x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_2.txt new file mode 100644 index 0000000000..e12700202a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_2.txt @@ -0,0 +1,19 @@ +[notes] +This test is meant to detect regressions of bug antlr/antlr4#41. +https://github.com/antlr/antlr4/issues/41 + +[type] +Parser + +[grammar] +grammar T; +stat : ifstat | 'x'; +ifstat : 'if' stat ('else' stat)?; +WS : [ \n\t]+ -> skip ; + +[start] +stat + +[input] +if x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_3.txt new file mode 100644 index 0000000000..f6f1a2e691 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_3.txt @@ -0,0 +1,19 @@ +[notes] +This test is meant to detect regressions of bug antlr/antlr4#41. 
+https://github.com/antlr/antlr4/issues/41 + +[type] +Parser + +[grammar] +grammar T; +stat : ifstat | 'x'; +ifstat : 'if' stat ('else' stat)?; +WS : [ \n\t]+ -> skip ; + +[start] +stat + +[input] +if x else x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_4.txt new file mode 100644 index 0000000000..257940d939 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Optional_4.txt @@ -0,0 +1,19 @@ +[notes] +This test is meant to detect regressions of bug antlr/antlr4#41. +https://github.com/antlr/antlr4/issues/41 + +[type] +Parser + +[grammar] +grammar T; +stat : ifstat | 'x'; +ifstat : 'if' stat ('else' stat)?; +WS : [ \n\t]+ -> skip ; + +[start] +stat + +[input] +if if x else x + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OrderingPredicates.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OrderingPredicates.txt new file mode 100644 index 0000000000..bc238266b7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/OrderingPredicates.txt @@ -0,0 +1,33 @@ +[notes] +This is a regression test for antlr/antlr4#2301. + +[type] +Parser + +[grammar] +grammar Issue2301; + +SPACES: [ \t\r\n]+ -> skip; + +AT: 'AT'; +X : 'X'; +Y : 'Y'; + +ID: [A-Z]+; + +constant +: 'DUMMY' +; + +expr +: ID constant? 
+| expr AT X +| expr AT Y +; + +[start] +expr + +[input] +POINT AT X + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ParserProperty.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ParserProperty.txt new file mode 100644 index 0000000000..6dcc3975dd --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ParserProperty.txt @@ -0,0 +1,25 @@ +[notes] +This is a regression test for antlr/antlr4#561 "Issue with parser +generation in 4.2.2" https://github.com/antlr/antlr4/issues/561 + +[type] +Parser + +[grammar] +grammar T; + +a : {}? ID {} + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] +abc + +[output] +"""valid +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredicatedIfIfElse.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredicatedIfIfElse.txt new file mode 100644 index 0000000000..b131fbe9de --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredicatedIfIfElse.txt @@ -0,0 +1,22 @@ +[notes] +This test is meant to test the expected solution to antlr/antlr4#42. 
+https://github.com/antlr/antlr4/issues/42 + +[type] +Parser + +[grammar] +grammar T; +s : stmt EOF ; +stmt : ifStmt | ID; +ifStmt : 'if' ID stmt ('else' stmt | { })> }?); +ELSE : 'else'; +ID : [a-zA-Z]+; +WS : [ \\n\\t]+ -> skip; + +[start] +s + +[input] +if x if x a else b + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionIssue334.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionIssue334.txt new file mode 100644 index 0000000000..4ec1088bc6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionIssue334.txt @@ -0,0 +1,32 @@ +[notes] +This is a regression test for antlr/antlr4#334 "BailErrorStrategy: bails +on proper input". https://github.com/antlr/antlr4/issues/334 + +[type] +Parser + +[grammar] +grammar T; +file_ @init{ + +} +@after { + +} + : item (SEMICOLON item)* SEMICOLON? EOF ; +item : A B?; +SEMICOLON: ';'; +A : 'a'|'A'; +B : 'b'|'B'; +WS : [ \r\t\n]+ -> skip; + +[start] +file_ + +[input] +a + +[output] +"""(file_ (item a) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionMode_LL.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionMode_LL.txt new file mode 100644 index 0000000000..88b81e79a6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionMode_LL.txt @@ -0,0 +1,31 @@ +[type] +Parser + +[grammar] +grammar T; + +r + : (a b | a) EOF {} + ; +a + : X Y? 
+ ; +b + : Y + ; +X: 'X'; +Y: 'Y'; +WS : [ \r\n\t]+ -> skip ; + +[start] +r + +[input] +X Y + +[output] +"""(r (a X) (b Y) ) +""" + +[flags] +predictionMode=LL diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionMode_SLL.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionMode_SLL.txt new file mode 100644 index 0000000000..d9ce8fd426 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/PredictionMode_SLL.txt @@ -0,0 +1,35 @@ +[type] +Parser + +[grammar] +grammar T; + +r + : (a b | a) EOF {} + ; +a + : X Y? + ; +b + : Y + ; +X: 'X'; +Y: 'Y'; +WS : [ \r\n\t]+ -> skip ; + +[start] +r + +[input] +X Y + +[output] +"""XY +""" + +[errors] +"""line 1:3 missing 'Y' at '' +""" + +[flags] +predictionMode=SLL diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReferenceToATN_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReferenceToATN_1.txt new file mode 100644 index 0000000000..db1855b1ee --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReferenceToATN_1.txt @@ -0,0 +1,24 @@ +[notes] +This is a regression test for antlr/antlr4#561 "Issue with parser +generation in 4.2.2" https://github.com/antlr/antlr4/issues/561 + +[type] +Parser + +[grammar] +grammar T; +a : (ID|ATN)* ATN? 
{} ; +ID : 'a'..'z'+ ; +ATN : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] + + +[output] +""" +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReferenceToATN_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReferenceToATN_2.txt new file mode 100644 index 0000000000..9e12a9ea3c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReferenceToATN_2.txt @@ -0,0 +1,24 @@ +[notes] +This is a regression test for antlr/antlr4#561 "Issue with parser +generation in 4.2.2" https://github.com/antlr/antlr4/issues/561 + +[type] +Parser + +[grammar] +grammar T; +a : (ID|ATN)* ATN? {} ; +ID : 'a'..'z'+ ; +ATN : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +a + +[input] +a 34 c + +[output] +"""a34c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReservedWordsEscaping.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReservedWordsEscaping.txt new file mode 100644 index 0000000000..08c3fc16ce --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReservedWordsEscaping.txt @@ -0,0 +1,38 @@ +[notes] +https://github.com/antlr/antlr4/issues/1070 + +[type] +Parser + +[grammar] +grammar G; + +root + : {0==0}? continue+ {} + ; + +continue returns [] + : for for? {1==1}? 
#else + | break=BREAK BREAK+ (for | IF) #else + | if+=IF if+=IF* #int + | continue CONTINUE_ {} #class + ; + +args[int else] locals [] + : for + ; + +for: FOR; +FOR: 'for '; +BREAK: 'break '; +IF: 'if '; +CONTINUE_: 'continue'; + +[start] +root + +[input] +for for break break for if if for continue + +[output] +"""for for break break for if if for continue""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/TokenOffset.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/TokenOffset.txt new file mode 100644 index 0000000000..e80800b594 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/TokenOffset.txt @@ -0,0 +1,29 @@ +[notes] +This is a regression test for antlr/antlr4#2728 +It should generate correct code for grammars with more than 65 tokens. +https://github.com/antlr/antlr4/pull/2728#issuecomment-622940562 + +[type] +Parser + +[grammar] +grammar L; +a : ('1'|'2'|'3'|'4'|'5'|'6'|'7'|'8'|'9'|'10'|'11'|'12'|'13'|'14'|'15'|'16' +|'17'|'18'|'19'|'20'|'21'|'22'|'23'|'24'|'25'|'26'|'27'|'28'|'29'|'30'|'31'|'32' +|'33'|'34'|'35'|'36'|'37'|'38'|'39'|'40'|'41'|'42'|'43'|'44'|'45'|'46'|'47'|'48' +|'49'|'50'|'51'|'52'|'53'|'54'|'55'|'56'|'57'|'58'|'59'|'60'|'61'|'62'|'63'|'64' +|'65'|'66')+ { + +}; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +12 34 56 66 + +[output] +"""12345666 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Wildcard.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Wildcard.txt new file mode 100644 index 0000000000..f84bbd8a64 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/Wildcard.txt @@ -0,0 +1,26 @@ +[notes] +Match assignments, ignore other tokens with wildcard. 
+ +[type] +Parser + +[grammar] +grammar T; +a : (assign|.)+ EOF ; +assign : ID '=' INT ';' { + +} ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip; + +[start] +a + +[input] +x=10; abc;;;; y=99; + +[output] +x=10; +y=99; + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/uStartingCharDoesNotCauseIllegalUnicodeEscape.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/uStartingCharDoesNotCauseIllegalUnicodeEscape.txt new file mode 100644 index 0000000000..e62c617b11 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/uStartingCharDoesNotCauseIllegalUnicodeEscape.txt @@ -0,0 +1,20 @@ +[notes] +Test for https://github.com/antlr/antlr4/issues/4128 + +[type] +Parser + +[grammar] +grammar u; +u : 'u' {}; + +[start] +u + +[input] +u + +[output] +"""u +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_1.txt new file mode 100644 index 0000000000..25866b809e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_1.txt @@ -0,0 +1,45 @@ +[notes] +for https://github.com/antlr/antlr4/issues/1398. +Seeing through a large expression takes 5 _minutes_ on +my fast box to complete. After fix, it's instantaneous. + +[type] +Parser + +[grammar] +grammar Expr; + +stat : expr ';' + | expr '.' + ; + +expr + : ID + | 'not' expr + | expr 'and' expr + | expr 'or' expr + | '(' ID ')' expr + | expr '?' 
expr ':' expr + | 'between' expr 'and' expr + ; + +ID: [a-zA-Z_][a-zA-Z_0-9]*; +WS: [ \t\n\r\f]+ -> skip; + +[start] +stat + +[input] +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 +; + +[skip] +PHP + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_2.txt new file mode 100644 index 0000000000..473fa04762 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_2.txt @@ -0,0 +1,44 @@ +[notes] +for https://github.com/antlr/antlr4/issues/1398. +Seeing through a large expression takes 5 _minutes_ on +my fast box to complete. After fix, it's instantaneous. + +[type] +Parser + +[grammar] +grammar Expr; + +stat : expr ';' + | expr '.' + ; + +expr + : ID + | 'not' expr + | expr 'and' expr + | expr 'or' expr + | '(' ID ')' expr + | expr '?' expr ':' expr + | 'between' expr 'and' expr + ; + +ID: [a-zA-Z_][a-zA-Z_0-9]*; +WS: [ \t\n\r\f]+ -> skip; + +[start] +stat + +[input] +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 or +X1 and X2 and X3 and X4 and X5 and X6 and X7 +. 
+ +[skip] +PHP + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_3.txt new file mode 100644 index 0000000000..00812caad0 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_3.txt @@ -0,0 +1,55 @@ +[notes] +for https://github.com/antlr/antlr4/issues/1398. +Seeing through a large expression takes 5 _minutes_ on +my fast box to complete. After fix, it's instantaneous. + +[type] +Parser + +[grammar] +grammar Expr; + +stat : expr ';' + | expr '.' + ; + +expr + : ID + | 'not' expr + | expr 'and' expr + | expr 'or' expr + | '(' ID ')' expr + | expr '?' expr ':' expr + | 'between' expr 'and' expr + ; + +ID: [a-zA-Z_][a-zA-Z_0-9]*; +WS: [ \t\n\r\f]+ -> skip; + +[start] +stat + +[input] +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 
and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 +; + +[skip] +PHP + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_4.txt new file mode 100644 index 0000000000..a6f4b3d966 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_4.txt @@ -0,0 +1,59 @@ +[notes] +for https://github.com/antlr/antlr4/issues/1398. +Seeing through a large expression takes 5 _minutes_ on +my fast box to complete. After fix, it's instantaneous. +Was working for C++ but I think it was not parsing correctly (Nov 2022) +So I'll make it skip for now and will add bug. I believe it is not +merging arrays properly. + +[type] +Parser + +[grammar] +grammar Expr; + +stat : expr ';' + | expr '.' + ; + +expr + : ID + | 'not' expr + | expr 'and' expr + | expr 'or' expr + | '(' ID ')' expr + | expr '?' 
expr ':' expr + | 'between' expr 'and' expr + ; + +ID: [a-zA-Z_][a-zA-Z_0-9]*; +WS: [ \t\n\r\f]+ -> skip; + +[start] +stat + +[input] +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 and +between X1 and X2 or between X3 and X4 +; + +[skip] +Python3 +JavaScript +TypeScript +PHP +Go + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_5.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_5.txt new file mode 100644 index 0000000000..e066f1dbfa --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_5.txt @@ -0,0 +1,66 @@ +[notes] +for https://github.com/antlr/antlr4/issues/1398. +Seeing through a large expression takes 5 _minutes_ on +my fast box to complete. After fix, it's instantaneous. + +[type] +Parser + +[grammar] +grammar Expr; + +stat : expr ';' + | expr '.' + ; + +expr + : ID + | 'not' expr + | expr 'and' expr + | expr 'or' expr + | '(' ID ')' expr + | expr '?' expr ':' expr + | 'between' expr 'and' expr + ; + +ID: [a-zA-Z_][a-zA-Z_0-9]*; +WS: [ \t\n\r\f]+ -> skip; + +[start] +stat + +[input] +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? 
Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z or +X ? Y : Z +; + +[skip] +PHP + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/ExpressionGrammar_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/ExpressionGrammar_1.txt new file mode 100644 index 0000000000..9c1ea39d66 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/ExpressionGrammar_1.txt @@ -0,0 +1,41 @@ +[notes] +This is a regression test for antlr/antlr4#192 "Poor performance of +expression parsing". https://github.com/antlr/antlr4/issues/192 + +[type] +Parser + +[grammar] +grammar Expr; + +program: expr EOF; + +expr + : ID + | 'not' expr + | expr 'and' expr + | expr 'or' expr + ; + +ID: [a-zA-Z_][a-zA-Z_0-9]*; +WS: [ \t\n\r\f]+ -> skip; +ERROR: .; + +[start] +program + +[input] +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or + X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not 
X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/ExpressionGrammar_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/ExpressionGrammar_2.txt new file mode 100644 index 0000000000..2d2056fceb --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/ExpressionGrammar_2.txt @@ -0,0 +1,38 @@ +[notes] +This is a regression test for antlr/antlr4#192 "Poor performance of +expression parsing". 
https://github.com/antlr/antlr4/issues/192 + +[type] +Parser + +[grammar] +grammar Expr; + +program: expr EOF; + +expr + : ID + | 'not' expr + | expr 'and' expr + | expr 'or' expr + ; + +ID: [a-zA-Z_][a-zA-Z_0-9]*; +WS: [ \t\n\r\f]+ -> skip; +ERROR: .; + +[start] +program + +[input] +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 
and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 
and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 
and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 
and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 
and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 
and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 
and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or +not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 
and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/DisableRule.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/DisableRule.txt new file mode 100644 index 0000000000..52a358b371 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/DisableRule.txt @@ -0,0 +1,28 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +E1 : 'enum' { }? ; +E2 : 'enum' { }? 
; // winner not E1 or ID +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip; + +[input] +enum abc + +[output] +[@0,0:3='enum',<2>,1:0] +[@1,5:7='abc',<3>,1:5] +[@2,8:7='',<-1>,1:8] +s0-' '->:s5=>4 +s0-'a'->:s6=>3 +s0-'e'->:s1=>3 +:s1=>3-'n'->:s2=>3 +:s2=>3-'u'->:s3=>3 +:s6=>3-'b'->:s6=>3 +:s6=>3-'c'->:s6=>3 + +[flags] +showDFA + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/EnumNotID.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/EnumNotID.txt new file mode 100644 index 0000000000..7be85967d1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/EnumNotID.txt @@ -0,0 +1,22 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ENUM : [a-z]+ { }? ; +ID : [a-z]+ ; +WS : (' '|'\n') -> skip; + +[input] +enum abc enum + +[output] +[@0,0:3='enum',<1>,1:0] +[@1,5:7='abc',<2>,1:5] +[@2,9:12='enum',<1>,1:9] +[@3,13:12='',<-1>,1:13] +s0-' '->:s3=>3 + +[flags] +showDFA + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/IDnotEnum.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/IDnotEnum.txt new file mode 100644 index 0000000000..dd0e871319 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/IDnotEnum.txt @@ -0,0 +1,22 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ENUM : [a-z]+ { }? 
; +ID : [a-z]+ ; +WS : (' '|'\n') -> skip; + +[input] +enum abc enum + +[output] +[@0,0:3='enum',<2>,1:0] +[@1,5:7='abc',<2>,1:5] +[@2,9:12='enum',<2>,1:9] +[@3,13:12='',<-1>,1:13] +s0-' '->:s2=>3 + +[flags] +showDFA + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/IDvsEnum.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/IDvsEnum.txt new file mode 100644 index 0000000000..91a825e6ef --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/IDvsEnum.txt @@ -0,0 +1,28 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ENUM : 'enum' { }? ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip; + +[input] +enum abc enum + +[output] +[@0,0:3='enum',<2>,1:0] +[@1,5:7='abc',<2>,1:5] +[@2,9:12='enum',<2>,1:9] +[@3,13:12='',<-1>,1:13] +s0-' '->:s5=>3 +s0-'a'->:s4=>2 +s0-'e'->:s1=>2 +:s1=>2-'n'->:s2=>2 +:s2=>2-'u'->:s3=>2 +:s4=>2-'b'->:s4=>2 +:s4=>2-'c'->:s4=>2 + +[flags] +showDFA + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/Indent.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/Indent.txt new file mode 100644 index 0000000000..dfa5077fed --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/Indent.txt @@ -0,0 +1,37 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ID : [a-z]+ ; +INDENT : [ \t]+ { }? 
+{ } ; +NL : '\n'; +WS : [ \t]+ ; + +[input] +"""abc + def +""" + +[output] +INDENT +[@0,0:2='abc',<1>,1:0] +[@1,3:3='\n',<3>,1:3] +[@2,4:5=' ',<2>,2:0] +[@3,6:8='def',<1>,2:2] +[@4,9:10=' ',<4>,2:5] +[@5,11:11='\n',<3>,2:7] +[@6,12:11='',<-1>,3:0] +s0-' +'->:s2=>3 +s0-'a'->:s1=>1 +s0-'d'->:s1=>1 +:s1=>1-'b'->:s1=>1 +:s1=>1-'c'->:s1=>1 +:s1=>1-'e'->:s1=>1 +:s1=>1-'f'->:s1=>1 + +[flags] +showDFA + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/LexerInputPositionSensitivePredicates.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/LexerInputPositionSensitivePredicates.txt new file mode 100644 index 0000000000..5b68133691 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/LexerInputPositionSensitivePredicates.txt @@ -0,0 +1,29 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +WORD1 : ID1+ { } ; +WORD2 : ID2+ { } ; +fragment ID1 : { \< 2 }? [a-zA-Z]; +fragment ID2 : { >= 2 }? [a-zA-Z]; +WS : (' '|'\n') -> skip; + +[input] +a cde +abcde + +[output] +a +cde +ab +cde +[@0,0:0='a',<1>,1:0] +[@1,2:4='cde',<2>,1:2] +[@2,6:7='ab',<1>,2:0] +[@3,8:10='cde',<2>,2:2] +[@4,12:11='',<-1>,3:0] + +[flags] +showDFA + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/PredicatedKeywords.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/PredicatedKeywords.txt new file mode 100644 index 0000000000..9e0eba556b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/PredicatedKeywords.txt @@ -0,0 +1,21 @@ +[type] +Lexer + +[grammar] +lexer grammar L; +ENUM : [a-z]+ { }? { } ; +ID : [a-z]+ { } ; +WS : [ \n] -> skip ; + +[input] +enum enu a + +[output] +enum! 
+ID enu +ID a +[@0,0:3='enum',<1>,1:0] +[@1,5:7='enu',<2>,1:5] +[@2,9:9='a',<2>,1:9] +[@3,10:9='',<-1>,1:10] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/RuleSempredFunction.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/RuleSempredFunction.txt new file mode 100644 index 0000000000..a86f9077fd --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexer/RuleSempredFunction.txt @@ -0,0 +1,19 @@ +[notes] +Test for https://github.com/antlr/antlr4/issues/958 + +[type] +Lexer + +[grammar] +lexer grammar L; +T : 'a' {}? ; + +[input] +aaa + +[output] +[@0,0:0='a',<1>,1:0] +[@1,1:1='a',<1>,1:1] +[@2,2:2='a',<1>,1:2] +[@3,3:2='',<-1>,1:3] + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ActionHidesPreds.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ActionHidesPreds.txt new file mode 100644 index 0000000000..d3ec008891 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ActionHidesPreds.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +@parser::members {} +s : a+ ; +a : {} ID {}? {} + | {} ID {}? 
{} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x x y + +[output] +alt 1 +alt 1 +alt 1 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.txt new file mode 100644 index 0000000000..151f382988 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.txt @@ -0,0 +1,31 @@ +[notes] +Regular non-forced actions can create side effects used by semantic +predicates and so we cannot evaluate any semantic predicate +encountered after having seen a regular action. This includes +during global follow operations. + +[type] +Parser + +[grammar] +grammar T; +@parser::members { + +} +s : e {} {}? {} '!' ; +t : e {} {}? ID ; +e : ID | ; // non-LL(1) so we use ATN +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a! + +[output] +eval=true +parse + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.txt new file mode 100644 index 0000000000..408cf8a3ca --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.txt @@ -0,0 +1,21 @@ +[notes] +This is a regression test for antlr/antlr4#196 +"element+ in expression grammar doesn't parse properly" +https://github.com/antlr/antlr4/issues/196 + +[type] +Parser + +[grammar] +grammar T; +start : e[0] EOF; +e[int _p] + : ( 'a' | 'b'+ ) ( {3 >= $_p}? 
'+' e[4] )* + ; + +[start] +start + +[input] +a+b+a + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.txt new file mode 100644 index 0000000000..4694a11182 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.txt @@ -0,0 +1,30 @@ +[notes] +We cannot collect predicates that are dependent on local context if +we are doing a global follow. They appear as if they were not there at all. + +[type] +Parser + +[grammar] +grammar T; +@parser::members { + +} +s : a[99] ; +a[int i] : e {}? {} '!' ; +b[int i] : e {}? ID ; +e : ID | ; // non-LL(1) so we use ATN +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a! + +[output] +eval=true +parse + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.txt new file mode 100644 index 0000000000..56a7e216b4 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : b[2] ';' | b[2] '.' ; // decision in s drills down to ctx-dependent pred in a; +b[] : a[] ; +a[] + : {}? ID {} + | {}? 
ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a; + +[output] +"""alt 2 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DisabledAlternative.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DisabledAlternative.txt new file mode 100644 index 0000000000..9325f07d9f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/DisabledAlternative.txt @@ -0,0 +1,20 @@ +[notes] +This is a regression test for antlr/antlr4#218 "ANTLR4 EOF Related Bug". +https://github.com/antlr/antlr4/issues/218 + +[type] +Parser + +[grammar] +grammar T; +cppCompilationUnit : content+ EOF; +content: anything | {}? .; +anything: ANY_CHAR; +ANY_CHAR: [_a-zA-Z0-9]; + +[start] +cppCompilationUnit + +[input] +hello + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.txt new file mode 100644 index 0000000000..607e990030 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : b ';' | b '.' ; +b : a ; +a + : {}? ID {} + | {}? 
ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a; + +[output] +"""alt 2 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.txt new file mode 100644 index 0000000000..0105c71521 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +s : a a; +a : {}? ID INT {} + | {}? ID INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +y 3 x 4 + +[errors] +"""line 1:0 no viable alternative at input 'y' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/Order.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/Order.txt new file mode 100644 index 0000000000..fba98c8ffd --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/Order.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : a {} a; // do 2x: once in ATN, next in DFA; +// action blocks lookahead from falling off of 'a' +// and looking into 2nd 'a' ref. !ctx dependent pred +a : ID {} + | {}? 
ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x y + +[output] +alt 1 +alt 1 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_1.txt new file mode 100644 index 0000000000..ca2fa6b9a1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_1.txt @@ -0,0 +1,44 @@ +[notes] +Loopback doesn't eval predicate at start of alt + +[type] +Parser + +[grammar] +grammar T; +file_ +@after {} + : para para EOF ; +para: paraContent NL NL ; +paraContent : ('s'|'x'|{})>}? NL)+ ; +NL : '\n' ; +s : 's' ; +X : 'x' ; + +[start] +file_ + +[input] +"""s + + +x +""" + +[output] +"""(file_ (para (paraContent s) \n \n) (para (paraContent \n x \n)) ) +""" + +[errors] +"""line 5:0 mismatched input '' expecting {'s', '\n', 'x'} +""" + +[skip] +Cpp +CSharp +Dart +Go +JavaScript +TypeScript +PHP +Python3 diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_2.txt new file mode 100644 index 0000000000..ec2e626591 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_2.txt @@ -0,0 +1,32 @@ +[notes] +Loopback doesn't eval predicate at start of alt + +[type] +Parser + +[grammar] +grammar T; +file_ +@after {} + : para para EOF ; +para: paraContent NL NL ; +paraContent : ('s'|'x'|{})>}? 
NL)+ ; +NL : '\n' ; +s : 's' ; +X : 'x' ; + +[start] +file_ + +[input] +"""s + + +x + +""" + +[output] +"""(file_ (para (paraContent s) \n \n) (para (paraContent \n x) \n \n) ) +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredTestedEvenWhenUnAmbig_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredTestedEvenWhenUnAmbig_1.txt new file mode 100644 index 0000000000..72974d2b9f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredTestedEvenWhenUnAmbig_1.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +@parser::members {} +primary + : ID {} + | {}? 'enum' {} + ; +ID : [a-z]+ ; +WS : [ \t\n\r]+ -> skip ; + +[start] +primary + +[input] +abc + +[output] +"""ID abc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredTestedEvenWhenUnAmbig_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredTestedEvenWhenUnAmbig_2.txt new file mode 100644 index 0000000000..f6a4d5c9b9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredTestedEvenWhenUnAmbig_2.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +@parser::members {} +primary + : ID {} + | {}? 
'enum' {} + ; +ID : [a-z]+ ; +WS : [ \t\n\r]+ -> skip ; + +[start] +primary + +[input] +enum + +[errors] +"""line 1:0 no viable alternative at input 'enum' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredicateDependentOnArg.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredicateDependentOnArg.txt new file mode 100644 index 0000000000..168bdbf924 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredicateDependentOnArg.txt @@ -0,0 +1,31 @@ +[notes] +In this case, we're passing a parameter into a rule that uses that +information to predict the alternatives. This is the special case +where we know exactly which context we are in. The context stack +is empty and we have not dipped into the outer context to make a decision. + +[type] +Parser + +[grammar] +grammar T; +@parser::members {} +s : a[2] a[1]; +a[int i] + : {}? ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a b + +[output] +alt 2 +alt 1 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredicateDependentOnArg2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredicateDependentOnArg2.txt new file mode 100644 index 0000000000..b8fa75d735 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredicateDependentOnArg2.txt @@ -0,0 +1,31 @@ +[notes] +In this case, we have to ensure that the predicates are not tested +during the closure after recognizing the 1st ID. The closure will +fall off the end of 'a' 1st time and reach into the a[1] rule +invocation. It should not execute predicates because it does not know +what the parameter is. The context stack will not be empty and so +they should be ignored. It will not affect recognition, however. 
We +are really making sure the ATN simulation doesn't crash with context +object issues when it encounters preds during FOLLOW. + +[type] +Parser + +[grammar] +grammar T; +@parser::members {} +s : a[2] a[1]; +a[int i] + : {}? ID + | {}? ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a b + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredsInGlobalFOLLOW.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredsInGlobalFOLLOW.txt new file mode 100644 index 0000000000..bf16fe3826 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredsInGlobalFOLLOW.txt @@ -0,0 +1,29 @@ +[notes] +During a global follow operation, we still collect semantic +predicates as long as they are not dependent on local context + +[type] +Parser + +[grammar] +grammar T; +@parser::members { + +} +s : e {}? {} '!' ; +t : e {}? ID ; +e : ID | ; // non-LL(1) so we use ATN +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +a! + +[output] +eval=true +parse + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/RewindBeforePredEval.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/RewindBeforePredEval.txt new file mode 100644 index 0000000000..436583f395 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/RewindBeforePredEval.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +s : a a; +a : {}? ID INT {} + | {}? 
ID INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +y 3 x 4 + +[output] +alt 2 +alt 1 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/Simple.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/Simple.txt new file mode 100644 index 0000000000..4301cdeee6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/Simple.txt @@ -0,0 +1,25 @@ +[type] +Parser + +[grammar] +grammar T; +s : a a a; // do 3x: once in ATN, next in DFA then INT in ATN +a : {}? ID {} + | {}? ID {} + | INT{} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x y 3 + +[output] +alt 2 +alt 2 +alt 3 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/SimpleValidate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/SimpleValidate.txt new file mode 100644 index 0000000000..99d7702984 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/SimpleValidate.txt @@ -0,0 +1,23 @@ +[type] +Parser + +[grammar] +grammar T; +s : a ; +a : {}? ID {} + | {}? INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x + +[errors] +"""line 1:0 no viable alternative at input 'x' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/SimpleValidate2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/SimpleValidate2.txt new file mode 100644 index 0000000000..46c678a0db --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/SimpleValidate2.txt @@ -0,0 +1,27 @@ +[type] +Parser + +[grammar] +grammar T; +s : a a a; +a : {}? ID {} + | {}? 
INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +3 4 x + +[output] +alt 2 +alt 2 + +[errors] +"""line 1:4 no viable alternative at input 'x' +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ToLeft.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ToLeft.txt new file mode 100644 index 0000000000..5d7dba78c9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ToLeft.txt @@ -0,0 +1,24 @@ +[type] +Parser + +[grammar] +grammar T; + s : a+ ; +a : {}? ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x x y + +[output] +alt 2 +alt 2 +alt 2 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ToLeftWithVaryingPredicate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ToLeftWithVaryingPredicate.txt new file mode 100644 index 0000000000..2746c2535e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ToLeftWithVaryingPredicate.txt @@ -0,0 +1,36 @@ +[notes] +In this case, we use predicates that depend on global information +like we would do for a symbol table. We simply execute +the predicates assuming that all necessary information is available. +The i++ action is done outside of the prediction and so it is executed. + +[type] +Parser + +[grammar] +grammar T; +@parser::members {} +s : ({ + +} a)+ ; +a : {}? ID {} + | {}? 
ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x x y + +[output] +i=1 +alt 2 +i=2 +alt 1 +i=3 +alt 2 + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/TwoUnpredicatedAlts.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/TwoUnpredicatedAlts.txt new file mode 100644 index 0000000000..a45cd9a319 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/TwoUnpredicatedAlts.txt @@ -0,0 +1,33 @@ +[type] +Parser + +[grammar] +grammar T; +s : {} a ';' a; // do 2x: once in ATN, next in DFA +a : ID {} + | ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x; y + +[output] +alt 1 +alt 1 + +[errors] +line 1:0 reportAttemptingFullContext d=0 (a), input='x' +line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x' +line 1:3 reportAttemptingFullContext d=0 (a), input='y' +line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/TwoUnpredicatedAltsAndOneOrthogonalAlt.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/TwoUnpredicatedAltsAndOneOrthogonalAlt.txt new file mode 100644 index 0000000000..88f0d73435 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/TwoUnpredicatedAltsAndOneOrthogonalAlt.txt @@ -0,0 +1,35 @@ +[type] +Parser + +[grammar] +grammar T; +s : {} a ';' a ';' a; +a : INT {} + | ID {} // must pick this one for ID since pred is false + | ID {} + | {}? 
ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +34; x; y + +[output] +alt 1 +alt 2 +alt 2 + +[errors] +line 1:4 reportAttemptingFullContext d=0 (a), input='x' +line 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x' +line 1:7 reportAttemptingFullContext d=0 (a), input='y' +line 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y' + +[flags] +showDiagnosticErrors + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/UnpredicatedPathsInAlt.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/UnpredicatedPathsInAlt.txt new file mode 100644 index 0000000000..f788d880c2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/UnpredicatedPathsInAlt.txt @@ -0,0 +1,27 @@ +[type] +Parser + +[grammar] +grammar T; +s : a {} + | b {} + ; +a : {}? ID INT + | ID INT + ; +b : ID ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x 4 + +[output] +"""alt 1 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ValidateInDFA.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ValidateInDFA.txt new file mode 100644 index 0000000000..eefaa1fb90 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/ValidateInDFA.txt @@ -0,0 +1,26 @@ +[type] +Parser + +[grammar] +grammar T; +s : a ';' a; +// ';' helps us to resynchronize without consuming +// 2nd 'a' reference. We our testing that the DFA also +// throws an exception if the validating predicate fails +a : {}? ID {} + | {}? 
INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + +[start] +s + +[input] +x ; y + +[errors] +line 1:0 no viable alternative at input 'x' +line 1:4 no viable alternative at input 'y' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/CharSetLiteral.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/CharSetLiteral.txt new file mode 100644 index 0000000000..fc7febd648 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/CharSetLiteral.txt @@ -0,0 +1,21 @@ +[type] +Parser + +[grammar] +grammar T; +a : (A {})+ ; +A : [AaBb] ; +WS : (' '|'\n')+ -> skip ; + +[start] +a + +[input] +A a B b + +[output] +A +a +B +b + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ComplementSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ComplementSet.txt new file mode 100644 index 0000000000..5b3e10c544 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ComplementSet.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +parse : ~NEW_LINE; +NEW_LINE: '\\r'? '\\n'; + +[start] +parse + +[input] +a + +[errors] +line 1:0 token recognition error at: 'a' +line 1:1 missing {} at '' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerOptionalSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerOptionalSet.txt new file mode 100644 index 0000000000..fb2c9f317b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerOptionalSet.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : ('a'|'b')? 
'c' ; + +[start] +a + +[input] +ac + +[output] +"""ac +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerPlusSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerPlusSet.txt new file mode 100644 index 0000000000..351b380c39 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerPlusSet.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : ('a'|'b')+ 'c' ; + +[start] +a + +[input] +abaac + +[output] +"""abaac +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerStarSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerStarSet.txt new file mode 100644 index 0000000000..1eccbcb059 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/LexerStarSet.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : ('a'|'b')* 'c' ; + +[start] +a + +[input] +abaac + +[output] +"""abaac +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotChar.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotChar.txt new file mode 100644 index 0000000000..fa23e88d27 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotChar.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : ~'b' ; + +[start] +a + +[input] +x + +[output] +"""x +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotCharSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotCharSet.txt new file mode 100644 index 0000000000..fc39a5b19c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotCharSet.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : ~('b'|'c') ; + +[start] +a + +[input] +x + 
+[output] +"""x +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotCharSetWithRuleRef3.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotCharSetWithRuleRef3.txt new file mode 100644 index 0000000000..9963097d02 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/NotCharSetWithRuleRef3.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : ('a'|B) ; // this doesn't collapse to set but works +fragment +B : ~('a'|'c') ; + +[start] +a + +[input] +x + +[output] +"""x +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalLexerSingleElement.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalLexerSingleElement.txt new file mode 100644 index 0000000000..c51c17277c --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalLexerSingleElement.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : 'b'? 'c' ; + +[start] +a + +[input] +bc + +[output] +"""bc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalSet.txt new file mode 100644 index 0000000000..f13145e6af --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalSet.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : ('a'|'b')? 
'c' {} ; + +[start] +a + +[input] +ac + +[output] +"""ac +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalSingleElement.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalSingleElement.txt new file mode 100644 index 0000000000..1daa7bcaff --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/OptionalSingleElement.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A? 'c' {} ; +A : 'b' ; + +[start] +a + +[input] +bc + +[output] +"""bc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotSet.txt new file mode 100644 index 0000000000..194cbfb3a1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotSet.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : t=~('x'|'y') 'z' {} ; + +[start] +a + +[input] +zz + +[output] +"""z +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotToken.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotToken.txt new file mode 100644 index 0000000000..55e41be4aa --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotToken.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : ~'x' 'z' {} ; + +[start] +a + +[input] +zz + +[output] +"""zz +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotTokenWithLabel.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotTokenWithLabel.txt new file mode 100644 index 0000000000..d1f7292736 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserNotTokenWithLabel.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a 
: t=~'x' 'z' {} ; + +[start] +a + +[input] +zz + +[output] +"""z +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserSet.txt new file mode 100644 index 0000000000..14d4e75030 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/ParserSet.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : t=('x'|'y') {} ; + +[start] +a + +[input] +x + +[output] +"""x +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/PlusLexerSingleElement.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/PlusLexerSingleElement.txt new file mode 100644 index 0000000000..3227f5b5c6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/PlusLexerSingleElement.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : 'b'+ 'c' ; + +[start] +a + +[input] +bbbbc + +[output] +"""bbbbc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/PlusSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/PlusSet.txt new file mode 100644 index 0000000000..7a082936e8 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/PlusSet.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a : ('a'|'b')+ 'c' {} ; + +[start] +a + +[input] +abaac + +[output] +"""abaac +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/RuleAsSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/RuleAsSet.txt new file mode 100644 index 0000000000..fdfd6d36c1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/RuleAsSet.txt @@ -0,0 +1,17 @@ +[type] +Parser + +[grammar] +grammar T; +a @after {} : 'a' | 'b' |'c' ; + +[start] +a + 
+[input] +b + +[output] +"""b +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/SeqDoesNotBecomeSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/SeqDoesNotBecomeSet.txt new file mode 100644 index 0000000000..63dfdbc101 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/SeqDoesNotBecomeSet.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : C {} ; +fragment A : '1' | '2'; +fragment B : '3' '4'; +C : A | B; + +[start] +a + +[input] +34 + +[output] +"""34 +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarLexerSingleElement_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarLexerSingleElement_1.txt new file mode 100644 index 0000000000..57a4f847aa --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarLexerSingleElement_1.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : 'b'* 'c' ; + +[start] +a + +[input] +bbbbc + +[output] +"""bbbbc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarLexerSingleElement_2.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarLexerSingleElement_2.txt new file mode 100644 index 0000000000..fa54688c38 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarLexerSingleElement_2.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : A {} ; +A : 'b'* 'c' ; + +[start] +a + +[input] +c + +[output] +"""c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarSet.txt new file mode 100644 index 0000000000..5656509c05 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/StarSet.txt @@ -0,0 +1,17 @@ 
+[type] +Parser + +[grammar] +grammar T; +a : ('a'|'b')* 'c' {} ; + +[start] +a + +[input] +abaac + +[output] +"""abaac +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedBMPRangeSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedBMPRangeSet.txt new file mode 100644 index 0000000000..f8a591be09 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedBMPRangeSet.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS* 'd' {} ; +// Note the double-backslash to avoid Java passing +// unescaped values as part of the grammar. +LETTERS : ('a'|'\\u00E0'..'\\u00E5'); + +[start] +a + +[input] +aáäáâåd + +[output] +"""aáäáâåd +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedBMPSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedBMPSet.txt new file mode 100644 index 0000000000..8f057e46dc --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedBMPSet.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS {} ; +// Note the double-backslash to avoid Java passing +// unescaped values as part of the grammar. 
+LETTERS : ('a'|'\\u00E4'|'\\u4E9C'|'\\u3042')* 'c'; + +[start] +a + +[input] +aäあ亜c + +[output] +"""aäあ亜c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPRangeSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPRangeSet.txt new file mode 100644 index 0000000000..ab51d94c6f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPRangeSet.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS* 'd' {} ; +// Note the double-backslash to avoid Java passing +// unescaped values as part of the grammar. +LETTERS : ('a'|'\\u{1F600}'..'\\u{1F943}'); + +[start] +a + +[input] +a😉🥂🜀d + +[output] +"""a😉🥂🜀d +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPRangeSetMismatch.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPRangeSetMismatch.txt new file mode 100644 index 0000000000..33e6f0a5a5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPRangeSetMismatch.txt @@ -0,0 +1,24 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS* 'd' {} ; +// Note the double-backslash to avoid Java passing +// unescaped values as part of the grammar. 
+LETTERS : ('a'|'\\u{1F600}'..'\\u{1F943}'); + +[start] +a + +[input] +a🗿🥄d + +[output] +"""ad +""" + +[errors] +line 1:1 token recognition error at: '🗿' +line 1:2 token recognition error at: '🥄' + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPSet.txt new file mode 100644 index 0000000000..a00b690c0b --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeEscapedSMPSet.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS {} ; +// Note the double-backslash to avoid Java passing +// unescaped values as part of the grammar. +LETTERS : ('a'|'\\u{1D5BA}'|'\\u{1D5BE}'|'\\u{1D5C2}'|'\\u{1D5C8}'|'\\u{1D5CE}')* 'c'; + +[start] +a + +[input] +a𝗂𝗎𝖺c + +[output] +"""a𝗂𝗎𝖺c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeNegatedBMPSetIncludesSMPCodePoints.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeNegatedBMPSetIncludesSMPCodePoints.txt new file mode 100644 index 0000000000..5b1f8ba919 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeNegatedBMPSetIncludesSMPCodePoints.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS {} ; +LETTERS : 'a' ~('b')+ 'c'; + +[start] +a + +[input] +a😳😡😝🤓c + +[output] +"""a😳😡😝🤓c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeNegatedSMPSetIncludesBMPCodePoints.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeNegatedSMPSetIncludesBMPCodePoints.txt new file mode 100644 index 0000000000..fc8990a04f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeNegatedSMPSetIncludesBMPCodePoints.txt @@ -0,0 +1,18 @@ +[type] +Parser + +[grammar] +grammar T; +a : 
LETTERS {} ; +LETTERS : 'a' ~('\\u{1F600}'..'\\u{1F943}')+ 'c'; + +[start] +a + +[input] +abc + +[output] +"""abc +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeUnescapedBMPRangeSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeUnescapedBMPRangeSet.txt new file mode 100644 index 0000000000..e8745b35c8 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeUnescapedBMPRangeSet.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS* 'd' {} ; +// These are actually not escaped -- Java passes the +// raw unescaped Unicode values to the grammar compiler. +LETTERS : ('a'|'à'..'å'); + +[start] +a + +[input] +aáäáâåd + +[output] +"""aáäáâåd +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeUnescapedBMPSet.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeUnescapedBMPSet.txt new file mode 100644 index 0000000000..4d919419f2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Sets/UnicodeUnescapedBMPSet.txt @@ -0,0 +1,20 @@ +[type] +Parser + +[grammar] +grammar T; +a : LETTERS {} ; +// These are actually not escaped -- Java passes the +// raw unescaped Unicode values to the grammar compiler. 
+LETTERS : ('a'|'ä'|'亜'|'あ')* 'c'; + +[start] +a + +[input] +aäあ亜c + +[output] +"""aäあ亜c +""" + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg new file mode 100644 index 0000000000..d2091c31e2 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg @@ -0,0 +1,25 @@ +\ + + \ + \net7.0\ + \$(NoWarn);CS3021\ + \Test\ + \Exe\ + \.\ + \Antlr4.Test\ + \false\ + \false\ + \false\ + \false\ + \false\ + \false\ + \false\ + \ + + \ + \ + \\ + \ + \ + +\ diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Package.swift.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Package.swift.stg new file mode 100644 index 0000000000..43ad150307 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Package.swift.stg @@ -0,0 +1,11 @@ +// swift-tools-version: 5.6 + +import PackageDescription + +let package = Package( + name: "Test", + targets: [ + .executableTarget(name: "Test", path: ".", + exclude:[ "}; separator = ", ", wrap> ]), + ] +) diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cpp.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cpp.stg new file mode 100644 index 0000000000..889fd8ac0f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cpp.stg @@ -0,0 +1,56 @@ +#include \ + +#include "antlr4-runtime.h" +#include ".h" + +#include ".h" + + +using namespace antlr4; + + +class TreeShapeListener : public tree::ParseTreeListener { +public: + void visitTerminal(tree::TerminalNode *) override {} + void visitErrorNode(tree::ErrorNode *) override {} + void exitEveryRule(ParserRuleContext *) override {} + void enterEveryRule(ParserRuleContext *ctx) override { + for (auto child : ctx->children) { + tree::ParseTree *parent = child->parent; + 
ParserRuleContext *rule = dynamic_cast\(parent); + if (rule != ctx) { + throw "Invalid parse tree shape detected."; + } + } + } +}; + + +int main(int argc, const char* argv[]) { + ANTLRFileStream input; + input.loadFromFile(argv[1]); + lexer(&input); + CommonTokenStream tokens(&lexer); + + parser(&tokens); + parser.getInterpreter\()->setPredictionMode(antlr4::atn::PredictionMode::); + + parser.setBuildParseTree(false); + + + DiagnosticErrorListener errorListener; + parser.addErrorListener(&errorListener); + + tree::ParseTree *tree = parser.(); + TreeShapeListener listener; + tree::ParseTreeWalker::DEFAULT.walk(&listener, tree); + + tokens.fill(); + for (auto token : tokens.getTokens()) + std::cout \<\< token->toString() \<\< std::endl; + + std::cout \<\< lexer.getInterpreter\()->getDFA(Lexer::DEFAULT_MODE).toLexerString(); + + + return 0; +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cs.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cs.stg new file mode 100644 index 0000000000..ab109d02a7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cs.stg @@ -0,0 +1,54 @@ +using System; +using Antlr4.Runtime; +using Antlr4.Runtime.Atn; +using Antlr4.Runtime.Tree; +using System.Text; + +public class Test { + public static void Main(string[] args) { + Console.OutputEncoding = Encoding.UTF8; + Console.InputEncoding = Encoding.UTF8; + var input = CharStreams.fromPath(args[0]); + var lex = new (input); + var tokens = new CommonTokenStream(lex); + + var parser = new (tokens); + parser.Interpreter.PredictionMode = PredictionMode.; + + parser.BuildParseTree = false; + + + parser.AddErrorListener(new DiagnosticErrorListener()); + + + ParserATNSimulator.trace_atn_sim = true; + + var tree = parser.(); + ParseTreeWalker.Default.Walk(new TreeShapeListener(), tree); + + tokens.Fill(); + foreach (object t in tokens.GetTokens()) + Console.Out.WriteLine(t); + + 
Console.Out.Write(lex.Interpreter.GetDFA(Lexer.DEFAULT_MODE).ToLexerString()); + + + } +} + + +class TreeShapeListener : IParseTreeListener { + public void VisitTerminal(ITerminalNode node) { } + public void VisitErrorNode(IErrorNode node) { } + public void ExitEveryRule(ParserRuleContext ctx) { } + + public void EnterEveryRule(ParserRuleContext ctx) { + for (int i = 0; i \< ctx.ChildCount; i++) { + IParseTree parent = ctx.GetChild(i).Parent; + if (!(parent is IRuleNode) || ((IRuleNode)parent).RuleContext != ctx) { + throw new Exception("Invalid parse tree shape detected."); + } + } + } +} + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.dart.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.dart.stg new file mode 100644 index 0000000000..ed1e949801 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.dart.stg @@ -0,0 +1,59 @@ +import 'dart:io'; +import 'package:antlr4/antlr4.dart'; + +import '.dart'; + +import '.dart'; + + +void main(List\ args) async { + CharStream input = await InputStream.fromPath(args[0]); + final lex = (input); + final tokens = CommonTokenStream(lex); + + final parser = (tokens); + + parser.addErrorListener(new DiagnosticErrorListener()); + + + ProfilingATNSimulator profiler = ProfilingATNSimulator(parser); + parser.setInterpreter(profiler); + + parser.interpreter!.predictionMode = PredictionMode.; + + parser.buildParseTree = false; + + ParserRuleContext tree = parser.(); + + print('[${profiler.getDecisionInfo().join(', ')}]'); + + ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree); + + tokens.fill(); + for (Object t in tokens.getTokens()!) 
+ print(t); + + stdout.write(lex.interpreter!.getDFA(Lexer.DEFAULT_MODE).toLexerString()); + + +} + + +class TreeShapeListener implements ParseTreeListener { + @override void visitTerminal(TerminalNode node) {} + + @override void visitErrorNode(ErrorNode node) {} + + @override void exitEveryRule(ParserRuleContext ctx) {} + + @override + void enterEveryRule(ParserRuleContext ctx) { + for (var i = 0; i \< ctx.childCount; i++) { + final parent = ctx.getChild(i)?.parent; + if (!(parent is RuleNode) || (parent as RuleNode).ruleContext != ctx) { + throw StateError('Invalid parse tree shape detected.'); + } + } + } +} + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.go.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.go.stg new file mode 100644 index 0000000000..ee2273291a --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.go.stg @@ -0,0 +1,69 @@ +package main +import ( + "fmt" + "github.com/antlr4-go/antlr/v4" + "os" + "test/parser" +) + + + +import "reflect" + +type TreeShapeListener struct { + *parser.BaseListener +} + +func NewTreeShapeListener() *TreeShapeListener { + return new(TreeShapeListener) +} + +func (this *TreeShapeListener) EnterEveryRule(ctx antlr.ParserRuleContext) { + for i := 0; i\ + +func main() { + + antlr.ParserATNSimulatorTraceATNSim = true + + input, err := antlr.NewFileStream(os.Args[1]) + if err != nil { + fmt.Printf("Failed to find file: %v", err) + return + } + lexer := parser.New(input) + stream := antlr.NewCommonTokenStream(lexer,0) + + p := parser.New(stream) + p.Interpreter.SetPredictionMode(antlr.PredictionMode) + + p.BuildParseTrees = false + + + p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) + + tree := p.() + antlr.ParseTreeWalkerDefault.Walk(NewTreeShapeListener(), tree) + + stream.Fill() + for _, t := range stream.GetAllTokens() { + fmt.Println(t) + } + + 
fmt.Print(lexer.GetInterpreter().DecisionToDFA()[antlr.LexerDefaultMode].ToLexerString()) + + +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.js.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.js.stg new file mode 100644 index 0000000000..4e19bf27f7 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.js.stg @@ -0,0 +1,62 @@ +import antlr4 from 'file:///src/antlr4/index.node.js' +import from './.js'; + +import from './.js'; + +import Listener from './Listener.js'; + + +import Visitor from './Visitor.js'; + + +class TreeShapeListener extends antlr4.tree.ParseTreeListener { + enterEveryRule(ctx) { + for (let i = 0; i \< ctx.getChildCount; i++) { + const child = ctx.getChild(i) + const parent = child.parentCtx + if (parent.getRuleContext() !== ctx || !(parent instanceof antlr4.tree.RuleNode)) { + throw `Invalid parse tree shape detected.` + } + } + } +} + + +function main(argv) { + var input = new antlr4.FileStream(argv[2], "utf-8", true); + var lexer = new (input); + var stream = new antlr4.CommonTokenStream(lexer); + + var parser = new (stream); + + parser.addErrorListener(new antlr4.error.DiagnosticErrorListener()); + + const printer = function() { + this.println = function(s) { console.log(s); } + this.print = function(s) { process.stdout.write(s); } + return this; + }; + parser.printer = new printer(); + parser._interp.predictionMode = antlr4.atn.PredictionMode.; + + parser.buildParseTrees = false; + + + parser._interp.trace_atn_sim = true; + antlr4.context.PredictionContext.trace_atn_sim = true; + + var tree = parser.(); + antlr4.tree.ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree); + + stream.fill(); + for(var i=0; i\ + process.stdout.write(lexer._interp.decisionToDFA[antlr4.Lexer.DEFAULT_MODE].toLexerString()); + + +} + +main(process.argv); + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.php.stg 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.php.stg new file mode 100644 index 0000000000..12a50eb58e --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.php.stg @@ -0,0 +1,78 @@ +\ +final class TreeShapeListener implements ParseTreeListener { + public function visitTerminal(TerminalNode $node) : void {} + public function visitErrorNode(ErrorNode $node) : void {} + public function exitEveryRule(ParserRuleContext $ctx) : void {} + + public function enterEveryRule(ParserRuleContext $ctx) : void { + for ($i = 0, $count = $ctx->getChildCount(); $i \< $count; $i++) { + $parent = $ctx->getChild($i)->getParent(); + + if (!($parent instanceof RuleNode) || $parent->getRuleContext() !== $ctx) { + throw new RuntimeException('Invalid parse tree shape detected.'); + } + } + } +} + + +$input = InputStream::fromPath($argv[1]); +$lexer = new ($input); +$lexer->addErrorListener(new ConsoleErrorListener()); +$tokens = new CommonTokenStream($lexer); + +$parser = new ($tokens); +$parser->getInterpreter()->setPredictionMode(PredictionMode::); + +$parser->setBuildParseTree(false); + + +$parser->addErrorListener(new DiagnosticErrorListener()); + +$parser->addErrorListener(new ConsoleErrorListener()); + +ParserATNSimulator::$traceAtnSimulation = true; + +$tree = $parser->(); + +ParseTreeWalker::default()->walk(new TreeShapeListener(), $tree); + +$tokens->fill(); + +foreach ($tokens->getAllTokens() as $token) { + echo $token . 
\PHP_EOL; +} + +echo $lexer->getInterpreter()->getDFA(Lexer::DEFAULT_MODE)->toLexerString(); + + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.py.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.py.stg new file mode 100644 index 0000000000..df2c62ec67 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.py.stg @@ -0,0 +1,58 @@ +from __future__ import print_function +import sys +import codecs +from antlr4 import * +from import + +from import +from Listener import Listener +from Visitor import Visitor + +class TreeShapeListener(ParseTreeListener): + + def visitTerminal(self, node:TerminalNode): + pass + + def visitErrorNode(self, node:ErrorNode): + pass + + def exitEveryRule(self, ctx:ParserRuleContext): + pass + + def enterEveryRule(self, ctx:ParserRuleContext): + for child in ctx.getChildren(): + parent = child.parentCtx + if not isinstance(parent, RuleNode) or parent.getRuleContext() != ctx: + raise IllegalStateException("Invalid parse tree shape detected.") + + +def main(argv): + + ParserATNSimulator.trace_atn_sim = True + PredictionContext._trace_atn_sim = True + + input = FileStream(argv[1], encoding='utf-8', errors='replace') + lexer = (input) + stream = CommonTokenStream(lexer) + + parser = (stream) + parser._interp.predictionMode = PredictionMode. 
+ + parser.buildParseTrees = False + + + parser.addErrorListener(DiagnosticErrorListener()) + + tree = parser.() + ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree) + + stream.fill() + [ print(tunicode(t)) for t in stream.tokens ] + + print(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString(), end='') + + + +if __name__ == '__main__': + main(sys.argv) + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.ts.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.ts.stg new file mode 100644 index 0000000000..cb708e3129 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.ts.stg @@ -0,0 +1,67 @@ +import { + FileStream, + CommonTokenStream, + DiagnosticErrorListener, + Lexer, + ParseTreeListener, + ParseTreeWalker, + RuleContext, + ParserRuleContext, + RuleNode, + PredictionMode +} from 'antlr4'; +import from './.js'; + +import from './.js'; + +import Listener from './Listener.js'; + + +import Visitor from './Visitor.js'; + + +class TreeShapeListener extends ParseTreeListener { + enterEveryRule(ctx: ParserRuleContext) { + for (let i = 0; i \< ctx.getChildCount(); i++) { + const child = ctx.getChild(i) as RuleContext; + const parent = child.parentCtx; + if (parent!.ruleContext !== ctx || !(parent instanceof RuleNode)) { + throw `Invalid parse tree shape detected.`; + } + } + } +} + + +function main(argv: string[]): void { + const input = new FileStream(argv[2], "utf-8", true); + const lexer = new (input); + const stream = new CommonTokenStream(lexer); + + const parser = new (stream); + + parser.addErrorListener(new DiagnosticErrorListener()); + + parser.printer = { + println : function(s: string) { console.log(s); }, + print : function(s: string) { process.stdout.write(s); } + }; + parser._interp.predictionMode = PredictionMode.; + + parser.buildParseTrees = false; + + const tree = parser.(); + ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree); + + stream.fill(); + 
for(let i=0; i\ + process.stdout.write(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString()); + + +} + +main(process.argv); + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg new file mode 100644 index 0000000000..aefd731b6d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg @@ -0,0 +1,85 @@ +\ +\ + \ + \ + \Release\ + \x64\ + \ + \ + \ + \16.0\ + \Win32Proj\ + \{f3708606-c8fb-45ca-ae36-b729f91e972b}\ + \Test\ + \10.0\ + \ + \ + + \ + \Application\ + \false\ + \v143\ + \false\ \ + \Unicode\ + \ + + \ + \ + \ + \ + + \ + \false\ + \;$(IncludePath)\ + \$(VC_ReferencesPath_x64);\ + \$(SolutionDir)\ + \ + + \ + \ + \TurnOffAllWarnings\ + \false\ + \false\ + \false\ + \NDEBUG;_CONSOLE;%(PreprocessorDefinitions)\ + \true\ + \stdcpp17\ + \None\ + \ + \ + \Console\ + \true\ + \false\ + \false\ + \"\\antlr4-runtime.lib";%(AdditionalDependencies)\ + \ + \ + + \ + \ + \ + + + \ + \ + + + + \ + \ + \ + \ + + + + \ + \ + \ + \ + + + \ + \ + + \ +\ diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/main.swift.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/main.swift.stg new file mode 100644 index 0000000000..5a7de3bae6 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/main.swift.stg @@ -0,0 +1,50 @@ +import Antlr4 +import Foundation + + +class TreeShapeListener: ParseTreeListener{ + func visitTerminal(_ node: TerminalNode){ } + func visitErrorNode(_ node: ErrorNode){ } + func enterEveryRule(_ ctx: ParserRuleContext) throws { } + func exitEveryRule(_ ctx: ParserRuleContext) throws { + for i in 0..\ + +let args = CommandLine.arguments +let input = try ANTLRFileStream(args[1]) +let lex = (input) +let tokens = CommonTokenStream(lex) + +let parser = try (tokens) + +parser.addErrorListener(DiagnosticErrorListener()) + + +let 
profiler = ProfilingATNSimulator(parser) +parser.setInterpreter(profiler) + +parser.getInterpreter().setPredictionMode(PredictionMode.) + +parser.setBuildParseTree(false) + +let tree = try parser.() + +print(profiler.getDecisionInfo().description) + +try ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree) + +try tokens.fill() +for t in tokens.getTokens() { + print(t) +} + +print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString(), terminator: "") + + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package_js.json b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package_js.json new file mode 100644 index 0000000000..1632c2c4df --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package_js.json @@ -0,0 +1 @@ +{"type": "module"} \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package_ts.json b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package_ts.json new file mode 100644 index 0000000000..3a0b887dc1 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package_ts.json @@ -0,0 +1,9 @@ +{ + "type": "module", + "devDependencies": { + "@types/node": "^18.0.5" + }, + "dependencies": { + "antlr4": "^4.13.2" + } +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg new file mode 100644 index 0000000000..16e3c769e5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg @@ -0,0 +1,6 @@ +name: "test" +dependencies: + antlr4: + path: +environment: + sdk: ">=2.12.0 \<3.0.0" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/src/main.rs.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/src/main.rs.stg new file mode 100644 index 0000000000..b9fff4f35b --- /dev/null +++ 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/src/main.rs.stg @@ -0,0 +1,95 @@ +use std::env; +use antlr4rust::parser::ParserNodeType; +use antlr4rust::{InputStream, Lexer, Parser}; +use antlr4rust::common_token_stream::CommonTokenStream; +use antlr4rust::error_listener::DiagnosticErrorListener; +use antlr4rust::int_stream::IntStream; +use antlr4rust::lexer::LEXER_DEFAULT_MODE; +use antlr4rust::PredictionMode; +use antlr4rust::token_stream::TokenStream; +use antlr4rust::tree::{IsError, LeafNode, NoError, ParseTreeListener, ParseTreeWalker, TerminalNode}; +mod ; +use ::; + +mod ; +use ::; +use ::ParserContextType; +use antlr4rust::errors::ANTLRError; + + +mod baselistener; +use baselistener::BaseListener; +mod listener; +use listener::Listener; + + +mod basevisitor; +use basevisitor::BaseVisitor; +mod visitor; +use visitor::Visitor; + + +fn main() { + let args: Vec\ = env::args().collect(); + let string = &args[1]; + let result = std::fs::read_to_string(string); + let data = result.unwrap(); + let input = InputStream::new(data.as_str()); + let lex = ::new(input); + let mut tokens = CommonTokenStream::new(lex); + + let mut parser = ::new(tokens); + + parser.add_error_listener(Box::new(DiagnosticErrorListener::new(true))); + + + let profiler = ProfilingATNSimulator::new(parser); + parser.setInterpreter(profiler); + + parser.get_interpreter().set_prediction_mode(PredictionMode::); + + parser.build_parse_trees = false; + + let tree = parser.(); + + println!('[${profiler.getDecisionInfo().join(', ')}]'); + + //ParseTreeWalker::walk(Box::new(TreeShapeListener::new()), &tree); + + //tokens.fill(); + for t in tokens.iter() { + } + for idx in 0..tokens.size() { + let mut x1 = tokens.get(idx); + println!("{}", x1); + } + + print!("{}", tokens.get_dfa_string()); + + +} + + +struct TreeShapeListener { +} + +impl\<'input> ParseTreeListener\<'_, ParserContextType> for TreeShapeListener { + fn visit_terminal(&mut self, _node: &LeafNode\<'_, ParserContextType, 
NoError>) {} + + fn visit_error_node(&mut self, _node: &LeafNode\<'_, ParserContextType, IsError>) {} + + fn enter_every_rule( + &mut self, + _ctx: &\<ParserContextType as ParserNodeType>::Type, + ) -> Result\<(), ANTLRError> { + Ok(()) + } + + fn exit_every_rule( + &mut self, + _ctx: &\<ParserContextType as ParserNodeType>::Type, + ) -> Result\<(), ANTLRError> { + Ok(()) + } +} + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/tsconfig.json b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/tsconfig.json new file mode 100644 index 0000000000..6bacc95440 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/tsconfig.json @@ -0,0 +1,11 @@ +{ + "compilerOptions": { + "module": "ES2020", + "moduleResolution": "node", + "target": "ES6", + "noImplicitAny": true, + }, + "ts-node": { + "esm": true + } +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg index cd9270286c..8cf10d710a 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg @@ -18,7 +18,7 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -AssertIsList(v) ::= "System.Collections.IList __ttt__ = ;" // just use static type system +AssertIsList(v) ::= "System.Collections.IList __ttt__ = (System.Collections.IList);" // just use static type system AssignLocal(s,v) ::= " = ;" @@ -78,6 +78,8 @@ ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.GetExpectedTokens().ToString(this.Vocabulary)" +ImportRuleInvocationStack() ::= "" + RuleInvocationStack() ::= "GetRuleInvocationStackAsString()" LL_EXACT_AMBIG_DETECTION() ::= <> @@ -232,7 +234,7 @@ public class LeafListener : TBaseListener { } sb.Length = sb.Length - 2; sb.Append ("]"); - Output.Write ("{0} {1} {2}", ctx.INT (0).Symbol.Text, + Output.Write 
("{0} {1} {2}\n", ctx.INT (0).Symbol.Text, ctx.INT (1).Symbol.Text, sb.ToString()); } else @@ -253,7 +255,7 @@ public class LeafListener : TBaseListener { public override void ExitA(TParser.AContext ctx) { if (ctx.ChildCount==2) { - Output.Write("{0} {1} {2}",ctx.b(0).Start.Text, + Output.Write("{0} {1} {2}\n",ctx.b(0).Start.Text, ctx.b(1).Start.Text,ctx.b()[0].Start.Text); } else Output.WriteLine(ctx.b(0).Start.Text); @@ -293,7 +295,7 @@ public class LeafListener : TBaseListener { } public override void ExitCall(TParser.CallContext ctx) { - Output.Write("{0} {1}",ctx.e().Start.Text,ctx.eList()); + Output.Write("{0} {1}\n",ctx.e().Start.Text,ctx.eList()); } public override void ExitInt(TParser.IntContext ctx) { Output.WriteLine(ctx.INT().Symbol.Text); @@ -323,5 +325,8 @@ Declare_pred() ::= <)>> ParserTokenType(t) ::= "Parser." ContextRuleFunction(ctx, rule) ::= "." +ContextListFunction(ctx, rule) ::= ".()" StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." +ContextMember(ctx, member) ::= "." +SubContextLocal(ctx, subctx, local) ::= ".." +SubContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg deleted file mode 100644 index f4a42e5a52..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg +++ /dev/null @@ -1,290 +0,0 @@ -writeln(s) ::= < + '\\n';>> - -write(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" - -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. = ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. 
+= ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. % != %> - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= <Listener = require('./Listener').Listener;>> - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } 
-}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - -BasicListener(X) ::= << -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << - -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - -TokenGetterListener(X) ::= << -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -RuleGetterListener(X) ::= << -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - - -LRListener(X) ::= << -this.LeafListener = function() { - this.exitE = function(ctx) { - var 
str; - if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -LRWithLabelsListener(X) ::= << -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." -ParserPropertyCall(p, call) ::= "

    %> + +Result(r) ::= <%%> + +ParserPropertyMember() ::= << +@members { +bool Property() { + return true; +} +} +>> + +ParserPropertyCall(p, call) ::= "

    ." + +PositionAdjustingLexerDef() ::= << +class PositionAdjustingLexerATNSimulator extends LexerATNSimulator { + PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn, + List\ decisionToDFA, PredictionContextCache sharedContextCache) + : super(atn, decisionToDFA, sharedContextCache, recog: recog); + + void resetAcceptPosition(CharStream input, int index, int line, + int charPositionInLine) { + input.seek(index); + this.line = line; + this.charPositionInLine = charPositionInLine; + consume(input); + } +} +>> + +PositionAdjustingLexer() ::= << +@override +Token nextToken() { + if (!(super.interpreter is PositionAdjustingLexerATNSimulator)) { + interpreter = new PositionAdjustingLexerATNSimulator( + this, _ATN, _decisionToDFA, _sharedContextCache); + } + + return super.nextToken(); +} + +@override +Token emit() { + switch (type) { + case TOKEN_TOKENS: + handleAcceptPositionForKeyword("tokens"); + break; + + case TOKEN_LABEL: + handleAcceptPositionForIdentifier(); + break; + + default: + break; + } + + return super.emit(); +} + +bool handleAcceptPositionForIdentifier() { + String tokenText = text; + int identifierLength = 0; + while (identifierLength \< tokenText.length && + isIdentifierChar(tokenText[identifierLength])) { + identifierLength++; + } + + if (inputStream.index > tokenStartCharIndex + identifierLength) { + int offset = identifierLength - 1; + interpreter.resetAcceptPosition(inputStream, tokenStartCharIndex + offset, + tokenStartLine, tokenStartCharPositionInLine + offset); + return true; + } + + return false; +} + +bool handleAcceptPositionForKeyword(String keyword) { + if (inputStream.index > tokenStartCharIndex + keyword.length) { + int offset = keyword.length - 1; + interpreter.resetAcceptPosition(inputStream, tokenStartCharIndex + offset, + tokenStartLine, tokenStartCharPositionInLine + offset); + return true; + } + + return false; +} + +@override +PositionAdjustingLexerATNSimulator get interpreter { + return super.interpreter as 
PositionAdjustingLexerATNSimulator; +} + +static bool isIdentifierChar(String c) { + return isLetterOrDigit(c) || c == '_'; +} + +static const ZERO = 48; +static const LOWER_A = 97; +static const LOWER_Z = 122; +static const UPPER_A = 65; +static const UPPER_Z = 90; + +static bool isLetterOrDigit(String char) => isLetter(char) || isDigit(char); + +// Note: this is intentially ASCII only +static bool isLetter(String char) { + if (char == null) return false; + var cc = char.codeUnitAt(0); + return cc >= LOWER_A && cc \<= LOWER_Z || cc >= UPPER_A && cc \<= UPPER_Z; +} + +static bool isDigit(String char) { + if (char == null) return false; + var cc = char.codeUnitAt(0); + return cc >= ZERO && cc \< ZERO + 10; +} +>> + +BasicListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void visitTerminal(TerminalNode node) { + print(node.symbol.text); + } +} +} +>> + +WalkListener(s) ::= << +ParseTreeWalker walker = new ParseTreeWalker(); +walker.walk(new LeafListener(), ); +>> + +TreeNodeWithAltNumField(X) ::= << +@parser::definitions { +class MyRuleNode extends ParserRuleContext { + late int altNum; + + MyRuleNode(ParserRuleContext? parent, int? 
invokingStateNumber) + : super(parent, invokingStateNumber); + + @override int get altNumber { + return altNum; + } + + @override void set altNumber(int altNum) { + this.altNum = altNum; + } +} +} +>> + +TokenGetterListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void exitA(AContext ctx) { + if (ctx.childCount==2) + TEST_platformStdoutWrite("${ctx.INT(0)?.symbol.text} ${ctx.INT(1)?.symbol.text} ${ctx.INTs()}\n"); + else + print(ctx.ID()?.symbol); + } +} +} +>> + +RuleGetterListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void exitA(AContext ctx) { + if (ctx.childCount==2) { + TEST_platformStdoutWrite("${ctx.b(0)?.start?.text} ${ctx.b(1)?.start?.text} ${ctx.bs()[0].start?.text}\n"); + } else + print(ctx.b(0)?.start?.text); + } +} +} +>> + + +LRListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void exitE(EContext ctx) { + if (ctx.childCount==3) { + print("${ctx.e(0)?.start?.text} ${ctx.e(1)?.start?.text} ${ctx.es()[0].start?.text}"); + } else + print(ctx.INT()?.symbol.text); + } +} +} +>> + +LRWithLabelsListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void exitCall(CallContext ctx) { + TEST_platformStdoutWrite("${ctx.e()?.start?.text} ${ctx.eList()}\n"); + } + void exitInt(IntContext ctx) { + print(ctx.INT()?.symbol.text); + } +} +} +>> + +DeclareContextListGettersFunction() ::= << +void foo() { + SContext? s = null; + List\? a = s?.as(); + List\? b = s?.bs(); +} +>> + +Declare_foo() ::= << + void foo() {print("foo");} +>> + +Invoke_foo() ::= "foo();" + +Declare_pred() ::= <> + +Invoke_pred(v) ::= <)>> + +ParserTokenType(t) ::= "Parser." +ContextRuleFunction(ctx, rule) ::= "." +ContextListFunction(ctx, rule) ::= ".()" +StringType() ::= "String" +ContextMember(ctx, member) ::= ".!" +SubContextLocal(ctx, subctx, local) ::= ".!.!" +SubContextMember(ctx, subctx, member) ::= ".!.!" 
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg deleted file mode 100644 index cf2802edcd..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg +++ /dev/null @@ -1,300 +0,0 @@ -writeln(s) ::= < + '\\n';>> -write(s) ::= <;>> -writeList(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" - -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. = ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. 
% != %> - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - 
-BasicListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - -TokenGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - - -LRListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitE = function(ctx) { - var str; - 
if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ParserTokenType(t) ::= "Parser." -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg deleted file mode 100644 index c19473f5e4..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg +++ /dev/null @@ -1,302 +0,0 @@ -writeln(s) ::= < + '\\n';>> -write(s) ::= <;>> -writeList(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" 
- -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. = ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. % != %> - -CheckVectorContext(s,v) ::= " = [].concat();" - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - 
-BasicListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - -TokenGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - - -LRListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitE = function(ctx) { - var str; - 
if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ParserTokenType(t) ::= "Parser." -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg index ec38c5604b..f629702ca8 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg @@ -18,7 +18,12 @@ AppendStr(a,b) ::= " + " Concat(a,b) ::= "" -AssertIsList(v) ::= "" +AssertIsList(v) ::= << +// Go will not compile this generated code if the slice vs single value is wrong. 
+for i := range localctx.(*ExpressionContext).GetArgs() { + _ = localctx.(*ExpressionContext).GetArgs()[i] +} +>> AssignLocal(s, v) ::= " = ;" @@ -78,6 +83,8 @@ ImportListener(X) ::= "" GetExpectedTokenNames() ::= "p.GetExpectedTokens().StringVerbose(p.GetTokenNames(), nil, false)" +ImportRuleInvocationStack() ::= "" + RuleInvocationStack() ::= "antlr.PrintArrayJavaStyle(p.GetRuleInvocationStack(nil))" LL_EXACT_AMBIG_DETECTION() ::= <> @@ -103,7 +110,7 @@ PositionAdjustingLexerDef() ::= "" PositionAdjustingLexer() ::= << func (p *PositionAdjustingLexer) NextToken() antlr.Token { if _, ok := p.Interpreter.(*PositionAdjustingLexerATNSimulator); !ok { - p.Interpreter = NewPositionAdjustingLexerATNSimulator(p, lexerAtn, p.Interpreter.DecisionToDFA(), p.Interpreter.SharedContextCache()) + p.Interpreter = NewPositionAdjustingLexerATNSimulator(p, p.Interpreter.ATN(), p.Interpreter.DecisionToDFA(), p.Interpreter.SharedContextCache()) p.Virt = p } @@ -236,7 +243,7 @@ func NewLeafListener() *LeafListener { func (*LeafListener) ExitA(ctx *AContext) { if ctx.GetChildCount() == 2 { - fmt.Printf("%s %s %s", ctx.INT(0).GetSymbol().GetText(), ctx.INT(1).GetSymbol().GetText(), antlr.PrintArrayJavaStyle(antlr.TerminalNodeToStringArray(ctx.AllINT()))) + fmt.Printf("%s %s %s\n", ctx.INT(0).GetSymbol().GetText(), ctx.INT(1).GetSymbol().GetText(), antlr.PrintArrayJavaStyle(antlr.TerminalNodeToStringArray(ctx.AllINT()))) } else { fmt.Println(ctx.ID().GetSymbol()) } @@ -256,7 +263,7 @@ func NewLeafListener() *LeafListener { func (*LeafListener) ExitA(ctx *AContext) { if ctx.GetChildCount() == 2 { - fmt.Printf("%s %s %s", ctx.B(0).GetStart().GetText(), ctx.B(1).GetStart().GetText(), ctx.AllB()[0].GetStart().GetText()) + fmt.Printf("%s %s %s\n", ctx.B(0).GetStart().GetText(), ctx.B(1).GetStart().GetText(), ctx.AllB()[0].GetStart().GetText()) } else { fmt.Println(ctx.B(0).GetStart().GetText()) } @@ -295,7 +302,7 @@ func NewLeafListener() *LeafListener { } func (*LeafListener) ExitCall(ctx 
*CallContext) { - fmt.Printf("%s %s", ctx.E().GetStart().GetText(), ctx.EList().String(nil, nil)) + fmt.Printf("%s %s\n", ctx.E().GetStart().GetText(), ctx.EList().String(nil, nil)) } func (*LeafListener) ExitInt(ctx *IntContext) { @@ -331,5 +338,8 @@ func pred(v bool) bool { Invoke_pred(v) ::= <)>> ContextRuleFunction(ctx, rule) ::= "." +ContextListFunction(ctx, rule) ::= ".()" StringType() ::= "string" -ContextMember(ctx, subctx, member) ::= ".." +ContextMember(ctx, member) ::= "." +SubContextLocal(ctx, subctx, local) ::= ".." +SubContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg index 16607933f4..e81f75a197 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg @@ -1,6 +1,6 @@ -writeln(s) ::= <);>> -write(s) ::= <);>> -writeList(s) ::= <);>> +writeln(s) ::= <);>> +write(s) ::= <);>> +writeList(s) ::= <);>> False() ::= "false" @@ -44,7 +44,7 @@ ModMemberEquals(n,m,v) ::= <%this. % == %> ModMemberNotEquals(n,m,v) ::= <%this. 
% != %> -DumpDFA() ::= "this.dumpDFA();" +DumpDFA() ::= "this.dumpDFA(outStream);" Pass() ::= "" @@ -78,6 +78,8 @@ ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames)" +ImportRuleInvocationStack() ::= "" + RuleInvocationStack() ::= "getRuleInvocationStack()" LL_EXACT_AMBIG_DETECTION() ::= <<_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);>> @@ -186,9 +188,9 @@ protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimula BasicListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void visitTerminal(TerminalNode node) { - System.out.println(node.getSymbol().getText()); + outStream.println(node.getSymbol().getText()); } } } @@ -214,13 +216,13 @@ public static class MyRuleNode extends ParserRuleContext { TokenGetterListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitA(TParser.AContext ctx) { if (ctx.getChildCount()==2) - System.out.printf("%s %s %s",ctx.INT(0).getSymbol().getText(), + outStream.printf("%s %s %s\n",ctx.INT(0).getSymbol().getText(), ctx.INT(1).getSymbol().getText(),ctx.INT()); else - System.out.println(ctx.ID().getSymbol()); + outStream.println(ctx.ID().getSymbol()); } } } @@ -228,13 +230,13 @@ public static class LeafListener extends TBaseListener { RuleGetterListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitA(TParser.AContext ctx) { if (ctx.getChildCount()==2) { - System.out.printf("%s %s %s",ctx.b(0).start.getText(), + outStream.printf("%s %s %s\n",ctx.b(0).start.getText(), ctx.b(1).start.getText(),ctx.b().get(0).start.getText()); } else - System.out.println(ctx.b(0).start.getText()); + outStream.println(ctx.b(0).start.getText()); } } } @@ 
-243,13 +245,13 @@ public static class LeafListener extends TBaseListener { LRListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitE(TParser.EContext ctx) { if (ctx.getChildCount()==3) { - System.out.printf("%s %s %s\n",ctx.e(0).start.getText(), + outStream.printf("%s %s %s\n",ctx.e(0).start.getText(), ctx.e(1).start.getText(), ctx.e().get(0).start.getText()); } else - System.out.println(ctx.INT().getSymbol().getText()); + outStream.println(ctx.INT().getSymbol().getText()); } } } @@ -257,12 +259,12 @@ public static class LeafListener extends TBaseListener { LRWithLabelsListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitCall(TParser.CallContext ctx) { - System.out.printf("%s %s",ctx.e().start.getText(),ctx.eList()); + outStream.printf("%s %s\n",ctx.e().start.getText(),ctx.eList()); } public void exitInt(TParser.IntContext ctx) { - System.out.println(ctx.INT().getSymbol().getText()); + outStream.println(ctx.INT().getSymbol().getText()); } } } @@ -277,13 +279,13 @@ void foo() { >> Declare_foo() ::= << - public void foo() {System.out.println("foo");} + public void foo() {outStream.println("foo");} >> Invoke_foo() ::= "foo();" Declare_pred() ::= <" ContextRuleFunction(ctx, rule) ::= "." +ContextListFunction(ctx, rule) ::= ".()" StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." +ContextMember(ctx, member) ::= "." +SubContextLocal(ctx, subctx, local) ::= ".." +SubContextMember(ctx, subctx, member) ::= ".." 
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/JavaScript.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/JavaScript.test.stg new file mode 100644 index 0000000000..56b48a2b12 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/JavaScript.test.stg @@ -0,0 +1,307 @@ +writeln(s) ::= < || '');>> +write(s) ::= < || '');>> +writeList(s) ::= <);>> + +False() ::= "false" + +True() ::= "true" + +Not(v) ::= "!" + +Assert(s) ::= <);>> + +Cast(t,v) ::= "" + +Append(a,b) ::= " + " + +AppendStr(a,b) ::= <%%> + +Concat(a,b) ::= "" + +AssertIsList(v) ::= < instanceof Array) ) {throw "value is not an array";}>> + +AssignLocal(s,v) ::= " = ;" + +InitIntMember(n,v) ::= <%this. = ;%> + +InitBooleanMember(n,v) ::= <%this. = ;%> + +InitIntVar(n,v) ::= <%%> + +IntArg(n) ::= "" + +VarRef(n) ::= "" + +GetMember(n) ::= <%this.%> + +SetMember(n,v) ::= <%this. = ;%> + +AddMember(n,v) ::= <%this. += ;%> + +MemberEquals(n,v) ::= <%this. === %> + +ModMemberEquals(n,m,v) ::= <%this. % === %> + +ModMemberNotEquals(n,m,v) ::= <%this. 
% != %> + +DumpDFA() ::= "this.dumpDFA();" + +Pass() ::= "" + +StringList() ::= "list" + +BuildParseTrees() ::= "this.buildParseTrees = true;" + +BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> + +ToStringTree(s) ::= <%.toStringTree(null, this)%> + +Column() ::= "this.column" + +Text() ::= "this.text" + +ValEquals(a,b) ::= <%===%> + +TextEquals(a) ::= <%this.text===""%> + +PlusText(a) ::= <%"" + this.text%> + +InputText() ::= "this._input.getText()" + +LTEquals(i, v) ::= <%this._input.LT().text===%> + +LANotEquals(i, v) ::= <%this._input.LA()!=%> + +TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> + +ImportListener(X) ::= "" + +GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" + +ImportRuleInvocationStack() ::= "" + +RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" + +LL_EXACT_AMBIG_DETECTION() ::= <> + +ParserToken(parser, token) ::= <%.%> + +Production(p) ::= <%

    %> + +Result(r) ::= <%%> + +ParserPropertyMember() ::= << +@members { +this.Property = function() { + return true; +} +} +>> + +ParserPropertyCall(p, call) ::= "

    ." + +PositionAdjustingLexerDef() ::= "" + +PositionAdjustingLexer() ::= << + +PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { + this._input.seek(index); + this.line = line; + this.column = column; + this._interp.consume(this._input); +}; + +PositionAdjustingLexer.prototype.nextToken = function() { + if (!("resetAcceptPosition" in this._interp)) { + var lexer = this; + this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; + } + return antlr4.Lexer.prototype.nextToken.call(this); +}; + +PositionAdjustingLexer.prototype.emit = function() { + switch(this._type) { + case PositionAdjustingLexer.TOKENS: + this.handleAcceptPositionForKeyword("tokens"); + break; + case PositionAdjustingLexer.LABEL: + this.handleAcceptPositionForIdentifier(); + break; + } + return antlr4.Lexer.prototype.emit.call(this); +}; + +PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { + var tokenText = this.text; + var identifierLength = 0; + while (identifierLength \< tokenText.length && + PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) + ) { + identifierLength += 1; + } + if (this._input.index > this._tokenStartCharIndex + identifierLength) { + var offset = identifierLength - 1; + this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, + this._tokenStartLine, this._tokenStartColumn + offset); + return true; + } else { + return false; + } +}; + +PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { + if (this._input.index > this._tokenStartCharIndex + keyword.length) { + var offset = keyword.length - 1; + this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, + this._tokenStartLine, this._tokenStartColumn + offset); + return true; + } else { + return false; + } +}; + +PositionAdjustingLexer.isIdentifierChar = function(c) { + return c.match(/^[0-9a-zA-Z_]+$/); +} + +>> + 
+BasicListener(X) ::= << +@parser::members { +this.LeafListener = function() { + this.visitTerminal = function(node) { + console.log(node.symbol.text); + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; +} +>> + +WalkListener(s) ::= << +var walker = new antlr4.tree.ParseTreeWalker(); +walker.walk(new this.LeafListener(), ); +>> + +TreeNodeWithAltNumField(X) ::= << +@parser::header { +class MyRuleNode extends antlr4.ParserRuleContext { + constructor(parent, invokingState) { + super(parent, invokingState); + this.altNum = 0; + } + + getAltNumber() { + return this.altNum; + } + + setAltNumber(altNumber){ + this.altNum = altNumber; + } +}; + +} +>> + +TokenGetterListener(X) ::= << +@parser::members { +this.LeafListener = function() { + this.exitA = function(ctx) { + var str; + if(ctx.getChildCount()===2) { + str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); + } else { + str = ctx.ID().symbol.toString(); + } + console.log(str); + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; +} +>> + +RuleGetterListener(X) ::= << +@parser::members { +this.LeafListener = function() { + this.exitA = function(ctx) { + var str; + if(ctx.getChildCount()===2) { + str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; + } else { + str = ctx.b(0).start.text; + } + console.log(str); + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; +} +>> + +LRListener(X) ::= << +@parser::members { +this.LeafListener = function() { + this.exitE = function(ctx) { + var str; + if(ctx.getChildCount()===3) { + str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; + } else { + str = 
ctx.INT().symbol.text; + } + console.log(str); + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; +} +>> + +LRWithLabelsListener(X) ::= << +@parser::members { +this.LeafListener = function() { + this.exitCall = function(ctx) { + var str = ctx.e().start.text + ' ' + ctx.eList(); + console.log(str); + }; + this.exitInt = function(ctx) { + var str = ctx.INT().symbol.text; + console.log(str); + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; +} +>> + +DeclareContextListGettersFunction() ::= << + function foo() { + var s = new SContext(); + var a = s.a(); + var b = s.b(); + }; +>> + +Declare_foo() ::= "this.foo = function() {console.log('foo');};" + +Invoke_foo() ::= "this.foo();" + +Declare_pred() ::= <" +ContextRuleFunction(ctx, rule) ::= "." +ContextListFunction(ctx, rule) ::= "._list()" +StringType() ::= "String" +ContextMember(ctx, member) ::= "." +SubContextLocal(ctx, subctx, local) ::= ".." +SubContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Node.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Node.test.stg deleted file mode 100644 index 926bd6fe13..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Node.test.stg +++ /dev/null @@ -1,303 +0,0 @@ -writeln(s) ::= <);>> -write(s) ::= <);>> -writeList(s) ::= <);>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" - -Assert(s) ::= <);>> - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. 
= ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. % != %> - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - 
-BasicListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.visitTerminal = function(node) { - console.log(node.symbol.text); - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -MyRuleNode.prototype.getAltNumber = function() { return this.altNum; } -MyRuleNode.prototype.setAltNumber = function(altNumber) { this.altNum = altNumber; } - -} ->> - -TokenGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - console.log(str); - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - console.log(str); - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitE = 
function(ctx) { - var str; - if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - console.log(str); - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - console.log(str); - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - console.log(str); - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {console.log('foo');};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <" -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/PHP.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/PHP.test.stg index a2ea874663..1889e09951 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/PHP.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/PHP.test.stg @@ -52,6 +52,8 @@ ImportListener(X) ::= "" GetExpectedTokenNames() ::= "\$this->getExpectedTokens()->toStringVocabulary(\$this->getVocabulary())" +ImportRuleInvocationStack() ::= "" + RuleInvocationStack() ::= "'[' . \implode(', ', \$this->getRuleInvocationStack()) . 
']'" LL_EXACT_AMBIG_DETECTION() ::= <<\$this->interp->setPredictionMode(Antlr\\Antlr4\\Runtime\\Atn\\PredictionMode::LL_EXACT_AMBIG_DETECTION);>> @@ -268,5 +270,8 @@ Invoke_pred(v) ::= "\$this->pred()" ParserTokenType(t) ::= "Parser::" ContextRuleFunction(ctx, rule) ::= "->" +ContextListFunction(ctx, rule) ::= "->()" StringType() ::= "" -ContextMember(ctx, subctx, member) ::= "->->" +ContextMember(ctx, member) ::= "->" +SubContextLocal(ctx, subctx, local) ::= "->->" +SubContextMember(ctx, subctx, member) ::= "->->" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg deleted file mode 100644 index 2292cafe7b..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg +++ /dev/null @@ -1,281 +0,0 @@ -writeln(s) ::= <, file=self._output)>> -write(s) ::= <,end='', file=self._output)>> -writeList(s) ::= <)}; separator="+">, file=self._output)>> - -False() ::= "False" - -True() ::= "True" - -Not(v) ::= "not " - -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + str()" - -AppendStr(a,b) ::= " + " - -Concat(a,b) ::= "" - -AssertIsList(v) ::= "assert isinstance(v, (list, tuple))" - -AssignLocal(s,v) ::= " = " - -InitIntMember(n,v) ::= <% = %> - -InitBooleanMember(n,v) ::= <% = %> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%self.%> - -SetMember(n,v) ::= <%self. = %> - -AddMember(n,v) ::= <%self. += %> - -MemberEquals(n,v) ::= <%self. == %> - -ModMemberEquals(n,m,v) ::= <%self. % == %> - -ModMemberNotEquals(n,m,v) ::= <%self. 
% != %> - -DumpDFA() ::= "self.dumpDFA()" - -Pass() ::= "pass" - -StringList() ::= "" - -BuildParseTrees() ::= "self._buildParseTrees = True" - -BailErrorStrategy() ::= <%self._errHandler = BailErrorStrategy()%> - -ToStringTree(s) ::= <%.toStringTree(recog=self)%> - -Column() ::= "self.column" - -Text() ::= "self.text" - -ValEquals(a,b) ::= <%==%> - -TextEquals(a) ::= <%self.text==""%> - -PlusText(a) ::= <%"" + self.text%> - -InputText() ::= "self._input.getText()" - -LTEquals(i, v) ::= <%self._input.LT().text==%> - -LANotEquals(i, v) ::= <%self._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%self._tokenStartColumn==%> - -ImportListener(X) ::= "" - -GetExpectedTokenNames() ::= "self.getExpectedTokens().toString(self.literalNames, self.symbolicNames)" - -RuleInvocationStack() ::= "str_list(self.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -def Property(self): - return True - -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -def resetAcceptPosition(self, index, line, column): - self._input.seek(index) - self.line = line - self.column = column - self._interp.consume(self._input) - -def nextToken(self): - if self._interp.__dict__.get("resetAcceptPosition", None) is None: - self._interp.__dict__["resetAcceptPosition"] = self.resetAcceptPosition - return super(type(self),self).nextToken() - -def emit(self): - if self._type==PositionAdjustingLexer.TOKENS: - self.handleAcceptPositionForKeyword("tokens") - elif self._type==PositionAdjustingLexer.LABEL: - self.handleAcceptPositionForIdentifier() - return super(type(self),self).emit() - -def handleAcceptPositionForIdentifier(self): - tokenText = self.text - identifierLength = 0 - while identifierLength \< len(tokenText) and self.isIdentifierChar(tokenText[identifierLength]): - identifierLength += 1 - - if self._input.index > self._tokenStartCharIndex + identifierLength: - offset = identifierLength - 1 - self._interp.resetAcceptPosition(self._tokenStartCharIndex + offset, - self._tokenStartLine, self._tokenStartColumn + offset) - return True - else: - return False - - -def handleAcceptPositionForKeyword(self, keyword): - if self._input.index > self._tokenStartCharIndex + len(keyword): - offset = len(keyword) - 1 - self._interp.resetAcceptPosition(self._tokenStartCharIndex + offset, - self._tokenStartLine, self._tokenStartColumn + offset) - return True - else: - return False - -@staticmethod -def isIdentifierChar(c): - return c.isalnum() or c == '_' - ->> - -BasicListener(X) ::= << -@parser::members { -if __name__ is not None and "." 
in __name__: - from .Listener import Listener -else: - from Listener import Listener - -class LeafListener(TListener): - def __init__(self, output): - self._output = output - def visitTerminal(self, node): - print(node.symbol.text, file=self._output) -} ->> - -WalkListener(s) ::= << -walker = ParseTreeWalker() -walker.walk(TParser.LeafListener(self._output), ) ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::members { -class MyRuleNode(ParserRuleContext): - def __init__(self, parent = None, invokingStateNumber = None ): - super(Parser.MyRuleNode, self).__init__(parent, invokingStateNumber) - self.altNum = 0; - def getAltNumber(self): - return self.altNum - def setAltNumber(self, altNum): - self.altNum = altNum -} ->> - -TokenGetterListener(X) ::= << -@parser::members { -if __name__ is not None and "." in __name__: - from .Listener import Listener -else: - from Listener import Listener - -class LeafListener(TListener): - def __init__(self, output): - self._output = output - def exitA(self, ctx): - if ctx.getChildCount()==2: - print(ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + str_list(ctx.INT()), file=self._output) - else: - print(str(ctx.ID().symbol), file=self._output) -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -if __name__ is not None and "." in __name__: - from .Listener import Listener -else: - from Listener import Listener - -class LeafListener(TListener): - def __init__(self, output): - self._output = output - def exitA(self, ctx): - if ctx.getChildCount()==2: - print(ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text, file=self._output) - else: - print(ctx.b(0).start.text, file=self._output) -} ->> - - -LRListener(X) ::= << -@parser::members { -if __name__ is not None and "." 
in __name__: - from .Listener import Listener -else: - from Listener import Listener - -class LeafListener(TListener): - def __init__(self, output): - self._output = output - def exitE(self, ctx): - if ctx.getChildCount()==3: - print(ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text, file=self._output) - else: - print(ctx.INT().symbol.text, file=self._output) -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -if __name__ is not None and "." in __name__: - from .Listener import Listener -else: - from Listener import Listener - -class LeafListener(TListener): - def __init__(self, output): - self._output = output - def exitCall(self, ctx): - print(ctx.e().start.text + ' ' + str(ctx.eList()), file=self._output) - def exitInt(self, ctx): - print(ctx.INT().symbol.text, file=self._output) -} ->> - -DeclareContextListGettersFunction() ::= << -def foo(): - s = SContext() - a = s.a() - b = s.b() ->> - -Declare_foo() ::= <> - -Invoke_foo() ::= "self.foo()" - -Declare_pred() ::= <> +Append(a,b) ::= "format!(\"{}{}\",, .unwrap())" +AppendStr(a,b) ::= <%.to_string() + &()%> +Concat(a,b) ::= "" + +AssertIsList(v) ::= "" //not sure if possible in Rust +AssignLocal(s,v) ::= " = ;" + +InitIntVar(n,v) ::= "let = ;" +InitIntMember(n,v) ::= <<} +@parser::fields {: isize,} +@parser::init {: , +>> +InitBooleanMember(n,v) ::= <<} +@parser::fields {: bool,} +@parser::init {: , +>> + +IntArg(n) ::= "int " +VarRef(v) ::= "" + +GetMember(n) ::= "recog." +SetMember(n,v) ::= "recog. = ;" +AddMember(n,v) ::= "recog. += ;" +PlusMember(n,v) ::= "recog. + " +MemberEquals(n,v) ::= "recog. == " +ModMemberEquals(n,m,v) ::= "recog. % == " +ModMemberNotEquals(n,m,v) ::= "recog. 
% != " + +DumpDFA() ::= "recog.dump_dfa();" +Pass() ::= "/* do nothing */" + +StringList() ::= "Vec\" +BuildParseTrees() ::= "recog.build_parse_trees = true;" +BailErrorStrategy() ::= +<> + +ToStringTree(s) ::= ".to_string_tree(&recog.base)" +Column() ::= "recog.get_char_position_in_line()" +Text() ::= "recog.get_text()" +ValEquals(a,b) ::= " == " +TextEquals(a) ::= "recog.get_text() == \"\"" +PlusText(a) ::="\"\".to_owned() + &recog.get_text()" +InputText() ::= "recog.base.input.get_all_text()" +LTEquals(i, v) ::= "recog.input.lt().unwrap().get_text() == " +LANotEquals(i, v) ::= "recog.input.la() != " +TokenStartColumnEquals(i) ::= "recog.token_start_column == " + +ImportListener(X) ::= "" + +GetExpectedTokenNames() ::= "recog.base.get_expected_tokens().to_token_string(recog.get_vocabulary())" + +ImportRuleInvocationStack() ::= "" + +RuleInvocationStack() ::= "format!(\"[{}]\",recog.get_rule_invocation_stack().join(\", \"))" + +LL_EXACT_AMBIG_DETECTION() ::= <> + +//ParserToken(parser, token) ::= <%.%> +ParserToken(parser, token) ::= <%_%> + +Production(p) ::= <%

    %> + +Result(r) ::= <%%> + +ParserPropertyMember() ::= << +@members { +fn Property(&self) -> bool { + return true; +} +} +>> + +ParserPropertyCall(p, call) ::= "recog." + +// unsupported +TreeNodeWithAltNumField(X) ::= << +@parser::definitions { +struct MyRuleNode\(BaseParserRuleContext\ >); +struct MyRuleNodeCtx\(T,std::cell::Cell\); +use core::fmt::{Formatter,Debug}; +impl\ Debug for MyRuleNode\{ + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + f.write_str(std::any::type_name::\()) + } +} + +impl\ CustomRuleContext for MyRuleNodeCtx\{ + fn get_alt_number(&self) -> isize {self.1.get()} + fn set_alt_number(&self, _alt_number: isize) {self.1.set(_alt_number)} +} + + +impl\ Deref for MyRuleNode\{ + type Target = dyn ParserRuleContext; + fn deref(&self) -> &Self::Target {&self.0} +} +impl\ DerefMut for MyRuleNode\{ + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } +} +impl\ antlr4rust::parser_rule_context::DerefSeal for MyRuleNode\{} +} +>> + +PositionAdjustingLexer() ::= << +>> + +PositionAdjustingLexerDef() ::= << +//@definitions + fn handleAcceptPositionForIdentifier\<'input, Input:CharStream\\>>(this: &mut BaseLexer\<'input,PositionAdjustingLexerActions,Input,LocalTokenFactory\<'input> >) { + let tokenText = this.get_text(); + let identifierLength = tokenText.chars().position(|it| !isIdentifierChar(it)).unwrap() as isize; +// while (identifierLength \< tokenText.len() as i32 && isIdentifierChar(tokenText[identifierLength as usize])) { +// identifierLength+=1; +// } + + if (this.get_input_stream().unwrap().index() > this.token_start_char_index + identifierLength) { + let offset = identifierLength - 1; + let mut input = this.input.take().unwrap(); + this.interpreter.as_mut().unwrap().resetAcceptPosition(&mut input, + this.token_start_char_index + offset, this.token_start_line, this.token_start_column + offset); + this.input = Some(input); + //return; + }; + + } + + fn handleAcceptPositionForKeyword\<'input, Input:CharStream\\>>(this: &mut 
BaseLexer\<'input,PositionAdjustingLexerActions,Input,LocalTokenFactory\<'input> >, keyword: &str) { + let mut input = this.input.take().unwrap(); + if (input.index() > this.token_start_char_index + keyword.len() as isize) { + let offset = keyword.len() as isize - 1; + this.interpreter.as_mut().unwrap().resetAcceptPosition(&mut input, + this.token_start_char_index + offset, this.token_start_line, this.token_start_column + offset); + //return true; + } + this.input = Some(input); + + //return false; + } + + fn isIdentifierChar(c: char) -> bool{ + return c.is_ascii_alphanumeric() || c == '_'; + } + +trait PositionAdjustingLexerATNSim { + fn resetAcceptPosition(&mut self, input: &mut dyn IntStream, index: isize, line: isize, charPositionInLine: isize); + } +impl PositionAdjustingLexerATNSim for antlr4rust::lexer_atn_simulator::LexerATNSimulator{ + fn resetAcceptPosition(&mut self, input: &mut dyn IntStream, index: isize, line: isize, charPositionInLine: isize) { + input.seek(index); + self.set_line(line); + self.set_char_position_in_line(charPositionInLine); + self.consume(input); + } +} + +} +@extend{ + + fn before_emit(lexer:&mut BaseLexer\<'input,PositionAdjustingLexerActions,Input,LocalTokenFactory\<'input> >) { + match (lexer.token_type) { + TOKENS => handleAcceptPositionForKeyword(lexer,"tokens"), + LABEL => handleAcceptPositionForIdentifier(lexer), + _ => {} + } + } + +>> + + +BasicListener(X) ::= << +@parser::definitions { +struct TestListener; + +impl\<'a> ParseTreeListener\<'a,TParserContextType> for TestListener{ + fn visit_terminal(&'_ mut self, node: &TerminalNode\<'a,TParserContextType> ) { + println!("{}",node.symbol.get_text()); + } +} +impl\<'a> TListener\<'a> for TestListener{} +} +>> + +WalkListener(s) ::= << +TTreeWalker::walk(Box::new(TestListener), .deref()); +>> + +TokenGetterListener(X) ::= << +@parser::definitions { +use antlr4rust::tree::Tree; +struct TestListener; + +impl TListener\<'_> for TestListener{ + fn exit_a(&mut self, node: 
&AContext) { + if node.get_child_count() == 2 { + println!("{} {} {:?}",node.INT(0).unwrap().symbol.get_text(),node.INT(1).unwrap().symbol.get_text(),node.INT_all()) + } else{ + println!("{}",node.ID().unwrap().symbol); + } + } +} +impl ParseTreeListener\<'_,TParserContextType> for TestListener{} +} +>> + +RuleGetterListener(X) ::= << +@parser::definitions { + +use antlr4rust::tree::Tree; +struct TestListener; + +impl TListener\<'_> for TestListener{ + fn exit_a(&mut self, node: &AContext) { + if node.get_child_count() == 2 { + println!("{} {} {}", + node.b(0).unwrap().start().get_text(), + node.b(1).unwrap().start().get_text(), + node.b_all()[0].start().get_text(), + ) + } else{ + println!("{}",node.b(0).unwrap().start().get_text()); + } + } +} +impl ParseTreeListener\<'_,TParserContextType> for TestListener{} +} +>> + + +LRListener(X) ::= << +@parser::definitions { +use antlr4rust::tree::Tree; +struct TestListener; + +impl ParseTreeListener\<'_,TParserContextType> for TestListener{} +impl TListener\<'_> for TestListener{ + fn exit_e(&mut self, ctx: &EContext) { + if ctx.get_child_count() == 3 { + println!("{} {} {}",ctx.e(0).unwrap().start().get_text(),ctx.e(1).unwrap().start().get_text(),ctx.e_all()[0].start().get_text()); + } else { + println!("{}",ctx.INT().unwrap().symbol.get_text()); + } + } +} +} +>> + +LRWithLabelsListener(X) ::= << +@parser::definitions { + +use antlr4rust::tree::Tree; +use antlr4rust::parser_rule_context::RuleContextExt; +struct TestListener; + +impl ParseTreeListener\<'_,TParserContextType> for TestListener{} +impl TListener\<'_> for TestListener{ + fn exit_Call(&mut self, ctx: &CallContext) { + println!("{} {}",ctx.e().unwrap().start().get_text(),ctx.eList().unwrap().to_string(None,None)); + } + fn exit_Int(&mut self, ctx: &IntContext){ + println!("{}",ctx.INT().unwrap().symbol.get_text()); + } +} +} +>> + +DeclareContextListGettersFunction() ::= << +fn foo() { + let s:SContext = unimplemented!(); + let a:Vec\ > = s.a_all(); + let 
b:Vec\ > = s.b_all(); +} +>> + +Declare_foo() ::= <> + +Invoke_foo() ::= "recog.foo();" + +Declare_pred() ::= << +pub fn pred(&self,v:bool)-> bool { + println!("eval={}",v); + return v; +} +>> + +Invoke_pred(v) ::= <)>> + +ContextRuleFunction(ctx, rule) ::= "..as_ref().unwrap()" +StringType() ::= "String" +SubContextLocal(ctx, subctx, local) ::= "..unwrap().get_()" +ContextMember(ctx, member) ::= "..as_ref().unwrap()" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg deleted file mode 100644 index e84ff373d4..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg +++ /dev/null @@ -1,301 +0,0 @@ -writeln(s) ::= < + '\\n';>> -write(s) ::= <;>> -writeList(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" - -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. = ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. 
% != %> - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - 
-BasicListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - - -TokenGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - - -LRListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitE = function(ctx) { - var str; - 
if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ParserTokenType(t) ::= "Parser." -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." 
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg index c2c12b63c2..af0606d841 100755 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg @@ -78,6 +78,8 @@ ImportListener(X) ::= "" GetExpectedTokenNames() ::= "try self.getExpectedTokens().toString(self.getVocabulary())" +ImportRuleInvocationStack() ::= "" + RuleInvocationStack() ::= "getRuleInvocationStack().description.replacingOccurrences(of: \"\\\"\", with: \"\")" LL_EXACT_AMBIG_DETECTION() ::= <<_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);>> @@ -299,9 +301,9 @@ func pred(_ v: Bool) -> Bool { >> StringType() ::= "String" - Invoke_pred(v) ::= <)>> - ContextRuleFunction(ctx, rule) ::= "." - -ContextMember(ctx, subctx, member) ::= ".!." +ContextListFunction(ctx, rule) ::= ".()" +ContextMember(ctx, member) ::= "!." +SubContextLocal(ctx, subctx, local) ::= ".!." +SubContextMember(ctx, subctx, member) ::= ".!." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/TypeScript.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/TypeScript.test.stg new file mode 100644 index 0000000000..4df39f55e9 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/TypeScript.test.stg @@ -0,0 +1,312 @@ +writeln(s) ::= <);>> +write(s) ::= <);>> +writeList(s) ::= <);>> + +False() ::= "false" + +True() ::= "true" + +Not(v) ::= "!" 
+ +Assert(s) ::= <);>> + +Cast(t,v) ::= "( as )" + +Append(a,b) ::= " + " + +AppendStr(a,b) ::= <%%> + +Concat(a,b) ::= "" + +AssertIsList(v) ::= <%if ( !( instanceof Array) ) {throw "value is not an array";}%> + +AssignLocal(s,v) ::= " = ;" + +InitIntMember(n,v) ::= <% : number = ;%> + +InitBooleanMember(n,v) ::= <% : boolean = ;%> + +InitIntVar(n,v) ::= <%let : number = ;%> + +IntArg(n) ::= ": number" + +VarRef(n) ::= "" + +GetMember(n) ::= <%this.%> + +SetMember(n,v) ::= <%this. = ;%> + +AddMember(n,v) ::= <%this. += ;%> + +MemberEquals(n,v) ::= <%this. === %> + +ModMemberEquals(n,m,v) ::= <%this. % === %> + +ModMemberNotEquals(n,m,v) ::= <%this. % != %> + +DumpDFA() ::= "this.dumpDFA();" + +Pass() ::= "" + +StringList() ::= "string[]" + +BuildParseTrees() ::= "this.buildParseTrees = true;" + +BailErrorStrategy() ::= <%this._errHandler = new BailErrorStrategy();%> + +ToStringTree(s) ::= <%.toStringTree(null, this)%> + +Column() ::= "this.column" + +Text() ::= "this.text" + +ValEquals(a,b) ::= <%===%> + +TextEquals(a) ::= <%this.text===""%> + +PlusText(a) ::= <%"" + this.text%> + +InputText() ::= "this._input.getText()" + +LTEquals(i, v) ::= <%this._input.LT().text===%> + +LANotEquals(i, v) ::= <%this._input.LA()!=%> + +TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> + +ImportListener(X) ::= "" + +GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" + +ImportRuleInvocationStack() ::= << +@parser::header { + import { arrayToString } from 'antlr4'; +}>> + +RuleInvocationStack() ::= "arrayToString(this.getRuleInvocationStack())" + +LL_EXACT_AMBIG_DETECTION() ::= <> + +ParserToken(parser, token) ::= <%.%> + +Production(p) ::= <%

    %> + +Result(r) ::= <%%> + +ParserPropertyMember() ::= << +@members { +public Property() { + return true; +} +} +>> + +ParserPropertyCall(p, call) ::= "

    ." + +PositionAdjustingLexerDef() ::= "" + +PositionAdjustingLexer() ::= << + +resetAcceptPosition (index: number, line: number, column: number) : void { + this._input.seek(index); + this.line = line; + this.column = column; + this._interp.consume(this._input); +} + +emit () : any /* should be Token */ { + switch(this._type) { + case PositionAdjustingLexer.TOKENS: + this.handleAcceptPositionForKeyword("tokens"); + break; + case PositionAdjustingLexer.LABEL: + this.handleAcceptPositionForIdentifier(); + break; + } + return super.emit(); +} + +handleAcceptPositionForIdentifier () : boolean { + var tokenText = this.text; + var identifierLength = 0; + while (identifierLength \< tokenText.length && PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) + ) { + identifierLength += 1; + } + if (this._input.index > this._tokenStartCharIndex + identifierLength) { + var offset = identifierLength - 1; + this.resetAcceptPosition(this._tokenStartCharIndex + offset, + this._tokenStartLine, this._tokenStartColumn + offset); + return true; + } else { + return false; + } +} + +handleAcceptPositionForKeyword (keyword: string) : boolean { + if (this._input.index > this._tokenStartCharIndex + keyword.length) { + var offset = keyword.length - 1; + this.resetAcceptPosition(this._tokenStartCharIndex + offset, + this._tokenStartLine, this._tokenStartColumn + offset); + return true; + } else { + return false; + } +} + +static isIdentifierChar (c: string) { + return c.match(/^[0-9a-zA-Z_]+$/); +} + +>> + +BasicListener(X) ::= << +@parser::header { +import { ParseTreeWalker } from 'antlr4'; + +class LeafListener extends Listener { + visitTerminal(node: TerminalNode) { + console.log(node.symbol.text); + } +} + +} +>> + +WalkListener(s) ::= << +var walker = new ParseTreeWalker(); +walker.walk(new LeafListener(), ); +>> + +TreeNodeWithAltNumField(X) ::= << +@parser::header { +class MyRuleNode extends ParserRuleContext { + + altNum: number; + + constructor(parent?: 
ParserRuleContext, invokingState?: number) { + super(parent, invokingState); + this.altNum = 0; + } + + getAltNumber(): number { + return this.altNum; + } + + setAltNumber(altNumber: number){ + this.altNum = altNumber; + } +}; + +} +>> + +TokenGetterListener(X) ::= << +@parser::header { +import { arrayToString, ParseTreeWalker } from 'antlr4'; + +class LeafListener extends Listener { + + exitA? = (ctx: AContext) => { + var str; + if(ctx.getChildCount()===2) { + str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + arrayToString(ctx.INT_list()); + } else { + str = ctx.ID()!.symbol.toString(); + } + console.log(str); + }; + +} + +} +>> + +RuleGetterListener(X) ::= << +@parser::header { +import { ParseTreeWalker } from 'antlr4'; + +class LeafListener extends Listener { + exitA? = (ctx: AContext) => { + var str; + if(ctx.getChildCount()===2) { + str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b_list()[0].start.text; + } else { + str = ctx.b(0).start.text; + } + console.log(str); + }; +} + +} +>> + +LRListener(X) ::= << +@parser::header { +import { ParseTreeWalker } from 'antlr4'; + +class LeafListener extends Listener { + + exitE? = (ctx: EContext) => { + var str; + if(ctx.getChildCount()===3) { + str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e_list()[0].start.text; + } else { + str = ctx.INT()!.symbol.text; + } + console.log(str); + }; +} + +} +>> + +LRWithLabelsListener(X) ::= << +@parser::header { +import { ParseTreeWalker } from 'antlr4'; + +class LeafListener extends Listener { + + exitCall? = (ctx: CallContext) => { + var str = ctx.e().start.text + ' ' + ctx.eList(); + console.log(str); + }; + + exitInt? 
= (ctx: IntContext) => { + var str = ctx.INT().symbol.text; + console.log(str); + } + +} + +} +>> + +DeclareContextListGettersFunction() ::= << + foo() { + var s = new SContext(); + var a = s.a_list(); + var b = s.b_list(); + }; +>> + +Declare_foo() ::= "foo() {console.log('foo');}" + +Invoke_foo() ::= "this.foo();" + +Declare_pred() ::= << + + pred(v: any) { + console.log("eval=" + v.toString()); + return v; + } +>> + +Invoke_pred(v) ::= <)>> +ParserTokenType(t) ::= "Parser." +ContextRuleFunction(ctx, rule) ::= "." +ContextListFunction(ctx, rule) ::= "._list()" +StringType() ::= "string | undefined" +ContextMember(ctx, member) ::= "._" +SubContextLocal(ctx, subctx, local) ::= ".." +SubContextMember(ctx, subctx, member) ::= ".._" diff --git a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java index c40c4048ca..7c1792374e 100644 --- a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java +++ b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java @@ -7,17 +7,11 @@ package org.antlr.v4.runtime; import org.antlr.v4.runtime.misc.Interval; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.*; public class TestCodePointCharStream { - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Test public void emptyBytesHasSize0() { CodePointCharStream s = CharStreams.fromString(""); @@ -36,9 +30,11 @@ public void emptyBytesLookAheadReturnsEOF() { @Test public void consumingEmptyStreamShouldThrow() { CodePointCharStream s = CharStreams.fromString(""); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows( + 
IllegalStateException.class, + s::consume + ); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test @@ -59,9 +55,8 @@ public void consumingSingleLatinCodePointShouldMoveIndex() { public void consumingPastSingleLatinCodePointShouldThrow() { CodePointCharStream s = CharStreams.fromString("X"); s.consume(); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, s::consume); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test @@ -107,9 +102,8 @@ public void consumingSingleCJKCodePointShouldMoveIndex() { public void consumingPastSingleCJKCodePointShouldThrow() { CodePointCharStream s = CharStreams.fromString("\u611B"); s.consume(); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, s::consume); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test @@ -150,9 +144,8 @@ public void consumingPastEndOfEmojiCodePointWithShouldThrow() { assertEquals(0, s.index()); s.consume(); assertEquals(1, s.index()); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, s::consume); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeLexerTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeLexerTestDescriptor.java deleted file mode 100644 index c9f603a542..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeLexerTestDescriptor.java +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -public class BaseCompositeLexerTestDescriptor extends BaseRuntimeTestDescriptor { - @Override - public String getTestType() { - return "CompositeLexer"; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeParserTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeParserTestDescriptor.java deleted file mode 100644 index 2b5f0ac3c6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseCompositeParserTestDescriptor.java +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -public class BaseCompositeParserTestDescriptor extends BaseRuntimeTestDescriptor { - @Override - public String getTestType() { - return "CompositeParser"; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseDiagnosticParserTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseDiagnosticParserTestDescriptor.java deleted file mode 100644 index 95bcaf7956..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseDiagnosticParserTestDescriptor.java +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime; - -public abstract class BaseDiagnosticParserTestDescriptor extends BaseParserTestDescriptor { - @Override - public boolean showDiagnosticErrors() { - return true; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseLexerTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseLexerTestDescriptor.java deleted file mode 100644 index cf215a5d8e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseLexerTestDescriptor.java +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -public class BaseLexerTestDescriptor extends BaseRuntimeTestDescriptor { - @Override - public String getTestType() { - return "Lexer"; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseParserTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseParserTestDescriptor.java deleted file mode 100644 index cb7f7d297b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseParserTestDescriptor.java +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime; - -public class BaseParserTestDescriptor extends BaseRuntimeTestDescriptor { - @Override - public String getTestType() { - return "Parser"; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java deleted file mode 100644 index f7874d6717..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -import org.antlr.v4.Tool; -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DefaultToolListener; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupFile; -import org.stringtemplate.v4.StringRenderer; - -import java.io.File; -import java.io.IOException; -import java.lang.reflect.Modifier; -import java.net.URL; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.fail; -import static junit.framework.TestCase.failNotEquals; -import static org.junit.Assume.assumeFalse; - -/** This class represents a single runtime test. It pulls data from - * a {@link RuntimeTestDescriptor} and uses junit to trigger a test. - * The only functionality needed to execute a test is defined in - * {@link RuntimeTestSupport}. All of the various test rig classes - * derived from this one. 
E.g., see {@link org.antlr.v4.test.runtime.java.TestParserExec}. - * - * @since 4.6. - */ -public abstract class BaseRuntimeTest { - public final static String[] Targets = { - "Cpp", - "Java", - "Go", - "CSharp", - "Python2", "Python3", - "PHP", - "Node", "Safari", "Firefox", "Explorer", "Chrome" - }; - public final static String[] JavaScriptTargets = { - "Node", "Safari", "Firefox", "Explorer", "Chrome" - }; - - static { - // Add heartbeat thread to gen minimal output for travis, appveyor to - // avoid timeout. - Thread t = new Thread("heartbeat") { - @Override - public void run() { - while (true) { - System.out.print('.'); - try { - Thread.sleep(5000); - } - catch (Exception e) { - e.printStackTrace(); - } - } - } - }; - t.start(); - } - - /** ANTLR isn't thread-safe to process grammars so we use a global lock for testing */ - public static final Object antlrLock = new Object(); - - protected RuntimeTestSupport delegate; - protected RuntimeTestDescriptor descriptor; - - public BaseRuntimeTest(RuntimeTestDescriptor descriptor, RuntimeTestSupport delegate) { - this.descriptor = descriptor; - this.delegate = delegate; - } - - public static void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - @Before - public void setUp() throws Exception { - // From http://junit.sourceforge.net/javadoc/org/junit/Assume.html - // "The default JUnit runner treats tests with failing assumptions as ignored" - assumeFalse(descriptor.ignore(descriptor.getTarget())); - delegate.testSetUp(); - } - - @Rule - public final TestRule testWatcher = new TestWatcher() { - @Override - protected void succeeded(Description description) { - // remove tmpdir if no error. 
- delegate.eraseTempDir(); - } - }; - - @Test - public void testOne() throws Exception { - // System.out.println(delegate.getTmpDir()); - if ( descriptor.ignore(descriptor.getTarget()) ) { - System.out.printf("Ignore "+descriptor); - return; - } - - if ( descriptor.getTestType().contains("Parser") ) { - testParser(descriptor); - } - else { - testLexer(descriptor); - } - } - - public void testParser(RuntimeTestDescriptor descriptor) throws Exception { - mkdir(delegate.getTmpDir()); - - Pair pair = descriptor.getGrammar(); - - ClassLoader cloader = getClass().getClassLoader(); - URL templates = cloader.getResource("org/antlr/v4/test/runtime/templates/"+descriptor.getTarget()+".test.stg"); - STGroupFile targetTemplates = new STGroupFile(templates, "UTF-8", '<', '>'); - targetTemplates.registerRenderer(String.class, new StringRenderer()); - - // write out any slave grammars - List> slaveGrammars = descriptor.getSlaveGrammars(); - if ( slaveGrammars!=null ) { - for (Pair spair : slaveGrammars) { - STGroup g = new STGroup('<', '>'); - g.registerRenderer(String.class, new StringRenderer()); - g.importTemplates(targetTemplates); - ST grammarST = new ST(g, spair.b); - writeFile(delegate.getTmpDir(), spair.a+".g4", grammarST.render()); - } - } - - String grammarName = pair.a; - String grammar = pair.b; - STGroup g = new STGroup('<', '>'); - g.importTemplates(targetTemplates); - g.registerRenderer(String.class, new StringRenderer()); - ST grammarST = new ST(g, grammar); - grammar = grammarST.render(); - - String found = delegate.execParser(grammarName+".g4", grammar, - grammarName+"Parser", - grammarName+"Lexer", - grammarName+"Listener", - grammarName+"Visitor", - descriptor.getStartRule(), - descriptor.getInput(), - descriptor.showDiagnosticErrors() - ); - assertCorrectOutput(descriptor, delegate, found); - } - - public void testLexer(RuntimeTestDescriptor descriptor) throws Exception { - mkdir(delegate.getTmpDir()); - - Pair pair = descriptor.getGrammar(); - - ClassLoader 
cloader = getClass().getClassLoader(); - URL templates = cloader.getResource("org/antlr/v4/test/runtime/templates/"+descriptor.getTarget()+".test.stg"); - STGroupFile targetTemplates = new STGroupFile(templates, "UTF-8", '<', '>'); - targetTemplates.registerRenderer(String.class, new StringRenderer()); - - // write out any slave grammars - List> slaveGrammars = descriptor.getSlaveGrammars(); - if ( slaveGrammars!=null ) { - for (Pair spair : slaveGrammars) { - STGroup g = new STGroup('<', '>'); - g.registerRenderer(String.class, new StringRenderer()); - g.importTemplates(targetTemplates); - ST grammarST = new ST(g, spair.b); - writeFile(delegate.getTmpDir(), spair.a+".g4", grammarST.render()); - } - } - - String grammarName = pair.a; - String grammar = pair.b; - STGroup g = new STGroup('<', '>'); - g.registerRenderer(String.class, new StringRenderer()); - g.importTemplates(targetTemplates); - ST grammarST = new ST(g, grammar); - grammar = grammarST.render(); - - String found = delegate.execLexer(grammarName+".g4", grammar, grammarName, descriptor.getInput(), descriptor.showDFA()); - assertCorrectOutput(descriptor, delegate, found); - } - - /** Write a grammar to tmpdir and run antlr */ - public static ErrorQueue antlrOnString(String workdir, - String targetName, - String grammarFileName, - String grammarStr, - boolean defaultListener, - String... extraOptions) - { - mkdir(workdir); - writeFile(workdir, grammarFileName, grammarStr); - return antlrOnString(workdir, targetName, grammarFileName, defaultListener, extraOptions); - } - - /** Run ANTLR on stuff in workdir and error queue back */ - public static ErrorQueue antlrOnString(String workdir, - String targetName, - String grammarFileName, - boolean defaultListener, - String... 
extraOptions) - { - final List options = new ArrayList<>(); - Collections.addAll(options, extraOptions); - if ( targetName!=null ) { - options.add("-Dlanguage="+targetName); - } - if ( !options.contains("-o") ) { - options.add("-o"); - options.add(workdir); - } - if ( !options.contains("-lib") ) { - options.add("-lib"); - options.add(workdir); - } - if ( !options.contains("-encoding") ) { - options.add("-encoding"); - options.add("UTF-8"); - } - options.add(new File(workdir,grammarFileName).toString()); - - final String[] optionsA = new String[options.size()]; - options.toArray(optionsA); - Tool antlr = new Tool(optionsA); - ErrorQueue equeue = new ErrorQueue(antlr); - antlr.addListener(equeue); - if (defaultListener) { - antlr.addListener(new DefaultToolListener(antlr)); - } - synchronized (antlrLock) { - antlr.processGrammarsOnCommandLine(); - } - - List errors = new ArrayList<>(); - - if ( !defaultListener && !equeue.errors.isEmpty() ) { - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage msg = equeue.errors.get(i); - ST msgST = antlr.errMgr.getMessageTemplate(msg); - errors.add(msgST.render()); - } - } - if ( !defaultListener && !equeue.warnings.isEmpty() ) { - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage msg = equeue.warnings.get(i); - // antlrToolErrors.append(msg); warnings are hushed - } - } - - return equeue; - } - - // ---- support ---- - - public static RuntimeTestDescriptor[] getRuntimeTestDescriptors(Class clazz, String targetName) { - Class[] nestedClasses = clazz.getClasses(); - List descriptors = new ArrayList(); - for (Class nestedClass : nestedClasses) { - int modifiers = nestedClass.getModifiers(); - if ( RuntimeTestDescriptor.class.isAssignableFrom(nestedClass) && !Modifier.isAbstract(modifiers) ) { - try { - RuntimeTestDescriptor d = (RuntimeTestDescriptor) nestedClass.newInstance(); - d.setTarget(targetName); - descriptors.add(d); - } catch (Exception e) { - e.printStackTrace(System.err); - } - } - } - return 
descriptors.toArray(new RuntimeTestDescriptor[0]); - } - - public static void writeFile(String dir, String fileName, String content) { - try { - Utils.writeFile(dir+"/"+fileName, content, "UTF-8"); - } - catch (IOException ioe) { - System.err.println("can't write file"); - ioe.printStackTrace(System.err); - } - } - - - protected static void assertCorrectOutput(RuntimeTestDescriptor descriptor, RuntimeTestSupport delegate, String actualOutput) { - String actualParseErrors = delegate.getParseErrors(); - String actualToolErrors = delegate.getANTLRToolErrors(); - String expectedOutput = descriptor.getOutput(); - String expectedParseErrors = descriptor.getErrors(); - String expectedToolErrors = descriptor.getANTLRToolErrors(); - - if (actualOutput == null) { - actualOutput = ""; - } - if (actualParseErrors == null) { - actualParseErrors = ""; - } - if (actualToolErrors == null) { - actualToolErrors = ""; - } - if (expectedOutput == null) { - expectedOutput = ""; - } - if (expectedParseErrors == null) { - expectedParseErrors = ""; - } - if (expectedToolErrors == null) { - expectedToolErrors = ""; - } - - if (actualOutput.equals(expectedOutput) && - actualParseErrors.equals(expectedParseErrors) && - actualToolErrors.equals(expectedToolErrors)) { - return; - } - - if (actualOutput.equals(expectedOutput)) { - if (actualParseErrors.equals(expectedParseErrors)) { - failNotEquals("[" + descriptor.getTarget() + ":" + descriptor.getTestName() + "] " + - "Parse output and parse errors are as expected, but tool errors are incorrect", - expectedToolErrors, actualToolErrors); - } - else { - fail("[" + descriptor.getTarget() + ":" + descriptor.getTestName() + "] " + - "Parse output is as expected, but errors are not: " + - "expectedParseErrors:<" + expectedParseErrors + - ">; actualParseErrors:<" + actualParseErrors + - ">; expectedToolErrors:<" + expectedToolErrors + - ">; actualToolErrors:<" + actualToolErrors + - ">."); - } - } - else { - fail("[" + descriptor.getTarget() + ":" + 
descriptor.getTestName() + "] " + - "Parse output is incorrect: " + - "expectedOutput:<" + expectedOutput + - ">; actualOutput:<" + actualOutput + - ">; expectedParseErrors:<" + expectedParseErrors + - ">; actualParseErrors:<" + actualParseErrors + - ">; expectedToolErrors:<" + expectedToolErrors + - ">; actualToolErrors:<" + actualToolErrors + - ">."); - } - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTestDescriptor.java deleted file mode 100644 index 0447c78b47..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTestDescriptor.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.runtime.misc.Utils; - -import java.lang.reflect.Field; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** An abstract but mostly complete test descriptor that pulls values - * for the various runtime test descriptor methods such as {@link #getInput()} - * from fields using reflection. 
- * - * @since 4.6 - */ -public abstract class BaseRuntimeTestDescriptor implements RuntimeTestDescriptor { - protected String targetName; - - @Override - public String getTestName() { - return this.getClass().getSimpleName(); - } - - @Override - public String getInput() { - try { - Field f = this.getClass().getField("input"); - return stringIndentation((String)f.get(this)); - } - catch (Exception nsfe) { - ; // we are optional - } - return ""; - } - - @Override - public String getOutput() { - try { - Field f = this.getClass().getField("output"); - String s = stringIndentation((String)f.get(this)); - if ( s.length()==0 ) return null; - return s; - } - catch (Exception nsfe) { - ; // we are optional - } - return null; - } - - @Override - public String getErrors() { - try { - Field f = this.getClass().getField("errors"); - String s = stringIndentation((String)f.get(this)); - if ( s.length()==0 ) return null; - return s; - } - catch (Exception nsfe) { - ; // we are optional - } - return null; - } - - @Override - public String getANTLRToolErrors() { - try { - Field f = this.getClass().getField("toolErrors"); - String s = stringIndentation((String)f.get(this)); - if ( s.length()==0 ) return null; - return s; - } - catch (Exception nsfe) { - ; // we are optional - } - return null; - } - - @Override - public String getStartRule() { - try { - Field f = this.getClass().getField("startRule"); - return (String)f.get(this); - } - catch (Exception nsfe) { - System.err.println("No start rule specified for test "+getTestName()); - } - return null; - } - - @Override - public Pair getGrammar() { - String grammarName = null; - try { - Field f = this.getClass().getField("grammarName"); - grammarName = (String)f.get(this); - } - catch (Exception nsfe) { - System.err.println("No grammar name specified for test "+getTestName()); - } - String grammar = rawGetGrammar(); - return new Pair(grammarName,grammar); - } - - private String rawGetGrammar() { - String grammar = null; - try { - 
Field f = this.getClass().getField("grammar"); - grammar = (String)f.get(this); - } - catch (Exception nsfe) { - System.err.println("No start rule specified for test "+getTestName()); - } - grammar = stringIndentation(grammar); - return grammar; - } - - /** strip indentation; use first line's indent as prefix to strip */ - public static String stringIndentation(String s) { - if ( s==null ) return ""; - if ( s.equals("\n") ) return s; - s = Utils.expandTabs(s, 4); - String lines[] = s.split("\\r?\\n"); - String first = lines[0]; - Pattern wspat = Pattern.compile("^\\s+"); - Matcher matcher = wspat.matcher(first); - if ( matcher.find() ) { - String indent = matcher.group(0); - s = s.replace(indent, ""); // wack first indent - s = s.replaceAll("\\n"+indent, "\n"); // wack the others - } - return s; - } - - @Override - public List> getSlaveGrammars() { - return null; - } - - @Override - public String getTarget() { - return targetName; - } - - @Override - public void setTarget(String targetName) { - this.targetName = targetName; - } - - @Override - public boolean showDFA() { - return false; - } - - @Override - public boolean showDiagnosticErrors() { - return false; - } - - @Override - public boolean ignore(String targetName) { - return false; - } - - @Override - public String toString() { - return getTarget()+":"+getTestName(); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/CustomDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/CustomDescriptors.java new file mode 100644 index 0000000000..11134db496 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/CustomDescriptors.java @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.atn.PredictionMode; + +import java.net.URI; +import java.nio.file.Paths; +import java.util.*; + +public class CustomDescriptors { + public final static HashMap descriptors; + private final static URI uri; + + static { + uri = Paths.get(RuntimeTestUtils.runtimeTestsuitePath.toString(), + "test", "org", "antlr", "v4", "test", "runtime", "CustomDescriptors.java").toUri(); + + descriptors = new HashMap<>(); + descriptors.put("LexerExec", + new RuntimeTestDescriptor[]{ + getLineSeparatorLfDescriptor(), + getLineSeparatorCrLfDescriptor(), + getLargeLexerDescriptor(), + getAtnStatesSizeMoreThan65535Descriptor() + }); + descriptors.put("ParserExec", + new RuntimeTestDescriptor[] { + getMultiTokenAlternativeDescriptor() + }); + } + + private static RuntimeTestDescriptor getLineSeparatorLfDescriptor() { + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "LineSeparatorLf", + "", + "1\n2\n3", + "[@0,0:0='1',<1>,1:0]\n" + + "[@1,1:1='\\n',<2>,1:1]\n" + + "[@2,2:2='2',<1>,2:0]\n" + + "[@3,3:3='\\n',<2>,2:1]\n" + + "[@4,4:4='3',<1>,3:0]\n" + + "[@5,5:4='',<-1>,3:1]\n", + "", + null, + "L", + "lexer grammar L;\n" + + "T: ~'\\n'+;\n" + + "SEPARATOR: '\\n';", + null, false, false, false, PredictionMode.LL, true, null, uri); + } + + private static RuntimeTestDescriptor getLineSeparatorCrLfDescriptor() { + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "LineSeparatorCrLf", + "", + "1\r\n2\r\n3", + "[@0,0:0='1',<1>,1:0]\n" + + "[@1,1:2='\\r\\n',<2>,1:1]\n" + + "[@2,3:3='2',<1>,2:0]\n" + + "[@3,4:5='\\r\\n',<2>,2:1]\n" + + "[@4,6:6='3',<1>,3:0]\n" + + "[@5,7:6='',<-1>,3:1]\n", + "", + "", + "L", + "lexer grammar L;\n" + + "T: ~'\\r'+;\n" + + "SEPARATOR: '\\r\\n';", + null, false, false, false, PredictionMode.LL, true, null, uri); + } + + private static RuntimeTestDescriptor getLargeLexerDescriptor() { + final int tokensCount = 4000; + final String grammarName = "L"; + + StringBuilder grammar = 
new StringBuilder(); + grammar.append("lexer grammar ").append(grammarName).append(";\n"); + grammar.append("WS: [ \\t\\r\\n]+ -> skip;\n"); + for (int i = 0; i < tokensCount; i++) { + grammar.append("KW").append(i).append(" : 'KW' '").append(i).append("';\n"); + } + + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "LargeLexer", + "This is a regression test for antlr/antlr4#76 \"Serialized ATN strings\n" + + "should be split when longer than 2^16 bytes (class file limitation)\"\n" + + "https://github.com/antlr/antlr4/issues/76", + "KW400", + "[@0,0:4='KW400',<402>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n", + "", + "", + grammarName, + grammar.toString(), + null, false, false, false, PredictionMode.LL, true, null, uri); + } + + private static RuntimeTestDescriptor getAtnStatesSizeMoreThan65535Descriptor() { + // I tried playing around with different sizes, and I think 1002 works for Go but 1003 does not; + // the executing lexer gets a token syntax error for T208 or something like that + final int tokensCount = 1024; + final String suffix = String.join("", Collections.nCopies(70, "_")); + + final String grammarName = "L"; + StringBuilder grammar = new StringBuilder(); + grammar.append("lexer grammar ").append(grammarName).append(";\n"); + grammar.append('\n'); + StringBuilder input = new StringBuilder(); + StringBuilder output = new StringBuilder(); + int startOffset; + int stopOffset = -2; + for (int i = 0; i < tokensCount; i++) { + String ruleName = String.format("T_%06d", i); + String value = ruleName+suffix; + grammar.append(ruleName).append(": '").append(value).append("';\n"); + input.append(value).append('\n'); + + startOffset = stopOffset + 2; + stopOffset += value.length() + 1; + + output.append("[@").append(i).append(',').append(startOffset).append(':').append(stopOffset) + .append("='").append(value).append("',<").append(i + 1).append(">,").append(i + 1) + .append(":0]\n"); + } + + grammar.append("\n"); + grammar.append("WS: [ \\t\\r\\n]+ -> 
skip;\n"); + + startOffset = stopOffset + 2; + stopOffset = startOffset - 1; + output.append("[@").append(tokensCount).append(',').append(startOffset).append(':').append(stopOffset) + .append("='',<-1>,").append(tokensCount + 1).append(":0]\n"); + + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "AtnStatesSizeMoreThan65535", + "Regression for https://github.com/antlr/antlr4/issues/1863", + input.toString(), + output.toString(), + "", + "", + grammarName, + grammar.toString(), + null, false, false, false, PredictionMode.LL, true, + new String[] {"CSharp", "Python3", "Go", "PHP", "Swift", "JavaScript", "TypeScript", "Dart"}, + uri); + } + + private static RuntimeTestDescriptor getMultiTokenAlternativeDescriptor() { + final int tokensCount = 64; + + StringBuilder rule = new StringBuilder("r1: "); + StringBuilder tokens = new StringBuilder(); + StringBuilder input = new StringBuilder(); + StringBuilder output = new StringBuilder(); + + for (int i = 0; i < tokensCount; i++) { + String currentToken = "T" + i; + rule.append(currentToken); + if (i < tokensCount - 1) { + rule.append(" | "); + } else { + rule.append(";"); + } + tokens.append(currentToken).append(": '").append(currentToken).append("';\n"); + input.append(currentToken).append(" "); + output.append(currentToken); + } + String currentToken = "T" + tokensCount; + tokens.append(currentToken).append(": '").append(currentToken).append("';\n"); + input.append(currentToken).append(" "); + output.append(currentToken); + + String grammar = "grammar P;\n" + + "r: (r1 | T" + tokensCount + ")+ EOF {};\n" + + rule + "\n" + + tokens + "\n" + + "WS: [ ]+ -> skip;"; + + return new RuntimeTestDescriptor( + GrammarType.Parser, + "MultiTokenAlternative", + "https://github.com/antlr/antlr4/issues/3698, https://github.com/antlr/antlr4/issues/3703", + input.toString(), + output + "\n", + "", + "r", + "P", + grammar, + null, false, false, false, PredictionMode.LL, true, null, uri); + } +} diff --git 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/FileUtils.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/FileUtils.java new file mode 100644 index 0000000000..3fec87267e --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/FileUtils.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.misc.Utils; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.DosFileAttributes; + +import static org.antlr.v4.test.runtime.RuntimeTestUtils.FileSeparator; + +public class FileUtils { + public static void writeFile(String dir, String fileName, String content) { + try { + Utils.writeFile(dir + FileSeparator + fileName, content, "UTF-8"); + } + catch (IOException ioe) { + System.err.println("can't write file"); + ioe.printStackTrace(System.err); + } + } + + public static String readFile(String dir, String fileName) { + try { + return String.copyValueOf(Utils.readFile(dir+"/"+fileName, "UTF-8")); + } + catch (IOException ioe) { + System.err.println("can't read file"); + ioe.printStackTrace(System.err); + } + return null; + } + + public static void replaceInFile(Path sourcePath, String target, String replacement) throws IOException { + replaceInFile(sourcePath, sourcePath, target, replacement); + } + + public static void replaceInFile(Path sourcePath, Path destPath, String target, String replacement) throws IOException { + String content = new String(Files.readAllBytes(sourcePath), StandardCharsets.UTF_8); + String newContent = content.replace(target, replacement); + try 
(PrintWriter out = new PrintWriter(destPath.toString())) { + out.println(newContent); + } + } + + public static void mkdir(String dir) { + File f = new File(dir); + //noinspection ResultOfMethodCallIgnored + f.mkdirs(); + } + + public static void deleteDirectory(File f) throws IOException { + if (f.isDirectory() && !isLink(f.toPath())) { + File[] files = f.listFiles(); + if (files != null) { + for (File c : files) + deleteDirectory(c); + } + } + if (!f.delete()) + throw new IOException("Failed to delete file: " + f); + } + + public static boolean isLink(Path path) throws IOException { + try { + BasicFileAttributes attrs = Files.readAttributes(path, BasicFileAttributes.class, LinkOption.NOFOLLOW_LINKS); + return attrs.isSymbolicLink() || (attrs instanceof DosFileAttributes && attrs.isOther()); + } catch (IOException ignored) { + return false; + } + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedFile.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedFile.java new file mode 100644 index 0000000000..6db8a47fbc --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedFile.java @@ -0,0 +1,16 @@ +package org.antlr.v4.test.runtime; + +public class GeneratedFile { + public final String name; + public final boolean isParser; + + public GeneratedFile(String name, boolean isParser) { + this.name = name; + this.isParser = isParser; + } + + @Override + public String toString() { + return name + "; isParser:" + isParser; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/Generator.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/Generator.java new file mode 100644 index 0000000000..18bd776494 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/Generator.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.Tool; +import org.antlr.v4.tool.ANTLRMessage; +import org.antlr.v4.tool.DefaultToolListener; +import org.stringtemplate.v4.ST; + +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; + +public class Generator { + /** Write a grammar to tmpdir and run antlr */ + public static ErrorQueue antlrOnString(String workdir, + String targetName, + String grammarFileName, + String grammarStr, + boolean defaultListener, + String... extraOptions) + { + FileUtils.mkdir(workdir); + writeFile(workdir, grammarFileName, grammarStr); + return antlrOnString(workdir, targetName, grammarFileName, defaultListener, extraOptions); + } + + /** Run ANTLR on stuff in workdir and error queue back */ + public static ErrorQueue antlrOnString(String workdir, + String targetName, + String grammarFileName, + boolean defaultListener, + String... 
extraOptions) + { + final List options = new ArrayList<>(); + Collections.addAll(options, extraOptions); + if ( targetName!=null ) { + options.add("-Dlanguage="+targetName); + } + if ( !options.contains("-o") ) { + options.add("-o"); + options.add(workdir); + } + if ( !options.contains("-lib") ) { + options.add("-lib"); + options.add(workdir); + } + if ( !options.contains("-encoding") ) { + options.add("-encoding"); + options.add("UTF-8"); + } + options.add(new File(workdir,grammarFileName).toString()); + + final String[] optionsA = new String[options.size()]; + options.toArray(optionsA); + Tool antlr = new Tool(optionsA); + ErrorQueue equeue = new ErrorQueue(antlr); + antlr.addListener(equeue); + if (defaultListener) { + antlr.addListener(new DefaultToolListener(antlr)); + } + antlr.processGrammarsOnCommandLine(); + + List errors = new ArrayList<>(); + + if ( !defaultListener && !equeue.errors.isEmpty() ) { + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage msg = equeue.errors.get(i); + ST msgST = antlr.errMgr.getMessageTemplate(msg); + errors.add(msgST.render()); + } + } + if ( !defaultListener && !equeue.warnings.isEmpty() ) { + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage msg = equeue.warnings.get(i); + // antlrToolErrors.append(msg); warnings are hushed + } + } + + return equeue; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/GrammarType.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/GrammarType.java new file mode 100644 index 0000000000..92a1498b59 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/GrammarType.java @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +public enum GrammarType { + Lexer, + Parser, + CompositeLexer, + CompositeParser +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/OSType.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/OSType.java new file mode 100644 index 0000000000..1dcad27a10 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/OSType.java @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +public enum OSType { + Windows, + Linux, + Mac, + Unknown +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/Processor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/Processor.java new file mode 100644 index 0000000000..a032d4f25f --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/Processor.java @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.antlr.v4.test.runtime.RuntimeTestUtils.joinLines; + +public class Processor { + /** Turn this on to see output like: + * RUNNING cmake . -DCMAKE_BUILD_TYPE=Release in /Users/parrt/antlr/code/antlr4/runtime/Cpp + * RUNNING make -j 20 in /Users/parrt/antlr/code/antlr4/runtime/Cpp + * RUNNING ln -s /Users/parrt/antlr/code/antlr4/runtime/Cpp/dist/libantlr4-runtime.dylib in /var/folders/w1/_nr4stn13lq0rvjdkwh7q8cc0000gn/T/CppRunner-ForkJoinPool-1-worker-23-1668284191961 + * RUNNING clang++ -std=c++17 -I /Users/parrt/antlr/code/antlr4/runtime/Cpp/runtime/src -L. 
-lantlr4-runtime -pthread -o Test.out Test.cpp TLexer.cpp TParser.cpp TListener.cpp TBaseListener.cpp TVisitor.cpp TBaseVisitor.cpp in /var/folders/w1/_nr4stn13lq0rvjdkwh7q8cc0000gn/T/CppRunner-ForkJoinPool-1-worker-23-1668284191961 + */ + public static final boolean WATCH_COMMANDS_EXEC = false; + public final String[] arguments; + public final String workingDirectory; + public final Map environmentVariables; + public final boolean throwOnNonZeroErrorCode; + + public static ProcessorResult run(String[] arguments, String workingDirectory, Map environmentVariables) + throws InterruptedException, IOException + { + return new Processor(arguments, workingDirectory, environmentVariables, true).start(); + } + + public static ProcessorResult run(String[] arguments, String workingDirectory) throws InterruptedException, IOException { + return new Processor(arguments, workingDirectory, new HashMap<>(), true).start(); + } + + public Processor(String[] arguments, String workingDirectory, Map environmentVariables, + boolean throwOnNonZeroErrorCode) { + this.arguments = arguments; + this.workingDirectory = workingDirectory; + this.environmentVariables = environmentVariables; + this.throwOnNonZeroErrorCode = throwOnNonZeroErrorCode; + } + + public ProcessorResult start() throws InterruptedException, IOException { + if ( WATCH_COMMANDS_EXEC ) { + System.out.println("RUNNING "+ String.join(" ", arguments)+" in "+workingDirectory); + } + ProcessBuilder builder = new ProcessBuilder(arguments); + if (workingDirectory != null) { + builder.directory(new File(workingDirectory)); + } + if (environmentVariables != null && environmentVariables.size() > 0) { + Map environment = builder.environment(); + for (String key : environmentVariables.keySet()) { + environment.put(key, environmentVariables.get(key)); + } + } + + Process process = builder.start(); + StreamReader stdoutReader = new StreamReader(process.getInputStream()); + StreamReader stderrReader = new 
StreamReader(process.getErrorStream()); + stdoutReader.start(); + stderrReader.start(); + process.waitFor(); + stdoutReader.join(); + stderrReader.join(); + + String output = stdoutReader.toString(); + String errors = stderrReader.toString(); + if (throwOnNonZeroErrorCode && process.exitValue() != 0) { + throw new InterruptedException("Exit code "+process.exitValue()+" with output:\n"+joinLines(output, errors)); + } + return new ProcessorResult(process.exitValue(), output, errors); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/ProcessorResult.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/ProcessorResult.java new file mode 100644 index 0000000000..ef88abda16 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/ProcessorResult.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +public class ProcessorResult { + public final int exitCode; + public final String output; + public final String errors; + + public ProcessorResult(int exitCode, String output, String errors) { + this.exitCode = exitCode; + this.output = output; + this.errors = errors; + } + + public boolean isSuccess() { + return exitCode == 0; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RunOptions.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RunOptions.java new file mode 100644 index 0000000000..f4f5aa869b --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RunOptions.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.atn.PredictionMode; + +public class RunOptions { + public final String grammarFileName; + public final String grammarStr; + public final String parserName; + public final String lexerName; + public final String grammarName; + public final boolean useListener; + public final boolean useVisitor; + public final String startRuleName; + public final String input; + public final boolean profile; + public final boolean showDiagnosticErrors; + public final boolean traceATN; + public final boolean showDFA; + public final Stage endStage; + public final String superClass; + public final PredictionMode predictionMode; + public final boolean buildParseTree; + + public RunOptions(String grammarFileName, String grammarStr, String parserName, String lexerName, + boolean useListener, boolean useVisitor, String startRuleName, + String input, boolean profile, boolean showDiagnosticErrors, + boolean traceATN, boolean showDFA, Stage endStage, + String language, String superClass, PredictionMode predictionMode, boolean buildParseTree) { + this.grammarFileName = grammarFileName; + this.grammarStr = grammarStr; + this.parserName = parserName; + this.lexerName = lexerName; + String grammarName = null; + boolean isCombinedGrammar = lexerName != null && parserName != null || language.equals("Go"); + if (isCombinedGrammar) { + if (parserName != null) { + grammarName = parserName.endsWith("Parser") + ? parserName.substring(0, parserName.length() - "Parser".length()) + : parserName; + } + else if (lexerName != null) { + grammarName = lexerName.endsWith("Lexer") + ? 
lexerName.substring(0, lexerName.length() - "Lexer".length()) + : lexerName; + } + } + else { + if (parserName != null) { + grammarName = parserName; + } + else { + grammarName = lexerName; + } + } + this.grammarName = grammarName; + this.useListener = useListener; + this.useVisitor = useVisitor; + this.startRuleName = startRuleName; + this.input = input; + this.profile = profile; + this.showDiagnosticErrors = showDiagnosticErrors; + this.traceATN = traceATN; + this.showDFA = showDFA; + this.endStage = endStage; + this.superClass = superClass; + this.predictionMode = predictionMode; + this.buildParseTree = buildParseTree; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeRunner.java new file mode 100644 index 0000000000..d1df8ae5fa --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeRunner.java @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.antlr.v4.test.runtime.states.State; +import org.stringtemplate.v4.NumberRenderer; +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; +import org.stringtemplate.v4.StringRenderer; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; + +import static org.antlr.v4.test.runtime.FileUtils.deleteDirectory; +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.*; + +public abstract class RuntimeRunner implements AutoCloseable { + + public abstract String getLanguage(); + + protected String getExtension() { return getLanguage().toLowerCase(); } + + protected String getTitleName() { return getLanguage(); } + + protected String getTestFileName() { return "Test"; } + + protected String getLexerSuffix() { return "Lexer"; } + + protected String getParserSuffix() { return "Parser"; } + + protected String getBaseListenerSuffix() { return "BaseListener"; } + + protected String getListenerSuffix() { return "Listener"; } + + protected String getBaseVisitorSuffix() { return "BaseVisitor"; } + + protected String getVisitorSuffix() { return "Visitor"; } + + protected String grammarNameToFileName(String grammarName) { return grammarName; } + + private static String runtimeToolPath; + private static String compilerPath; + + public final static String InputFileName = "input"; + + protected final String getCompilerPath() { + if (compilerPath == null) { + compilerPath = getCompilerName(); + if (compilerPath != null) { + String compilerPathFromProperty = System.getProperty(getPropertyPrefix() + "-compiler"); + if (compilerPathFromProperty != null && compilerPathFromProperty.length() > 0) { + compilerPath = 
compilerPathFromProperty; + } + } + } + + return compilerPath; + } + + protected final String getRuntimeToolPath() { + if (runtimeToolPath == null) { + runtimeToolPath = getRuntimeToolName(); + if (runtimeToolPath != null) { + String runtimeToolPathFromProperty = System.getProperty(getPropertyPrefix() + "-exec"); + if (runtimeToolPathFromProperty != null && runtimeToolPathFromProperty.length() > 0) { + runtimeToolPath = runtimeToolPathFromProperty; + } + } + } + + return runtimeToolPath; + } + + protected String getCompilerName() { return null; } + + protected String getRuntimeToolName() { return getLanguage().toLowerCase(); } + + protected String getTestFileWithExt() { return getTestFileName() + "." + getExtension(); } + + protected String getExecFileName() { return getTestFileWithExt(); } + + protected String[] getExtraRunArgs() { return null; } + + protected Map getExecEnvironment() { return null; } + + protected String getPropertyPrefix() { + return "antlr-" + getLanguage().toLowerCase(); + } + + public final String getTempDirPath() { + return tempTestDir.toString(); + } + + private boolean saveTestDir; + + protected final Path tempTestDir; + + protected RuntimeRunner() { + this(null, false); + } + + protected RuntimeRunner(Path tempDir, boolean saveTestDir) { + if (tempDir == null) { + String dirName = getClass().getSimpleName() + "-" + Thread.currentThread().getName() + "-" + System.currentTimeMillis(); + tempTestDir = Paths.get(TempDirectory, dirName); + } + else { + tempTestDir = tempDir; + } + this.saveTestDir = saveTestDir; + } + + public void setSaveTestDir(boolean saveTestDir) { + this.saveTestDir = saveTestDir; + } + + public void close() { + removeTempTestDirIfRequired(); + } + + public final static String cacheDirectory; + + private static class InitializationStatus { + public final Object lockObject = new Object(); + public volatile Boolean isInitialized; + public Exception exception; + } + + private final static HashMap 
runtimeInitializationStatuses = new HashMap<>(); + + static { + cacheDirectory = new File(System.getProperty("java.io.tmpdir"), "ANTLR-runtime-testsuite-cache").getAbsolutePath(); + } + + protected final String getCachePath() { + return getCachePath(getLanguage()); + } + + public static String getCachePath(String language) { + return cacheDirectory + FileSeparator + language; + } + + protected final String getRuntimePath() { + return getRuntimePath(getLanguage()); + } + + public static String getRuntimePath(String language) { + return runtimePath.toString() + FileSeparator + language; + } + + // Allows any target to add additional options for the antlr tool such as the location of the output files + // which is useful for the Go target for instance to avoid having to move them before running the test + // + protected List getTargetToolOptions(RunOptions ro) { + return null; + } + + public State run(RunOptions runOptions) { + List options = new ArrayList<>(); + if (runOptions.useVisitor) { + options.add("-visitor"); + } + if (runOptions.superClass != null && runOptions.superClass.length() > 0) { + options.add("-DsuperClass=" + runOptions.superClass); + } + + // See if the target wants to add tool options. 
+ // + List targetOpts = getTargetToolOptions(runOptions); + if (targetOpts != null) { + options.addAll(targetOpts); + } + + ErrorQueue errorQueue = Generator.antlrOnString(getTempDirPath(), getLanguage(), + runOptions.grammarFileName, runOptions.grammarStr, false, options.toArray(new String[0])); + + List generatedFiles = getGeneratedFiles(runOptions); + GeneratedState generatedState = new GeneratedState(errorQueue, generatedFiles, null); + + if (generatedState.containsErrors() || runOptions.endStage == Stage.Generate) { + return generatedState; + } + + if (!initAntlrRuntimeIfRequired(runOptions)) { + // Do not repeat ANTLR runtime initialization error + return new CompiledState(generatedState, new Exception(getTitleName() + " ANTLR runtime is not initialized")); + } + + writeRecognizerFile(runOptions); + + CompiledState compiledState = compile(runOptions, generatedState); + + if (compiledState.containsErrors() || runOptions.endStage == Stage.Compile) { + return compiledState; + } + + writeInputFile(runOptions); + + return execute(runOptions, compiledState); + } + + protected List getGeneratedFiles(RunOptions runOptions) { + List files = new ArrayList<>(); + String extensionWithDot = "." + getExtension(); + String fileGrammarName = grammarNameToFileName(runOptions.grammarName); + boolean isCombinedGrammarOrGo = runOptions.lexerName != null && runOptions.parserName != null || getLanguage().equals("Go"); + if (runOptions.lexerName != null) { + files.add(new GeneratedFile(fileGrammarName + (isCombinedGrammarOrGo ? getLexerSuffix() : "") + extensionWithDot, false)); + } + if (runOptions.parserName != null) { + files.add(new GeneratedFile(fileGrammarName + (isCombinedGrammarOrGo ? 
getParserSuffix() : "") + extensionWithDot, true)); + if (runOptions.useListener) { + files.add(new GeneratedFile(fileGrammarName + getListenerSuffix() + extensionWithDot, true)); + String baseListenerSuffix = getBaseListenerSuffix(); + if (baseListenerSuffix != null) { + files.add(new GeneratedFile(fileGrammarName + baseListenerSuffix + extensionWithDot, true)); + } + } + if (runOptions.useVisitor) { + files.add(new GeneratedFile(fileGrammarName + getVisitorSuffix() + extensionWithDot, true)); + String baseVisitorSuffix = getBaseVisitorSuffix(); + if (baseVisitorSuffix != null) { + files.add(new GeneratedFile(fileGrammarName + baseVisitorSuffix + extensionWithDot, true)); + } + } + } + return files; + } + + protected void writeRecognizerFile(RunOptions runOptions) { + String text = RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/" + getTestFileWithExt() + ".stg"); + ST outputFileST = new ST(text); + STGroup nativeGroup = outputFileST.impl.nativeGroup; + nativeGroup.registerRenderer(Integer.class, new NumberRenderer()); + nativeGroup.registerRenderer(String.class, new StringRenderer()); + outputFileST.add("grammarName", runOptions.grammarName); + outputFileST.add("lexerName", runOptions.lexerName); + outputFileST.add("parserName", runOptions.parserName); + outputFileST.add("parserStartRuleName", grammarParseRuleToRecognizerName(runOptions.startRuleName)); + outputFileST.add("showDiagnosticErrors", runOptions.showDiagnosticErrors); + outputFileST.add("traceATN", runOptions.traceATN); + outputFileST.add("profile", runOptions.profile); + outputFileST.add("showDFA", runOptions.showDFA); + outputFileST.add("useListener", runOptions.useListener); + outputFileST.add("useVisitor", runOptions.useVisitor); + outputFileST.add("predictionMode", runOptions.predictionMode); + outputFileST.add("buildParseTree", runOptions.buildParseTree); + addExtraRecognizerParameters(outputFileST); + writeFile(getTempDirPath(), getTestFileWithExt(), 
outputFileST.render()); + } + + protected String grammarParseRuleToRecognizerName(String startRuleName) { + return startRuleName; + } + + protected void addExtraRecognizerParameters(ST template) { + } + + private boolean initAntlrRuntimeIfRequired(RunOptions runOptions) { + String language = getLanguage(); + InitializationStatus status; + + // Create initialization status for every runtime with lock object + synchronized (runtimeInitializationStatuses) { + status = runtimeInitializationStatuses.get(language); + if (status == null) { + status = new InitializationStatus(); + runtimeInitializationStatuses.put(language, status); + } + } + + if (status.isInitialized != null) { + return status.isInitialized; + } + + // Locking per runtime, several runtimes can be being initialized simultaneously + synchronized (status.lockObject) { + if (status.isInitialized == null) { + Exception exception = null; + try { + initRuntime(runOptions); + } catch (Exception e) { + exception = e; + e.printStackTrace(); + } + status.isInitialized = exception == null; + status.exception = exception; + } + } + return status.isInitialized; + } + + protected void initRuntime(RunOptions runOptions) throws Exception { + } + + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + return new CompiledState(generatedState, null); + } + + protected void writeInputFile(RunOptions runOptions) { + writeFile(getTempDirPath(), InputFileName, runOptions.input); + } + + protected ExecutedState execute(RunOptions runOptions, CompiledState compiledState) { + String output = null; + String errors = null; + Exception exception = null; + try { + List args = new ArrayList<>(); + String runtimeToolPath = getRuntimeToolPath(); + if (runtimeToolPath != null) { + args.add(runtimeToolPath); + } + String[] extraRunArgs = getExtraRunArgs(); + if (extraRunArgs != null) { + args.addAll(Arrays.asList(extraRunArgs)); + } + args.add(getExecFileName()); + args.add(InputFileName); + 
ProcessorResult result = Processor.run(args.toArray(new String[0]), getTempDirPath(), getExecEnvironment()); + output = result.output; + errors = result.errors; + } catch (InterruptedException | IOException e) { + exception = e; + } + return new ExecutedState(compiledState, output, errors, exception); + } + + protected ProcessorResult runCommand(String[] command, String workPath) throws Exception { + return runCommand(command, workPath, null); + } + + protected ProcessorResult runCommand(String[] command, String workPath, String description) throws Exception { + String cmd = String.join(" ", command); + try { + return Processor.run(command, workPath); + } catch (InterruptedException | IOException e) { + String msg = "command \"" + cmd + "\"\n in " + workPath + " failed"; + if (description != null) { + msg += ":\n can't " + description; + } + throw new Exception(msg, e); + } + } + + private void removeTempTestDirIfRequired() { + if (!saveTestDir) { + File dirFile = tempTestDir.toFile(); + if (dirFile.exists()) { + try { + deleteDirectory(dirFile); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java index faaaaba932..098cedf0af 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java @@ -1,68 +1,99 @@ /* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ package org.antlr.v4.test.runtime; +import org.antlr.v4.runtime.atn.PredictionMode; import org.antlr.v4.runtime.misc.Pair; +import java.net.URI; +import java.util.Arrays; import java.util.List; -/** This interface describes everything that a runtime test - * descriptor can specify. Most testing descriptors will - * subclass {@link BaseRuntimeTestDescriptor} rather than - * implement this directly. The {@link BaseRuntimeTest} - * class pulls data from descriptors to execute tests. - * - * @since 4.6 +/** This object represents all the information we need about a single test and is the + * in-memory representation of a descriptor file */ -public interface RuntimeTestDescriptor { - /** The name of this test such as TokenAndRuleContextString (see - * {@link org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors.TokenAndRuleContextString}) +public class RuntimeTestDescriptor { + /** A type in {"Lexer", "Parser", "CompositeLexer", "CompositeParser"} */ + public final GrammarType testType; + + /** Return a string representing the name of the target currently testing + * this descriptor. + * Multiple instances of the same descriptor class + * can be created to test different targets. */ - String getTestName(); + public final String name; - /** A type in {"Lexer", "Parser", "CompositeLexer", "CompositeParser"} */ - String getTestType(); + public final String notes; /** Parser input. Return "" if not input should be provided to the parser or lexer. */ - String getInput(); + public final String input; /** Output from executing the parser. Return null if no output is expected. */ - String getOutput(); + public final String output; /** Parse errors Return null if no errors are expected. */ - String getErrors(); - - /** Errors generated by ANTLR processing the grammar. Return null if no errors are expected. 
*/ - String getANTLRToolErrors(); + public final String errors; /** The rule at which parsing should start */ - String getStartRule(); // TODO: alter tests to use same default start rule? + public final String startRule; + public final String grammarName; + + public final String grammar; + /** List of grammars imported into the grammar */ + public final List> slaveGrammars; /** For lexical tests, dump the DFA of the default lexer mode to stdout */ - boolean showDFA(); + public final boolean showDFA; /** For parsing, engage the DiagnosticErrorListener, dumping results to stderr */ - boolean showDiagnosticErrors(); - - /** Associates name of grammar like M in M.g4 to string (template) of grammar */ - Pair getGrammar(); - - /** Return a list of grammars imported into the grammar specified in {#getGrammar}. */ - List> getSlaveGrammars(); - - /** Return a string representing the name of the target currently testing - * this descriptor. Multiple instances of the same descriptor class - * can be created to test different targets. 
- */ - String getTarget(); - - /** Set the target we are testing */ - void setTarget(String targetName); + public final boolean showDiagnosticErrors; + + public final boolean traceATN; + + public final PredictionMode predictionMode; + + public final boolean buildParseTree; + + public final String[] skipTargets; + + public final URI uri; + + public RuntimeTestDescriptor(GrammarType testType, String name, String notes, + String input, String output, String errors, + String startRule, + String grammarName, String grammar, List> slaveGrammars, + boolean showDiagnosticErrors, boolean traceATN, boolean showDFA, PredictionMode predictionMode, + boolean buildParseTree, String[] skipTargets, URI uri) { + this.testType = testType; + this.name = name; + this.notes = notes; + this.input = input; + this.output = output; + this.errors = errors; + this.startRule = startRule; + this.grammarName = grammarName; + this.grammar = grammar; + this.slaveGrammars = slaveGrammars; + this.showDFA = showDFA; + this.showDiagnosticErrors = showDiagnosticErrors; + this.traceATN = traceATN; + this.predictionMode = predictionMode; + this.buildParseTree = buildParseTree; + this.skipTargets = skipTargets != null ? skipTargets : new String[0]; + this.uri = uri; + } /** Return true if this test should be ignored for the indicated target */ - boolean ignore(String targetName); + public boolean ignore(String targetName) { + return Arrays.asList(skipTargets).contains(targetName); + } + + @Override + public String toString() { + return name; + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptorParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptorParser.java new file mode 100644 index 0000000000..6921297899 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptorParser.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.atn.PredictionMode; +import org.antlr.v4.runtime.misc.Pair; + +import java.net.URI; +import java.util.*; + +public class RuntimeTestDescriptorParser { + private final static Set sections = new HashSet<>(Arrays.asList( + "notes", "type", "grammar", "slaveGrammar", "start", "input", "output", "errors", "flags", "skip" + )); + + /** Read stuff like: + [grammar] + grammar T; + s @after {} + : ID | ID {} ; + ID : 'a'..'z'+; + WS : (' '|'\t'|'\n')+ -> skip ; + + [grammarName] + T + + [start] + s + + [input] + abc + + [output] + Decision 0: + s0-ID->:s1^=>1 + + [errors] + """line 1:0 reportAttemptingFullContext d=0 (s), input='abc' + """ + + Some can be missing like [errors]. + + Get gr names automatically "lexer grammar Unicode;" "grammar T;" "parser grammar S;" + + Also handle slave grammars: + + [grammar] + grammar M; + import S,T; + s : a ; + B : 'b' ; // defines B from inherited token space + WS : (' '|'\n') -> skip ; + + [slaveGrammar] + parser grammar T; + a : B {}; + + [slaveGrammar] + parser grammar S; + a : b {}; + b : B; + */ + public static RuntimeTestDescriptor parse(String name, String text, URI uri) throws RuntimeException { + String currentField = null; + StringBuilder currentValue = new StringBuilder(); + + List> pairs = new ArrayList<>(); + String[] lines = text.split("\r?\n"); + + for (String line : lines) { + boolean newSection = false; + String sectionName = null; + if (line.startsWith("[") && line.length() > 2) { + sectionName = line.substring(1, line.length() - 1); + newSection = sections.contains(sectionName); + } + + if (newSection) { + if (currentField != null) { + pairs.add(new Pair<>(currentField, currentValue.toString())); + } + currentField = sectionName; + currentValue.setLength(0); + } + else { + currentValue.append(line); + 
currentValue.append("\n"); + } + } + pairs.add(new Pair<>(currentField, currentValue.toString())); + + String notes = ""; + GrammarType testType = GrammarType.Lexer; + String grammar = ""; + String grammarName = ""; + List> slaveGrammars = new ArrayList<>(); + String startRule = ""; + String input = ""; + String output = ""; + String errors = ""; + boolean showDFA = false; + boolean showDiagnosticErrors = false; + boolean traceATN = false; + PredictionMode predictionMode = PredictionMode.LL; + boolean buildParseTree = true; + String[] skipTargets = new String[0]; + for (Pair p : pairs) { + String section = p.a; + String value = ""; + if ( p.b!=null ) { + value = p.b.trim(); + } + if ( value.startsWith("\"\"\"") ) { + value = value.replace("\"\"\"", ""); + } + else if ( value.indexOf('\n')>=0 ) { + value = value + "\n"; // if multi line and not quoted, leave \n on end. + } + switch (section) { + case "notes": + notes = value; + break; + case "type": + testType = Enum.valueOf(GrammarType.class, value); + break; + case "grammar": + grammarName = getGrammarName(value.split("\n")[0]); + grammar = value; + break; + case "slaveGrammar": + String gname = getGrammarName(value.split("\n")[0]); + slaveGrammars.add(new Pair<>(gname, value)); + case "start": + startRule = value; + break; + case "input": + input = value; + break; + case "output": + output = value; + break; + case "errors": + errors = value; + break; + case "flags": + String[] flags = value.split("\n"); + for (String f : flags) { + String[] parts = f.split("=", 2); + switch (parts[0]) { + case "showDFA": + showDFA = true; + break; + case "showDiagnosticErrors": + showDiagnosticErrors = true; + break; + case "traceATN": + traceATN = true; + break; + case "predictionMode": + predictionMode = PredictionMode.valueOf(parts[1]); + break; + case "notBuildParseTree": + buildParseTree = false; + break; + } + } + break; + case "skip": + skipTargets = value.split("\n"); + break; + default: + throw new 
RuntimeException("Unknown descriptor section ignored: "+section); + } + } + return new RuntimeTestDescriptor(testType, name, notes, input, output, errors, startRule, grammarName, grammar, + slaveGrammars, showDiagnosticErrors, traceATN, showDFA, predictionMode, buildParseTree, skipTargets, uri); + } + + /** Get A, B, or C from: + * "lexer grammar A;" "grammar B;" "parser grammar C;" + */ + private static String getGrammarName(String grammarDeclLine) { + int gi = grammarDeclLine.indexOf("grammar "); + if ( gi<0 ) { + return ""; + } + gi += "grammar ".length(); + int gsemi = grammarDeclLine.indexOf(';'); + return grammarDeclLine.substring(gi, gsemi); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestSupport.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestSupport.java deleted file mode 100644 index ad94e29070..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestSupport.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -/** This interface describes functionality needed to execute a runtime test. - * Unfortunately the Base*Test.java files are big junk drawers. This is - * an attempt to make it more obvious what new target implementers have to - * implement. 
- * - * @since 4.6 - */ -public interface RuntimeTestSupport { - void testSetUp() throws Exception; - void testTearDown() throws Exception; - void eraseTempDir(); - - String getTmpDir(); - - String getStdout(); - String getParseErrors(); - String getANTLRToolErrors(); - - String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA); - - String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors); -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java new file mode 100644 index 0000000000..598155f2e6 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.automata.ATNPrinter; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.Rule; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public abstract class RuntimeTestUtils { + public static final String NewLine = System.getProperty("line.separator"); + public static final String PathSeparator = System.getProperty("path.separator"); + public static final String FileSeparator = System.getProperty("file.separator"); + public static final String TempDirectory = System.getProperty("java.io.tmpdir"); + + public final static Path runtimePath; + public final static Path runtimeTestsuitePath; + public final static Path resourcePath; + + private final static Map resourceCache = new HashMap<>(); + private static OSType detectedOS; + private static Boolean isWindows; + + static { + String locationPath = RuntimeTestUtils.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + if (isWindows()) { + locationPath = locationPath.replaceFirst("/", ""); + } + Path potentialRuntimeTestsuitePath = Paths.get(locationPath, "..", "..").normalize().toAbsolutePath(); + Path potentialResourcePath = Paths.get(potentialRuntimeTestsuitePath.toString(), "resources"); + + if (Files.exists(potentialResourcePath)) { + runtimeTestsuitePath = potentialRuntimeTestsuitePath; + } + else { + runtimeTestsuitePath = Paths.get("..", "runtime-testsuite").normalize().toAbsolutePath(); + } + + runtimePath = Paths.get(runtimeTestsuitePath.toString(), "..", "runtime").normalize().toAbsolutePath(); + resourcePath = Paths.get(runtimeTestsuitePath.toString(), "resources"); + } + + public static boolean isWindows() { + if (isWindows == null) { + isWindows = getOS() == OSType.Windows; + } + + return isWindows; + } 
+ + public static OSType getOS() { + if (detectedOS == null) { + String os = System.getProperty("os.name", "generic").toLowerCase(Locale.ENGLISH); + if (os.contains("mac") || os.contains("darwin")) { + detectedOS = OSType.Mac; + } + else if (os.contains("win")) { + detectedOS = OSType.Windows; + } + else if (os.contains("nux")) { + detectedOS = OSType.Linux; + } + else { + detectedOS = OSType.Unknown; + } + } + return detectedOS; + } + + public static synchronized String getTextFromResource(String name) { + try { + String text = resourceCache.get(name); + if (text == null) { + Path path = Paths.get(resourcePath.toString(), name); + text = new String(Files.readAllBytes(path)); + resourceCache.put(name, text); + } + return text; + } + catch (Exception ex) { + throw new RuntimeException(ex); + } + } + + public static void checkRuleATN(Grammar g, String ruleName, String expecting) { + Rule r = g.getRule(ruleName); + ATNState startState = g.getATN().ruleToStartState[r.index]; + ATNPrinter serializer = new ATNPrinter(g, startState); + String result = serializer.asString(); + + assertEquals(expecting, result); + } + + public static String joinLines(Object... args) { + StringBuilder result = new StringBuilder(); + for (Object arg : args) { + String str = arg.toString(); + result.append(str); + if (!str.endsWith("\n")) + result.append("\n"); + } + return result.toString(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTests.java new file mode 100644 index 0000000000..09931d0864 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTests.java @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.java.JavaRuntimeTests; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.antlr.v4.test.runtime.states.State; +import org.junit.jupiter.api.DynamicNode; +import org.junit.jupiter.api.TestFactory; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; +import org.stringtemplate.v4.*; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.stream.Stream; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.joinLines; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.DynamicContainer.dynamicContainer; +import static org.junit.jupiter.api.DynamicTest.dynamicTest; + +/** This class represents runtime tests for specified runtime. + * It pulls data from {@link RuntimeTestDescriptor} and uses junit to trigger tests. + * The only functionality needed to execute a test is defined in {@link RuntimeRunner}. + * All the various test rig classes derived from this one. + * E.g., see {@link JavaRuntimeTests}. 
+ */ +public abstract class RuntimeTests { + protected abstract RuntimeRunner createRuntimeRunner(); + + private final static HashMap testDescriptors = new HashMap<>(); + private final static Map cachedTargetTemplates = new HashMap<>(); + private final static StringRenderer rendered = new StringRenderer(); + + static { + File descriptorsDir = new File(Paths.get(RuntimeTestUtils.resourcePath.toString(), "org/antlr/v4/test/runtime/descriptors").toString()); + File[] directoryListing = descriptorsDir.listFiles(); + assert directoryListing != null; + for (File directory : directoryListing) { + String groupName = directory.getName(); + if (groupName.startsWith(".")) { + continue; // Ignore service directories (like .DS_Store in Mac) + } + + List descriptors = new ArrayList<>(); + + File[] descriptorFiles = directory.listFiles(); + assert descriptorFiles != null; + for (File descriptorFile : descriptorFiles) { + String name = descriptorFile.getName().replace(".txt", ""); + if (name.startsWith(".")) { + continue; + } + + String text; + try { + text = new String(Files.readAllBytes(descriptorFile.toPath())); + } catch (IOException e) { + throw new RuntimeException(e); + } + descriptors.add(RuntimeTestDescriptorParser.parse(name, text, descriptorFile.toURI())); + } + + testDescriptors.put(groupName, descriptors.toArray(new RuntimeTestDescriptor[0])); + } + + for (String key : CustomDescriptors.descriptors.keySet()) { + RuntimeTestDescriptor[] descriptors = CustomDescriptors.descriptors.get(key); + RuntimeTestDescriptor[] existedDescriptors = testDescriptors.putIfAbsent(key, descriptors); + if (existedDescriptors != null) { + testDescriptors.put(key, Stream.concat(Arrays.stream(existedDescriptors), Arrays.stream(descriptors)) + .toArray(RuntimeTestDescriptor[]::new)); + } + } + } + + @TestFactory + @Execution(ExecutionMode.CONCURRENT) + public List runtimeTests() { + List result = new ArrayList<>(); + + for (String group : testDescriptors.keySet()) { + ArrayList 
descriptorTests = new ArrayList<>(); + RuntimeTestDescriptor[] descriptors = testDescriptors.get(group); + for (RuntimeTestDescriptor descriptor : descriptors) { + descriptorTests.add(dynamicTest(descriptor.name, descriptor.uri, () -> { + try (RuntimeRunner runner = createRuntimeRunner()) { + String errorMessage = test(descriptor, runner); + if (errorMessage != null) { + runner.setSaveTestDir(true); + fail(joinLines("Test: " + descriptor.name + "; " + errorMessage, "Test directory: " + runner.getTempDirPath())); + } + } + })); + } + + Path descriptorGroupPath = Paths.get(RuntimeTestUtils.resourcePath.toString(), "descriptors", group); + result.add(dynamicContainer(group, descriptorGroupPath.toUri(), Arrays.stream(descriptorTests.toArray(new DynamicNode[0])))); + } + + return result; + } + + private static String test(RuntimeTestDescriptor descriptor, RuntimeRunner runner) { + String targetName = runner.getLanguage(); + if (descriptor.ignore(targetName)) { + System.out.println("Ignore " + descriptor); + return null; + } + + FileUtils.mkdir(runner.getTempDirPath()); + + String grammarName = descriptor.grammarName; + String grammar = prepareGrammars(descriptor, runner); + + String lexerName, parserName; + boolean useListenerOrVisitor; + String superClass; + if (descriptor.testType == GrammarType.Parser || descriptor.testType == GrammarType.CompositeParser) { + lexerName = grammarName + "Lexer"; + parserName = grammarName + "Parser"; + useListenerOrVisitor = true; + if (targetName.equals("Java")) { + superClass = JavaRunner.runtimeTestParserName; + } + else { + superClass = null; + } + } + else { + lexerName = grammarName; + parserName = null; + useListenerOrVisitor = false; + if (targetName.equals("Java")) { + superClass = JavaRunner.runtimeTestLexerName; + } + else { + superClass = null; + } + } + + RunOptions runOptions = new RunOptions(grammarName + ".g4", + grammar, + parserName, + lexerName, + useListenerOrVisitor, + useListenerOrVisitor, + descriptor.startRule, 
+ descriptor.input, + false, + descriptor.showDiagnosticErrors, + descriptor.traceATN, + descriptor.showDFA, + Stage.Execute, + targetName, + superClass, + descriptor.predictionMode, + descriptor.buildParseTree + ); + + State result = runner.run(runOptions); + + return assertCorrectOutput(descriptor, targetName, result); + } + + private static String prepareGrammars(RuntimeTestDescriptor descriptor, RuntimeRunner runner) { + String targetName = runner.getLanguage(); + + STGroup targetTemplates; + synchronized (cachedTargetTemplates) { + targetTemplates = cachedTargetTemplates.get(targetName); + if (targetTemplates == null) { + ClassLoader classLoader = RuntimeTests.class.getClassLoader(); + URL templates = classLoader.getResource("org/antlr/v4/test/runtime/templates/" + targetName + ".test.stg"); + assert templates != null; + targetTemplates = new STGroupFile(templates, "UTF-8", '<', '>'); + targetTemplates.registerRenderer(String.class, rendered); + cachedTargetTemplates.put(targetName, targetTemplates); + } + } + + // write out any slave grammars + List> slaveGrammars = descriptor.slaveGrammars; + if (slaveGrammars != null) { + for (Pair spair : slaveGrammars) { + STGroup g = new STGroup('<', '>'); + g.registerRenderer(String.class, rendered); + g.importTemplates(targetTemplates); + ST grammarST = new ST(g, spair.b); + writeFile(runner.getTempDirPath(), spair.a + ".g4", grammarST.render()); + } + } + + STGroup g = new STGroup('<', '>'); + g.importTemplates(targetTemplates); + g.registerRenderer(String.class, rendered); + ST grammarST = new ST(g, descriptor.grammar); + return grammarST.render(); + } + + public static String assertCorrectOutput(RuntimeTestDescriptor descriptor, String targetName, State state) { + ExecutedState executedState; + if (state instanceof ExecutedState) { + executedState = (ExecutedState)state; + if (executedState.exception != null) { + return state.getErrorMessage(); + } + } + else { + return state.getErrorMessage(); + } + + String 
expectedOutput = descriptor.output; + String expectedParseErrors = descriptor.errors; + + boolean doesOutputEqualToExpected = executedState.output.equals(expectedOutput); + if (!doesOutputEqualToExpected || !executedState.errors.equals(expectedParseErrors)) { + String message; + if (doesOutputEqualToExpected) { + message = "Parse output is as expected, but errors are not: "; + } + else { + message = "Parse output is incorrect: " + + "expectedOutput:<" + expectedOutput + ">; actualOutput:<" + executedState.output + ">; "; + } + + return "[" + targetName + ":" + descriptor.name + "] " + + message + + "expectedParseErrors:<" + expectedParseErrors + ">;" + + "actualParseErrors:<" + executedState.errors + ">."; + } + + return null; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/Stage.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/Stage.java new file mode 100644 index 0000000000..ed807ad6f8 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/Stage.java @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +public enum Stage { + Generate, + Compile, + Execute +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamReader.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamReader.java new file mode 100644 index 0000000000..2c84ab9c70 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamReader.java @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; + +public final class StreamReader implements Runnable { + private final StringBuilder buffer = new StringBuilder(); + private final BufferedReader in; + private final Thread worker; + + public StreamReader(InputStream in) { + this.in = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8) ); + worker = new Thread(this); + } + + public void start() { + worker.start(); + } + + @Override + public void run() { + try { + while (true) { + int c = in.read(); + if (c == -1) { + break; + } + if (c == '\r') { + continue; + } + buffer.append((char) c); + } + } + catch (IOException ioe) { + System.err.println("can't read output from process"); + } + } + + /** wait for the thread to finish */ + public void join() throws InterruptedException { + worker.join(); + } + + @Override + public String toString() { + return buffer.toString(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamVacuum.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamVacuum.java deleted file mode 100644 index d0daa19479..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamVacuum.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; - -public final class StreamVacuum implements Runnable { - private StringBuilder buf = new StringBuilder(); - private BufferedReader in; - private Thread sucker; - public StreamVacuum(InputStream in) { - this.in = new BufferedReader( new InputStreamReader(in, StandardCharsets.UTF_8) ); - } - public void start() { - sucker = new Thread(this); - sucker.start(); - } - @Override - public void run() { - try { - TestOutputReading.append(in, buf); - } - catch (IOException ioe) { - System.err.println("can't read output from process"); - } - } - /** wait for the thread to finish */ - public void join() throws InterruptedException { - sucker.join(); - } - @Override - public String toString() { - return buf.toString(); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/TestOutputReading.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/TestOutputReading.java deleted file mode 100644 index 91bb1a61ed..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/TestOutputReading.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; - -public abstract class TestOutputReading { - public static void append(BufferedReader in, StringBuilder buf) throws IOException { - String line = in.readLine(); - while (line!=null) { - buf.append(line); - // NOTE: This appends a newline at EOF - // regardless of whether or not the - // input actually ended with a - // newline. - // - // We should revisit this and read a - // block at a time rather than a line - // at a time, and change all tests - // which rely on this behavior to - // remove the trailing newline at EOF. - // - // When we fix this, we can remove the - // TestOutputReading class entirely. - buf.append('\n'); - line = in.readLine(); - } - } - - /** - * Read in the UTF-8 bytes at {@code path}, convert all - * platform-specific line terminators to NL, and append NL - * if the file was non-empty and didn't already end with one. - * - * {@see StreamVacuum#run()} for why this method exists. - * - * Returns {@code null} if the file does not exist or the output - * was empty. - */ - public static String read(Path path) throws IOException { - // Mimic StreamVacuum.run()'s behavior of replacing all platform-specific - // EOL sequences with NL. 
- StringBuilder buf = new StringBuilder(); - try (BufferedReader in = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { - append(in, buf); - } catch (FileNotFoundException | NoSuchFileException e) { - return null; - } - if (buf.length() > 0) { - return buf.toString(); - } else { - return null; - } - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/TraceATN.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/TraceATN.java new file mode 100644 index 0000000000..067f3ba1c0 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/TraceATN.java @@ -0,0 +1,235 @@ +package org.antlr.v4.test.runtime; + +import org.antlr.runtime.RecognitionException; +import org.antlr.v4.runtime.atn.PredictionMode; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.antlr.v4.test.runtime.states.State; +import org.antlr.v4.test.runtime.swift.SwiftRunner; +import org.antlr.v4.tool.*; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; + +import static org.antlr.v4.test.runtime.RuntimeTestUtils.joinLines; +import static org.junit.jupiter.api.Assertions.fail; + +/** Run a lexer/parser and dump ATN debug/trace information + * + * java org.antlr.v4.test.runtime.TraceATN [X.g4|XParser.g4 XLexer.g4] startRuleName -target [Java|Cpp|...] 
inputFileName + * + * + * In preparation, run this so we get right jars before trying this script: + * + * cd ANTLR-ROOT-DIR + * mvn install -DskipTests=true + * cd runtime-tests + * mvn install jar:test-jar -DskipTests=true + * + * Run shell script with + * + * scripts/traceatn.sh /tmp/JSON.g4 json /tmp/foo.json + * + * Here is scripts/traceatn.sh: + * + * export ANTLRJAR=/Users/parrt/.m2/repository/org/antlr/antlr4/4.11.2-SNAPSHOT/antlr4-4.11.2-SNAPSHOT-complete.jar + * export TESTJAR=/Users/parrt/.m2/repository/org/antlr/antlr4-runtime-testsuite/4.11.2-SNAPSHOT/antlr4-runtime-testsuite-4.11.2-SNAPSHOT-tests.jar + * java -classpath $ANTLRJAR:$TESTJAR org.antlr.v4.test.runtime.TraceATN $@ + */ +public class TraceATN { + protected static class IgnoreTokenVocabGrammar extends Grammar { + public IgnoreTokenVocabGrammar(String fileName, + String grammarText, + Grammar tokenVocabSource, + ANTLRToolListener listener) + throws RecognitionException + { + super(fileName, grammarText, tokenVocabSource, listener); + } + + @Override + public void importTokensFromTokensFile() { + // don't try to import tokens files; must give me both grammars if split + } + } + + protected String grammarFileName; + protected String parserGrammarFileName; + protected String lexerGrammarFileName; + protected String startRuleName; + protected String inputFileName; + protected String targetName = "Java"; + protected String encoding; + + public TraceATN(String[] args) { + if ( args.length < 2 ) { + System.err.println("java org.antlr.v4.test.runtime.TraceATN [X.g4|XParser.g4 XLexer.g4] startRuleName\n" + + " [-encoding encodingname] -target (Java|Cpp|...) 
input-filename"); + System.err.println("Omitting input-filename makes program read from stdin."); + return; + } + int i=0; + grammarFileName = args[i]; + i++; + if ( args[i].endsWith(".g4") ) { + parserGrammarFileName = grammarFileName; + lexerGrammarFileName = args[i]; + i++; + grammarFileName = null; + + if ( parserGrammarFileName.toLowerCase().endsWith("lexer.g4") ) { // swap + String save = parserGrammarFileName; + parserGrammarFileName = lexerGrammarFileName; + lexerGrammarFileName = save; + } + } + startRuleName = args[i]; + i++; + while ( i=args.length ) { + System.err.println("missing encoding on -encoding"); + return; + } + encoding = args[i]; + i++; + } + else if ( arg.equals("-target") ) { + if ( i>=args.length ) { + System.err.println("missing name on -target"); + return; + } + targetName = args[i]; + i++; + } + } + } + + public String test(RuntimeTestDescriptor descriptor, RuntimeRunner runner, String targetName) { + FileUtils.mkdir(runner.getTempDirPath()); + + String grammarName = descriptor.grammarName; + String grammar = descriptor.grammar; + + String lexerName, parserName; + boolean useListenerOrVisitor; + String superClass; + if (descriptor.testType == GrammarType.Parser || descriptor.testType == GrammarType.CompositeParser) { + lexerName = grammarName + "Lexer"; + parserName = grammarName + "Parser"; + useListenerOrVisitor = true; + if ( targetName!=null && targetName.equals("Java") ) { + superClass = JavaRunner.runtimeTestParserName; + } + else { + superClass = null; + } + } + else { + lexerName = grammarName; + parserName = null; + useListenerOrVisitor = false; + if (targetName.equals("Java")) { + superClass = JavaRunner.runtimeTestLexerName; + } + else { + superClass = null; + } + } + + RunOptions runOptions = new RunOptions(grammarName + ".g4", + grammar, + parserName, + lexerName, + useListenerOrVisitor, + useListenerOrVisitor, + descriptor.startRule, + descriptor.input, + false, + descriptor.showDiagnosticErrors, + descriptor.traceATN, + 
descriptor.showDFA, + Stage.Execute, + targetName, + superClass, + PredictionMode.LL, + true + ); + + State result = runner.run(runOptions); + + ExecutedState executedState; + if (result instanceof ExecutedState) { + executedState = (ExecutedState)result; + if (executedState.exception != null) { + return result.getErrorMessage(); + } + return executedState.output; + } + else { + return result.getErrorMessage(); + } + } + + void execParse() throws Exception { + if ( grammarFileName==null && (parserGrammarFileName==null && lexerGrammarFileName==null) ) { + System.err.println("No grammar specified"); + return; + } + + if ( inputFileName==null ) { + System.err.println("No input file specified"); + return; + } + + String grammarName = + grammarFileName.substring(grammarFileName.lastIndexOf('/')+1, grammarFileName.length()); + grammarName = grammarName.substring(0, grammarName.indexOf(".g4")); + if ( grammarFileName!=null ) { + String grammar = new String(Files.readAllBytes(Paths.get(grammarFileName))); + + String input = new String(Files.readAllBytes(Paths.get(inputFileName))); + + RuntimeTestDescriptor descriptor = new RuntimeTestDescriptor( + GrammarType.CompositeParser, + "TraceATN-" + grammarFileName, + "", + input, + "", + "", + startRuleName, + grammarName, + grammar, + null, + false, + true, + false, + PredictionMode.LL, + true, + null, + null); + + RuntimeRunner runner = getRunner(targetName); + + String result = test(descriptor, runner, targetName); + System.out.println(result); + } + } + + public static RuntimeRunner getRunner(String targetName) throws Exception { + Class cl = Class.forName("org.antlr.v4.test.runtime."+ + targetName.toLowerCase() + "." 
+ targetName + "Runner"); + return (RuntimeRunner)cl.newInstance(); + } + + public static void main(String[] args) throws Exception { + TraceATN I = new TraceATN(args); + I.execParse(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/category/LeftRecursionTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/category/LeftRecursionTests.java deleted file mode 100644 index 11c90a9afc..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/category/LeftRecursionTests.java +++ /dev/null @@ -1,7 +0,0 @@ -package org.antlr.v4.test.runtime.category; - -/** - * Created by ericvergnaud on 27/06/2017. - */ -public class LeftRecursionTests { -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/category/LexerTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/category/LexerTests.java deleted file mode 100644 index 8e534c0d1f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/category/LexerTests.java +++ /dev/null @@ -1,7 +0,0 @@ -package org.antlr.v4.test.runtime.category; - -/** - * Created by ericvergnaud on 27/06/2017. - */ -public class LexerTests { -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/category/ParserTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/category/ParserTests.java deleted file mode 100644 index 7ad7cd9578..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/category/ParserTests.java +++ /dev/null @@ -1,7 +0,0 @@ -package org.antlr.v4.test.runtime.category; - -/** - * Created by ericvergnaud on 27/06/2017. - */ -public class ParserTests { -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/BaseCppTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/BaseCppTest.java deleted file mode 100644 index fa8ae24cde..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/BaseCppTest.java +++ /dev/null @@ -1,1130 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.DecisionState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import java.io.File; -import 
java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public class BaseCppTest implements RuntimeTestSupport { - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - // private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName()); - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - public String tmpdir = null; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. 
- */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - private String getPropertyPrefix() { - return "antlr-" + getLanguage().toLowerCase(); - } - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String propName = getPropertyPrefix() + "-test-dir"; - String prop = System.getProperty(propName); - if(prop!=null && prop.length()>0) { - tmpdir = prop; - } - else { - tmpdir = new File(System.getProperty("java.io.tmpdir"), - getClass().getSimpleName()+"-"+Thread.currentThread().getName()+"-"+System.currentTimeMillis()).getAbsolutePath(); - } - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if ( g.atn==null ) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if ( g.isLexer() ) { - f = new LexerATNFactory((LexerGrammar)g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { 
- System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - public DFA createDFA(Grammar g, DecisionState s) { -// PredictionDFAFactory conv = new PredictionDFAFactory(g, s); -// DFA dfa = conv.createDFA(); -// conv.issueAmbiguityWarnings(); -// System.out.print("DFA="+dfa); -// return dfa; - return null; - } - -// public void minimizeDFA(DFA dfa) { -// DFAMinimizer dmin = new DFAMinimizer(dfa); -// dfa.minimized = dmin.minimize(); -// } - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if ( expecting!=null && !expecting.trim().isEmpty() ) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while ( ttype!= Token.EOF ); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, - ATN atn, - CharStream input) - { - LexerATNSimulator interp = new LexerATNSimulator(atn,new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if ( hitEOF ) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if ( ttype == Token.EOF ) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if ( t== IntStream.EOF ) { - hitEOF = 
true; - } - } while ( ttype!=Token.EOF ); - return tokenTypes; - } - - List checkRuleDFA(String gtext, String ruleName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - ATNState s = atn.ruleToStartState[g.getRule(ruleName).index]; - if ( s==null ) { - System.err.println("no such rule: "+ruleName); - return null; - } - ATNState t = s.transition(0).target; - if ( !(t instanceof DecisionState) ) { - System.out.println(ruleName+" has no decision"); - return null; - } - DecisionState blk = (DecisionState)t; - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - List checkRuleDFA(String gtext, int decision, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - DecisionState blk = atn.decisionToState.get(decision); - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - void checkRuleDFA(Grammar g, DecisionState blk, String expecting) - throws Exception - { - DFA dfa = createDFA(g, blk); - String result = null; - if ( dfa!=null ) result = dfa.toString(); - assertEquals(expecting, result); - } - - List checkLexerDFA(String gtext, String expecting) - throws Exception - { - return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); - } - - List checkLexerDFA(String gtext, String modeName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - LexerGrammar g = new LexerGrammar(gtext, equeue); - g.atn = createATN(g, false); -// LexerATNToDFAConverter conv = new LexerATNToDFAConverter(g); -// DFA dfa = conv.createDFA(modeName); -// g.setLookaheadDFA(0, dfa); // only one decision to worry about -// -// String result = null; -// if ( dfa!=null ) result = dfa.toString(); -// assertEquals(expecting, result); -// -// return equeue.all; - return null; - } - - protected String getLanguage() { - return "Cpp"; - } - - 
protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input) - { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName,"-no-listener"); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execModule("Test.cpp"); - return output; - } - - public ParseTree execStartRule(String startRuleName, Parser parser) - throws IllegalAccessException, InvocationTargetException, - NoSuchMethodException - { - Method startRule = null; - Object[] args = null; - try { - startRule = parser.getClass().getMethod(startRuleName); - } - catch (NoSuchMethodException nsme) { - // try with int _p arg for recursive func - startRule = parser.getClass().getMethod(startRuleName, int.class); - args = new Integer[] {0}; - } - ParseTree result = (ParseTree)startRule.invoke(parser, args); -// System.out.println("parse tree = "+result.toStringTree(parser)); - return result; - } - -// protected String execParser(String grammarFileName, -// String grammarStr, -// String parserName, -// String lexerName, -// String listenerName, -// String visitorName, -// String startRuleName, -// String input, -// boolean debug) { -// return execParser(grammarFileName, grammarStr, parserName, lexerName, -// listenerName, visitorName, startRuleName, input, debug); -// } -// - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - 
assertTrue(success); - writeFile(tmpdir, "input", input); - rawBuildRecognizerTestFile(parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - showDiagnosticErrors, - false); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... extraOptions) - { - ErrorQueue equeue = - antlrOnString(getTmpDir(), "Cpp", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".cpp"); - files.add(lexerName+".h"); - } - if ( parserName!=null ) { - files.add(parserName+".cpp"); - files.add(parserName+".h"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.cpp"); - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.h"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.cpp"); - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.h"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean trace) - { - this.stderrDuringParse = null; - if ( parserName==null ) { - 
writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, trace); - } - } - - public String execRecognizer() { - return execModule("Test.cpp"); - } - - - private static String detectedOS; - public static String getOS() { - if (detectedOS == null) { - String os = System.getProperty("os.name", "generic").toLowerCase(Locale.ENGLISH); - if ((os.indexOf("mac") >= 0) || (os.indexOf("darwin") >= 0)) { - detectedOS = "mac"; - } - else if (os.indexOf("win") >= 0) { - detectedOS = "windows"; - } - else if (os.indexOf("nux") >= 0) { - detectedOS = "linux"; - } - else { - detectedOS = "unknown"; - } - } - return detectedOS; - } - - public List allCppFiles(String path) { - ArrayList files = new ArrayList(); - File folder = new File(path); - File[] listOfFiles = folder.listFiles(); - for (int i = 0; i < listOfFiles.length; i++) { - String file = listOfFiles[i].getAbsolutePath(); - if (file.endsWith(".cpp")) { - files.add(file); - } - } - return files; - } - - private String runProcess(ProcessBuilder builder, String description, boolean showStderr) throws Exception { -// System.out.println("BUILDER: "+builder.command()); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - int errcode = process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( stderrVacuum.toString().length()>0 ) { - this.stderrDuringParse = stderrVacuum.toString(); - if ( showStderr ) System.err.println(this.stderrDuringParse); - } - if (errcode != 0) { - String err = "execution of '"+description+"' failed with error code: "+errcode; - if ( this.stderrDuringParse!=null ) { - this.stderrDuringParse += err; - } - else { - this.stderrDuringParse = err; - } - } - - return output; 
- } - - private String runCommand(String command[], String workPath, String description, boolean showStderr) throws Exception { - ProcessBuilder builder = new ProcessBuilder(command); - builder.directory(new File(workPath)); - - return runProcess(builder, description, showStderr); - } - - // TODO: add a buildRuntimeOnWindows variant. - private boolean buildRuntime() { - String runtimePath = locateRuntime(); - System.out.println("Building ANTLR4 C++ runtime (if necessary) at "+ runtimePath); - - try { - String command[] = { "cmake", ".", /*"-DCMAKE_CXX_COMPILER=clang++",*/ "-DCMAKE_BUILD_TYPE=release" }; - if (runCommand(command, runtimePath, "antlr runtime cmake", false) == null) { - return false; - } - } - catch (Exception e) { - System.err.println("can't configure antlr cpp runtime cmake file"); - } - - try { - String command[] = { "make", "-j", "8" }; // Assuming a reasonable amount of available CPU cores. - if (runCommand(command, runtimePath, "building antlr runtime", true) == null) - return false; - } - catch (Exception e) { - System.err.println("can't compile antlr cpp runtime"); - e.printStackTrace(System.err); - try { - String command[] = { "ls", "-la" }; - String output = runCommand(command, runtimePath + "/dist/", "printing library folder content", true); - System.out.println(output); - } - catch (Exception e2) { - System.err.println("can't even list folder content"); - e2.printStackTrace(System.err); - } - } - -/* for debugging - try { - String command[] = { "ls", "-la" }; - String output = runCommand(command, runtimePath + "/dist/", "printing library folder content"); - System.out.println(output); - } - catch (Exception e) { - System.err.println("can't print folder content"); - } -*/ - - return true; - } - - static Boolean runtimeBuiltOnce = false; - - public String execModule(String fileName) { - String runtimePath = locateRuntime(); - String includePath = runtimePath + "/runtime/src"; - String binPath = new File(new File(tmpdir), 
"a.out").getAbsolutePath(); - String inputPath = new File(new File(tmpdir), "input").getAbsolutePath(); - - // Build runtime using cmake once. - synchronized (runtimeBuiltOnce) { - if ( !runtimeBuiltOnce ) { - try { - String command[] = {"clang++", "--version"}; - String output = runCommand(command, tmpdir, "printing compiler version", false); - System.out.println("Compiler version is: "+output); - } - catch (Exception e) { - System.err.println("Can't get compiler version"); - } - - runtimeBuiltOnce = true; - if ( !buildRuntime() ) { - System.out.println("C++ runtime build failed\n"); - return null; - } - System.out.println("C++ runtime build succeeded\n"); - } - } - - // Create symlink to the runtime. Currently only used on OSX. - String libExtension = (getOS().equals("mac")) ? "dylib" : "so"; - try { - String command[] = { "ln", "-s", runtimePath + "/dist/libantlr4-runtime." + libExtension }; - if (runCommand(command, tmpdir, "sym linking C++ runtime", true) == null) - return null; - } - catch (Exception e) { - System.err.println("can't create link to " + runtimePath + "/dist/libantlr4-runtime." + libExtension); - e.printStackTrace(System.err); - return null; - } - - try { - List command2 = new ArrayList(Arrays.asList("clang++", "-std=c++11", "-I", includePath, "-L.", "-lantlr4-runtime", "-o", "a.out")); - command2.addAll(allCppFiles(tmpdir)); - if (runCommand(command2.toArray(new String[0]), tmpdir, "building test binary", true) == null) { - return null; - } - } - catch (Exception e) { - System.err.println("can't compile test module: " + e.getMessage()); - e.printStackTrace(System.err); - return null; - } - - // Now run the newly minted binary. Reset the error output, as we could have got compiler warnings which are not relevant here. 
- this.stderrDuringParse = null; - try { - ProcessBuilder builder = new ProcessBuilder(binPath, inputPath); - builder.directory(new File(tmpdir)); - Map env = builder.environment(); - env.put("LD_PRELOAD", runtimePath + "/dist/libantlr4-runtime." + libExtension); - String output = runProcess(builder, "running test binary", false); - if ( output.length()==0 ) { - output = null; - } - - /* for debugging - System.out.println("========================================================="); - System.out.println(output); - System.out.println("========================================================="); - */ - return output; - } - catch (Exception e) { - System.err.println("can't exec module: " + fileName); - e.printStackTrace(System.err); - } - - return null; - } - - protected String locateRuntime() { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeURL = loader.getResource("Cpp"); - if (runtimeURL == null) { - throw new RuntimeException("Cannot find runtime"); - } - // Windows not getting runtime right. 
See: - // http://stackoverflow.com/questions/6164448/convert-url-to-normal-windows-filename-java - // it was coming back "/C:/projects/antlr4-l7imv/runtime-testsuite/target/classes/Cpp" - String p; - try { - p = Paths.get(runtimeURL.toURI()).toFile().toString(); - } - catch (URISyntaxException use) { - p = "Can't find runtime"; - } - return p; - } - - List getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = 
output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { - System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, - ANTLRMessage expectedMessage) - throws Exception - { - //System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } 
- assertTrue("no error; "+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); - assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); - assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); - /* - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if ( hide.contains(t.getType()) ) { - ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if(!parserStartRuleName.endsWith(")")) - parserStartRuleName += "()"; - ST outputFileST = new ST( - "#include \\\n" - + "\n" - + "#include \"antlr4-runtime.h\"\n" - + "#include \".h\"\n" - + "#include \".h\"\n" - + "\n" - + "using namespace antlr4;\n" - + "\n" - + "class TreeShapeListener : public tree::ParseTreeListener {\n" - + "public:\n" - + " void visitTerminal(tree::TerminalNode *) override {}\n" - + " void visitErrorNode(tree::ErrorNode *) override {}\n" - + " void exitEveryRule(ParserRuleContext *) override {}\n" - + " void enterEveryRule(ParserRuleContext *ctx) override {\n" - + " for (auto child : ctx->children) {\n" - + " tree::ParseTree *parent = child->parent;\n" - + " ParserRuleContext *rule = dynamic_cast\\(parent);\n" - + " if (rule != ctx) {\n" - + " throw \"Invalid parse tree 
shape detected.\";\n" - + " }\n" - - + " }\n" - + " }\n" - + "};\n" - + "\n" - + "\n" - + "int main(int argc, const char* argv[]) {\n" - + " ANTLRFileStream input(argv[1]);\n" - + " lexer(&input);\n" - + " CommonTokenStream tokens(&lexer);\n" - + "" - + "\n" - + " tree::ParseTree *tree = parser.;\n" - + " TreeShapeListener listener;\n" - + " tree::ParseTreeWalker::DEFAULT.walk(&listener, tree);\n" - + "\n" - + " return 0;\n" - + "}\n" - ); - - String stSource = " parser(&tokens);\n"; - if(debug) { - stSource += " DiagnosticErrorListener errorListener;\n"; - stSource += " parser.addErrorListener(&errorListener);\n"; - } - if(trace) - stSource += " parser.setTrace(true);\n"; - ST createParserST = new ST(stSource); - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.cpp", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "#include \\\n" - + "\n" - + "#include \"antlr4-runtime.h\"\n" - + "#include \".h\"\n" - + "\n" - + "#include \"support/StringUtils.h\"\n" - + "\n" - + "using namespace antlr4;\n" - + "\n" - + "int main(int argc, const char* argv[]) {\n" - + " ANTLRFileStream input(argv[1]);\n" - + " lexer(&input);\n" - + " CommonTokenStream tokens(&lexer);\n" - + " tokens.fill();\n" - + " for (auto token : tokens.getTokens())\n" - + " std::cout \\<\\< token->toString() \\<\\< std::endl;\n" - + (showDFA ? 
" std::cout \\<\\< lexer.getInterpreter\\()->getDFA(Lexer::DEFAULT_MODE).toLexerString();\n" : "\n") - + " return 0;\n" - + "}\n"); - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "Test.cpp", outputFileST.render()); - } - - public void writeRecognizer(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if ( parserName==null ) { - writeLexerTestFile(lexerName, debug); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, - trace); - } - } - - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseFiles(File dir) { - String[] files = dir.list(); - for(int i = 0; files!=null && i < files.length; i++) { - new File(dir,files[i]).delete(); - } - } - - @Override - public void eraseTempDir() { - boolean doErase = true; - String propName = getPropertyPrefix() + "-erase-test-dir"; - String prop = System.getProperty(propName); - if(prop!=null && prop.length()>0) - doErase = Boolean.getBoolean(prop); - if(doErase) { - File tmpdirF = new File(tmpdir); - if ( tmpdirF.exists() ) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - } - - public String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return lines[0].substring(prefix.length(),lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable - * we cannot rely on the output order, as the hashing algorithm or other aspects - * of the implementation may be different on different JDKs or platforms. 
Hence - * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a - * bit of a hack, but guarantees that we get the same order on all systems. We assume that - * the keys are strings. - * - * @param m The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. - */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p=0; - public IntTokenStream(IntegerList types) { this.types = types; } - - @Override - public void consume() { p++; } - - @Override - public int LA(int i) { return LT(i).getType(); } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { return p; } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return null; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); - else t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - 
@Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - @Override - public String getText() { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Interval interval) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRunner.java new file mode 100644 index 0000000000..52bb286bcb --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRunner.java @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.cpp; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.getOS; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.isWindows; + +/** + * For my own information on I'm recording what I needed to do to get a unit test to compile and run in C++ on the Mac. + * I got a segmentation violation and couldn't figure out how to get information about it, so I turned on debugging + * and then figured out lldb enough to create this issue: https://github.com/antlr/antlr4/issues/3845 on a bug. + * + * cd ~/antlr/code/antlr4/runtime/Cpp + * cmake . -D CMAKE_OSX_ARCHITECTURES="arm64; x86_64" -DCMAKE_BUILD_TYPE=Debug + * make -j 8 + * + * In test dir with generated test code: + * + * clang++ -g -std=c++17 -I /Users/parrt/antlr/code/antlr4/runtime/Cpp/runtime/src -L. -lantlr4-runtime *.cpp + * ./a.out input + * + * $ lldb ./a.out input + * (lldb) run + * ... crash ... 
+ * (lldb) thread backtrace + */ +public class CppRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Cpp"; + } + + @Override + public String getTitleName() { return "C++"; } + + private static final String runtimeSourcePath; + private static final String runtimeBinaryPath; + private static final String runtimeLibraryFileName; + private static String compilerName; + private static final String visualStudioProjectContent; + private static final Map environment; + + static { + String runtimePath = getRuntimePath("Cpp"); + runtimeSourcePath = Paths.get(runtimePath, "runtime", "src").toString(); + + environment = new HashMap<>(); + if (isWindows()) { + runtimeBinaryPath = Paths.get(runtimePath, "runtime", "bin", "vs-2022", "x64", "Release DLL").toString(); + runtimeLibraryFileName = Paths.get(runtimeBinaryPath, "antlr4-runtime.dll").toString(); + String path = System.getenv("PATH"); + environment.put("PATH", path == null ? runtimeBinaryPath : path + ";" + runtimeBinaryPath); + } + else { + runtimeBinaryPath = Paths.get(runtimePath, "dist").toString(); + runtimeLibraryFileName = Paths.get(runtimeBinaryPath, + "libantlr4-runtime." + (getOS() == OSType.Mac ? 
"dylib" : "so")).toString(); + environment.put("LD_PRELOAD", runtimeLibraryFileName); + } + + if (isWindows()) { + visualStudioProjectContent = RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg"); + } else { + visualStudioProjectContent = null; + } + } + + @Override + protected String getCompilerName() { + if (compilerName == null) { + if (isWindows()) { + compilerName = "MSBuild"; + } + else { + compilerName = "clang++"; + } + } + + return compilerName; + } + + @Override + protected void initRuntime(RunOptions runOptions) throws Exception { + String runtimePath = getRuntimePath(); + + if (isWindows()) { + String[] command = { + getCompilerPath(), "antlr4cpp-vs2022.vcxproj", "/p:configuration=Release DLL", "/p:platform=x64" + }; + + runCommand(command, runtimePath + "\\runtime","build c++ ANTLR runtime using MSBuild"); + } + else { + // cmake ignores default of OFF and must explicitly say yes or no on tracing arg. grrr... + String trace = "-DTRACE_ATN="+(runOptions.traceATN?"ON":"OFF"); + String[] command = {"cmake", ".", trace, "-DCMAKE_BUILD_TYPE=Release"}; + runCommand(command, runtimePath, "run cmake on antlr c++ runtime"); + + command = new String[] {"make", "-j", Integer.toString(Runtime.getRuntime().availableProcessors())}; + runCommand(command, runtimePath, "run make on antlr c++ runtime"); + } + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + if (isWindows()) { + writeVisualStudioProjectFile(runOptions.grammarName, runOptions.lexerName, runOptions.parserName, + runOptions.useListener, runOptions.useVisitor); + } + + Exception exception = null; + try { + if (!isWindows()) { + String[] linkCommand = new String[]{"ln", "-s", runtimeLibraryFileName}; + runCommand(linkCommand, getTempDirPath(), "sym link C++ runtime"); + } + + List buildCommand = new ArrayList<>(); + buildCommand.add(getCompilerPath()); + if (isWindows()) { + buildCommand.add(getTestFileName() + 
".vcxproj"); + buildCommand.add("/p:configuration=Release"); + buildCommand.add("/p:platform=x64"); + } + else { + buildCommand.add("-std=c++17"); + buildCommand.add("-I"); + buildCommand.add(runtimeSourcePath); + buildCommand.add("-L."); + buildCommand.add("-lantlr4-runtime"); + buildCommand.add("-pthread"); + buildCommand.add("-o"); + buildCommand.add(getTestFileName() + ".out"); + buildCommand.add(getTestFileWithExt()); + buildCommand.addAll(generatedState.generatedFiles.stream().map(file -> file.name).collect(Collectors.toList())); + } + + runCommand(buildCommand.toArray(new String[0]), getTempDirPath(), "build test c++ binary"); + } + catch (Exception ex) { + exception = ex; + } + return new CompiledState(generatedState, exception); + } + + private void writeVisualStudioProjectFile(String grammarName, String lexerName, String parserName, + boolean useListener, boolean useVisitor) { + ST projectFileST = new ST(visualStudioProjectContent); + projectFileST.add("runtimeSourcePath", runtimeSourcePath); + projectFileST.add("runtimeBinaryPath", runtimeBinaryPath); + projectFileST.add("grammarName", grammarName); + projectFileST.add("lexerName", lexerName); + projectFileST.add("parserName", parserName); + projectFileST.add("useListener", useListener); + projectFileST.add("useVisitor", useVisitor); + writeFile(getTempDirPath(), "Test.vcxproj", projectFileST.render()); + } + + @Override + public String getRuntimeToolName() { + return null; + } + + @Override + public String getExecFileName() { + return Paths.get(getTempDirPath(), getTestFileName() + "." + (isWindows() ? 
"exe" : "out")).toString(); + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} + diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRuntimeTests.java new file mode 100644 index 0000000000..a1728674f1 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.cpp; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class CppRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new CppRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeLexers.java deleted file mode 100644 index 182d2536ce..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeLexers.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeParsers.java deleted file mode 100644 index 98dfbc79a5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeParsers.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.codegen.model.Parser; -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestFullContextParsing.java deleted file mode 100644 index 96d1d5b637..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestFullContextParsing.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLeftRecursion.java deleted file mode 100644 index 962be4bbec..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLeftRecursion.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LeftRecursionTests; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LeftRecursionTests.class) -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerErrors.java deleted file mode 100644 index 05307d6f7c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerErrors.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerExec.java deleted file mode 100644 index 9c5cc89a96..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerExec.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestListeners.java deleted file mode 100644 index 75a6d1000c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestListeners.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParseTrees.java deleted file mode 100644 index ca333afc5c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParseTrees.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserErrors.java deleted file mode 100644 index aa8fc80fd4..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserErrors.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserExec.java deleted file mode 100644 index 999f3116fa..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserExec.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestPerformance.java deleted file mode 100644 index 7ab384ce56..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestPerformance.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalLexer.java deleted file mode 100644 index 23f90bd83a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalLexer.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalParser.java deleted file mode 100644 index 3b3eb5ca1a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalParser.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSets.java deleted file mode 100644 index a1345ae177..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSets.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.dotnet.csproj b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.dotnet.csproj deleted file mode 100644 index 42a540de51..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.dotnet.csproj +++ /dev/null @@ -1,20 +0,0 @@ - - - - netcoreapp1.0 - $(NoWarn);CS3021 - Test - Exe - Antlr4.Test.dotnet - 1.1.1 - $(PackageTargetFallback);dnxcore50 - false - false - false - false - false - false - false - - - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.mono.csproj b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.mono.csproj deleted file mode 100644 index eb4870943b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.mono.csproj +++ /dev/null @@ -1,51 +0,0 @@ - - - - Debug - AnyCPU - {EDC70A11-C4C1-4209-93A6-CCE2B19E8E95} - Exe - Antlr4.Test.mono - Test - Test - v3.5 - - - true - full - false - bin\Debug - DEBUG; - prompt - 4 - true - - - true - bin\Release - prompt - 4 - true - - - - - - - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF} - Antlr4.Runtime.mono - - - - - - - - 
- - - Test.exe.config - Always - - - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.vs2013.csproj b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.vs2013.csproj deleted file mode 100644 index bab5315d5b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.vs2013.csproj +++ /dev/null @@ -1,44 +0,0 @@ - - - Debug - AnyCPU - {EDC70A11-C4C1-4209-93A6-CCE2B19E8E95} - Exe - Antlr4.Test.mono - Test - Test - v3.5 - - - true - full - false - bin\Debug - DEBUG; - prompt - 4 - true - - - true - bin\Release - prompt - 4 - true - - - - - - - - - - - - - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF} - Antlr4.Runtime.vs2013 - - - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/App.config b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/App.config deleted file mode 100644 index 43157903b5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/App.config +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/AssemblyInfo.cs b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/AssemblyInfo.cs deleted file mode 100644 index f674086256..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/AssemblyInfo.cs +++ /dev/null @@ -1,28 +0,0 @@ -using System; -using System.Reflection; -using System.Runtime.CompilerServices; - -// Information about this assembly is defined by the following attributes. -// Change them to the values specific to your project. - -[assembly: AssemblyTitle ("Antlr4.Test.mono")] -[assembly: AssemblyDescription ("")] -[assembly: AssemblyConfiguration ("")] -[assembly: AssemblyCompany ("")] -[assembly: AssemblyProduct ("")] -[assembly: AssemblyCopyright ("ericvergnaud")] -[assembly: AssemblyTrademark ("")] -[assembly: AssemblyCulture ("")] -[assembly: CLSCompliant (true)] -// The assembly version has the format "{Major}.{Minor}.{Build}.{Revision}". 
-// The form "{Major}.{Minor}.*" will automatically update the build and revision, -// and "{Major}.{Minor}.{Build}.*" will update just the revision. - -[assembly: AssemblyVersion ("1.0.*")] - -// The following attributes are used to specify the signing key for the assembly, -// if desired. See the Mono documentation for more information about signing. - -//[assembly: AssemblyDelaySign(false)] -//[assembly: AssemblyKeyFile("")] - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/BaseCSharpTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/BaseCSharpTest.java deleted file mode 100644 index 9ad30452b6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/BaseCSharpTest.java +++ /dev/null @@ -1,913 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.Tool; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.test.runtime.TestOutputReading; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.stringtemplate.v4.ST; -import org.w3c.dom.Document; -import org.w3c.dom.Element; - -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.transform.OutputKeys; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import 
javax.xml.transform.stream.StreamResult; -import javax.xml.xpath.XPathConstants; -import javax.xml.xpath.XPathExpression; -import javax.xml.xpath.XPathFactory; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.URL; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public class BaseCSharpTest implements RuntimeTestSupport { - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - /** - * When {@code true}, on Linux will call dotnet cli toolchain, otherwise - * will continue to use mono - */ - public static final boolean NETSTANDARD = Boolean.parseBoolean(System.getProperty("antlr-csharp-netstandard")); - - /** - * When the {@code antlr.preserve-test-dir} runtime property is set to - * {@code true}, the temporary directories created by the test run will not - * be removed at the end of the test run, even for tests that completed - * successfully. - * - *

    - * The default behavior (used in all other cases) is removing the temporary - * directories for all tests which completed successfully, and preserving - * the directories for tests which failed.

    - */ - public static final boolean PRESERVE_TEST_DIR = Boolean.parseBoolean(System.getProperty("antlr-preserve-csharp-test-dir")); - - /** - * The base test directory is the directory where generated files get placed - * during unit test execution. - * - *

    - * The default value for this property is the {@code java.io.tmpdir} system - * property, and can be overridden by setting the - * {@code antlr.java-test-dir} property to a custom location. Note that the - * {@code antlr.java-test-dir} property directly affects the - * {@link #CREATE_PER_TEST_DIRECTORIES} value as well.

    - */ - public static final String BASE_TEST_DIR; - - /** - * When {@code true}, a temporary directory will be created for each test - * executed during the test run. - * - *

    - * This value is {@code true} when the {@code antlr.java-test-dir} system - * property is set, and otherwise {@code false}.

    - */ - public static final boolean CREATE_PER_TEST_DIRECTORIES; - - static { - String baseTestDir = System.getProperty("antlr-csharp-test-dir"); - boolean perTestDirectories = false; - if (baseTestDir == null || baseTestDir.isEmpty()) { - baseTestDir = System.getProperty("java.io.tmpdir"); - perTestDirectories = true; - } - - if (!new File(baseTestDir).isDirectory()) { - throw new UnsupportedOperationException("The specified base test directory does not exist: " + baseTestDir); - } - - BASE_TEST_DIR = baseTestDir; - CREATE_PER_TEST_DIRECTORIES = perTestDirectories; - } - - public String tmpdir = null; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - @Override - public void testSetUp() throws Exception { - if (CREATE_PER_TEST_DIRECTORIES) { - // new output dir for each test - String testDirectory = getClass().getSimpleName() + "-"+Thread.currentThread().getName()+ "-" + System.currentTimeMillis(); - tmpdir = new File(BASE_TEST_DIR, testDirectory).getAbsolutePath(); - } - else { - tmpdir = new File(BASE_TEST_DIR).getAbsolutePath(); - if (!PRESERVE_TEST_DIR && new File(tmpdir).exists()) { - eraseDirectory(new File(tmpdir)); - } - } - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - 
org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); - return tool; - } - - protected String load(String fileName, String encoding) - throws IOException - { - if ( fileName==null ) { - return null; - } - - String fullFileName = getClass().getPackage().getName().replace('.', '/') + '/' + fileName; - int size = 65000; - InputStreamReader isr; - InputStream fis = getClass().getClassLoader().getResourceAsStream(fullFileName); - if ( encoding!=null ) { - isr = new InputStreamReader(fis, encoding); - } - else { - isr = new InputStreamReader(fis); - } - try { - char[] data = new char[size]; - int n = isr.read(data); - return new String(data, 0, n); - } - finally { - isr.close(); - } - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input) - { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) - { - boolean success = rawGenerateRecognizer(grammarFileName, - grammarStr, - null, - lexerName); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - addSourceFiles("Test.cs"); - if(!compile()) { - System.err.println("Failed to compile!"); - return stderrDuringParse; - } - String output = execTest(); - if ( output!=null && output.length()==0 ) { - output = null; - } - return output; - } - - Set sourceFiles = new HashSet(); - - private void addSourceFiles(String ... 
files) { - for(String file : files) - this.sourceFiles.add(file); - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(tmpdir, "input", input); - return rawExecRecognizer(parserName, - lexerName, - startRuleName, - showDiagnosticErrors); - } - - /** Return true if all is well */ - protected boolean rawGenerateRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... 
extraOptions) - { - ErrorQueue equeue = antlrOnString(getTmpDir(), "CSharp", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".cs"); - } - if ( parserName!=null ) { - files.add(parserName+".cs"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarName+"Listener.cs"); - files.add(grammarName+"BaseListener.cs"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarName+"Visitor.cs"); - files.add(grammarName+"BaseVisitor.cs"); - } - } - addSourceFiles(files.toArray(new String[files.size()])); - return true; - } - - protected String rawExecRecognizer(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug) - { - this.stderrDuringParse = null; - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - parserStartRuleName, - debug); - } - - addSourceFiles("Test.cs"); - return execRecognizer(); - } - - public String execRecognizer() { - boolean success = compile(); - assertTrue(success); - - String output = execTest(); - if ( output!=null && output.length()==0 ) { - output = null; - } - return output; - } - - public boolean compile() { - if(!NETSTANDARD) { - try { - if(!createProject()) - return false; - if(!buildProject()) - return false; - return true; - } catch(Exception e) { - e.printStackTrace(System.err); - return false; - } - } - else - { - try { - return buildDotnetProject(); - } catch(Exception e) { - e.printStackTrace(System.err); - return false; - } - } - } - - private File getTestProjectFile() { - return new File(tmpdir, "Antlr4.Test.mono.csproj"); - } - - private boolean buildProject() throws Exception { - String msbuild = locateMSBuild(); - 
String[] args = { - msbuild, - "/p:Configuration=Release", - getTestProjectFile().getAbsolutePath() - }; -// System.err.println("Starting build "+ Utils.join(args, " ")); - ProcessBuilder pb = new ProcessBuilder(args); - pb.directory(new File(tmpdir)); - Process process = pb.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - // xbuild sends errors to output, so check exit code - int exitValue = process.exitValue(); - boolean success = (exitValue == 0); - if ( !success ) { - this.stderrDuringParse = stdoutVacuum.toString(); - String stderrString = stderrVacuum.toString(); - System.err.println("buildProject command: " + Utils.join(args, " ")); - System.err.println("buildProject exitValue: " + exitValue); - System.err.println("buildProject stdout: " + stderrDuringParse); - System.err.println("buildProject stderr: " + stderrString); - } - return success; - } - - private String locateMSBuild() { - if(isWindows()) - return "\"C:\\Program Files (x86)\\MSBuild\\12.0\\Bin\\MSBuild.exe\""; - else - return locateTool("xbuild"); - } - - private boolean isWindows() { - return System.getProperty("os.name").toLowerCase().contains("windows"); - } - - private String locateExec() { - if (!NETSTANDARD) - return new File(tmpdir, "bin/Release/Test.exe").getAbsolutePath(); - - return new File(tmpdir, "bin/Release/netcoreapp1.0/Test.dll").getAbsolutePath(); - } - - private String locateTool(String tool) { - String[] roots = { "/opt/local/bin/", "/usr/local/bin/", "/usr/bin/" }; - for(String root : roots) { - if(new File(root + tool).exists()) - return root + tool; - } - throw new RuntimeException("Could not locate " + tool); - } - - public boolean createProject() { - try { - String pack = BaseCSharpTest.class.getPackage().getName().replace(".", "/") + "/"; - // save 
auxiliary files - saveResourceAsFile(pack + "AssemblyInfo.cs", new File(tmpdir, "AssemblyInfo.cs")); - saveResourceAsFile(pack + "App.config", new File(tmpdir, "App.config")); - // update project - String projectName = isWindows() ? "Antlr4.Test.vs2013.csproj" : "Antlr4.Test.mono.csproj"; - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - InputStream input = loader.getResourceAsStream(pack + projectName); - Document prjXml = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(input); - // update runtime project reference - // find project file as a resource not relative pathname (now that we've merged repos) - String runtimeName = isWindows() ? "Antlr4.Runtime.vs2013.csproj" : "Antlr4.Runtime.mono.csproj"; - final URL runtimeProj = loader.getResource("CSharp/runtime/CSharp/Antlr4.Runtime/"+runtimeName); - if ( runtimeProj==null ) { - throw new RuntimeException("C# runtime project file not found!"); - } - String runtimeProjPath = runtimeProj.getPath(); - if(isWindows()){ - runtimeProjPath = runtimeProjPath.replaceFirst("/", ""); - } - XPathExpression exp = XPathFactory.newInstance().newXPath() - .compile("/Project/ItemGroup/ProjectReference[@Include='" + runtimeName + "']"); - Element node = (Element)exp.evaluate(prjXml, XPathConstants.NODE); - node.setAttribute("Include", runtimeProjPath.replace("/", "\\")); - // update project file list - exp = XPathFactory.newInstance().newXPath().compile("/Project/ItemGroup[Compile/@Include='AssemblyInfo.cs']"); - Element group = (Element)exp.evaluate(prjXml, XPathConstants.NODE); - if(group==null) - return false; - // remove existing children - while(group.hasChildNodes()) - group.removeChild(group.getFirstChild()); - // add AssemblyInfo.cs, not a generated source - sourceFiles.add("AssemblyInfo.cs"); - // add files to compile - for(String file : sourceFiles) { - Element elem = group.getOwnerDocument().createElement("Compile"); - elem.setAttribute("Include", file); - 
group.appendChild(elem); - } - // save project - File prjFile = getTestProjectFile(); - Transformer transformer = TransformerFactory.newInstance().newTransformer(); - transformer.setOutputProperty(OutputKeys.INDENT, "yes"); - transformer.transform(new DOMSource(prjXml), new StreamResult(prjFile)); - return true; - } - catch(Exception e) { - e.printStackTrace(System.err); - return false; - } - } - - public boolean buildDotnetProject() { - try { - // save auxiliary files - String pack = BaseCSharpTest.class.getPackage().getName().replace(".", "/") + "/"; - saveResourceAsFile(pack + "Antlr4.Test.dotnet.csproj", new File(tmpdir, "Antlr4.Test.dotnet.csproj")); - - // find runtime package - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeProj = loader.getResource("CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj"); - if ( runtimeProj==null ) { - throw new RuntimeException("C# runtime project file not found!"); - } - File runtimeProjFile = new File(runtimeProj.getFile()); - String runtimeProjPath = runtimeProjFile.getPath(); - - // add Runtime project reference - String dotnetcli = locateTool("dotnet"); - String[] args = new String[] { - dotnetcli, - "add", - "Antlr4.Test.dotnet.csproj", - "reference", - runtimeProjPath - }; - boolean success = runProcess(args, tmpdir); - assertTrue(success); - - // restore project - args = new String[] { - dotnetcli, - "restore", - "Antlr4.Test.dotnet.csproj", - "--no-dependencies" - }; - success = runProcess(args, tmpdir); - assertTrue(success); - - // build test - args = new String[] { - dotnetcli, - "build", - "Antlr4.Test.dotnet.csproj", - "-c", - "Release", - "--no-dependencies" - }; - success = runProcess(args, tmpdir); - assertTrue(success); - } - catch(Exception e) { - e.printStackTrace(System.err); - return false; - } - - return true; - } - - private boolean runProcess(String[] args, String path) throws Exception { - return runProcess(args, path, 0); - } - - private 
boolean runProcess(String[] args, String path, int retries) throws Exception { - ProcessBuilder pb = new ProcessBuilder(args); - pb.directory(new File(path)); - Process process = pb.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - int exitValue = process.exitValue(); - boolean success = (exitValue == 0); - if ( !success ) { - this.stderrDuringParse = stderrVacuum.toString(); - System.err.println("runProcess command: " + Utils.join(args, " ")); - System.err.println("runProcess exitValue: " + exitValue); - System.err.println("runProcess stdoutVacuum: " + stdoutVacuum.toString()); - System.err.println("runProcess stderrVacuum: " + stderrDuringParse); - } - if (exitValue == 132) { - // Retry after SIGILL. We are seeing this intermittently on - // macOS (issue #2078). - if (retries < 3) { - System.err.println("runProcess retrying; " + retries + - " retries so far"); - return runProcess(args, path, retries + 1); - } - else { - System.err.println("runProcess giving up after " + retries + - " retries"); - return false; - } - } - return success; - } - - private void saveResourceAsFile(String resourceName, File file) throws IOException { - InputStream input = Thread.currentThread().getContextClassLoader().getResourceAsStream(resourceName); - if ( input==null ) { - System.err.println("Can't find " + resourceName + " as resource"); - throw new IOException("Missing resource:" + resourceName); - } - OutputStream output = new FileOutputStream(file.getAbsolutePath()); - while(input.available()>0) { - output.write(input.read()); - } - output.close(); - input.close(); - } - - public String execTest() { - String exec = locateExec(); - try { - File tmpdirFile = new File(tmpdir); - Path output = tmpdirFile.toPath().resolve("output"); - Path errorOutput = 
tmpdirFile.toPath().resolve("error-output"); - String[] args = getExecTestArgs(exec, output, errorOutput); - ProcessBuilder pb = new ProcessBuilder(args); - pb.directory(tmpdirFile); - Process process = pb.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String writtenOutput = TestOutputReading.read(output); - this.stderrDuringParse = TestOutputReading.read(errorOutput); - int exitValue = process.exitValue(); - String stdoutString = stdoutVacuum.toString().trim(); - String stderrString = stderrVacuum.toString().trim(); - if (exitValue != 0) { - System.err.println("execTest command: " + Utils.join(args, " ")); - System.err.println("execTest exitValue: " + exitValue); - } - if (!stdoutString.isEmpty()) { - System.err.println("execTest stdoutVacuum: " + stdoutString); - } - if (!stderrString.isEmpty()) { - System.err.println("execTest stderrVacuum: " + stderrString); - } - return writtenOutput; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String[] getExecTestArgs(String exec, Path output, Path errorOutput) { - if ( isWindows() ) { - return new String[]{ - exec, new File(tmpdir, "input").getAbsolutePath(), - output.toAbsolutePath().toString(), - errorOutput.toAbsolutePath().toString() - }; - } - else { - if (!NETSTANDARD) { - String mono = locateTool("mono"); - return new String[] { - mono, exec, new File(tmpdir, "input").getAbsolutePath(), - output.toAbsolutePath().toString(), - errorOutput.toAbsolutePath().toString() - }; - } - - String dotnet = locateTool("dotnet"); - return new String[] { - dotnet, exec, new File(tmpdir, "input").getAbsolutePath(), - output.toAbsolutePath().toString(), - errorOutput.toAbsolutePath().toString() - }; - } - } - - List 
getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if ( hide.contains(t.getType()) ) { - ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeParserTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug) - { - ST outputFileST = new ST( - "using System;\n" + - "using Antlr4.Runtime;\n" + - "using Antlr4.Runtime.Tree;\n" + - "using System.IO;\n" + - "using System.Text;\n" + - "\n" + - "public class Test {\n" + - " public static void Main(string[] args) {\n" + - " var input = CharStreams.fromPath(args[0]);\n" + - " using (FileStream fsOut = new FileStream(args[1], FileMode.Create, FileAccess.Write))\n" + - " using (FileStream fsErr = new 
FileStream(args[2], FileMode.Create, FileAccess.Write))\n" + - " using (TextWriter output = new StreamWriter(fsOut),\n" + - " errorOutput = new StreamWriter(fsErr)) {\n" + - " lex = new (input, output, errorOutput);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " \n"+ - " parser.BuildParseTree = true;\n" + - " ParserRuleContext tree = parser.();\n" + - " ParseTreeWalker.Default.Walk(new TreeShapeListener(), tree);\n" + - " }\n" + - " }\n" + - "}\n" + - "\n" + - "class TreeShapeListener : IParseTreeListener {\n" + - " public void VisitTerminal(ITerminalNode node) { }\n" + - " public void VisitErrorNode(IErrorNode node) { }\n" + - " public void ExitEveryRule(ParserRuleContext ctx) { }\n" + - "\n" + - " public void EnterEveryRule(ParserRuleContext ctx) {\n" + - " for (int i = 0; i \\< ctx.ChildCount; i++) {\n" + - " IParseTree parent = ctx.GetChild(i).Parent;\n" + - " if (!(parent is IRuleNode) || ((IRuleNode)parent).RuleContext != ctx) {\n" + - " throw new Exception(\"Invalid parse tree shape detected.\");\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - ST createParserST = new ST(" parser = new (tokens, output, errorOutput);\n"); - if ( debug ) { - createParserST = - new ST( - " parser = new (tokens, output, errorOutput);\n" + - " parser.AddErrorListener(new DiagnosticErrorListener());\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.cs", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "using System;\n" + - "using Antlr4.Runtime;\n" + - "using System.IO;\n" + - "using System.Text;\n" + - "\n" + - "public class Test {\n" + - " public static void Main(string[] args) {\n" + - " var input = CharStreams.fromPath(args[0]);\n" + - " using (FileStream fsOut = new 
FileStream(args[1], FileMode.Create, FileAccess.Write))\n" + - " using (FileStream fsErr = new FileStream(args[2], FileMode.Create, FileAccess.Write))\n" + - " using (TextWriter output = new StreamWriter(fsOut),\n" + - " errorOutput = new StreamWriter(fsErr)) {\n" + - " lex = new (input, output, errorOutput);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " tokens.Fill();\n" + - " foreach (object t in tokens.GetTokens())\n" + - " output.WriteLine(t);\n" + - (showDFA?" output.Write(lex.Interpreter.GetDFA(Lexer.DEFAULT_MODE).ToLexerString());\n":"")+ - " }\n" + - "}\n" + - "}" - ); - - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "Test.cs", outputFileST.render()); - } - - public void writeRecognizerAndCompile(String parserName, String lexerName, - String parserStartRuleName, - boolean debug) { - if ( parserName==null ) { - writeLexerTestFile(lexerName, debug); - } - else { - writeParserTestFile(parserName, - lexerName, - parserStartRuleName, - debug); - } - - addSourceFiles("Test.cs"); - } - - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseDirectory(File dir) { - File[] files = dir.listFiles(); - if (files != null) { - for (File file : files) { - if (file.isDirectory()) { - eraseDirectory(file); - } - else { - file.delete(); - } - } - } - dir.delete(); - } - - @Override - public void eraseTempDir() { - if (!PRESERVE_TEST_DIR) { - File tmpdirF = new File(tmpdir); - if ( tmpdirF.exists() ) { - eraseDirectory(tmpdirF); - tmpdirF.delete(); - } - } - } - - public String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return 
lines[0].substring(prefix.length(),lines[0].length()); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - - /** Return map sorted by key */ - public ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } - - protected static void assertEquals(String msg, int a, int b) { - org.junit.Assert.assertEquals(msg, a, b); - } - - protected static void assertEquals(String a, String b) { - org.junit.Assert.assertEquals(a, b); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRunner.java new file mode 100644 index 0000000000..e3ffec5b56 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRunner.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.csharp; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.nio.file.Paths; + +import static org.antlr.v4.test.runtime.FileUtils.mkdir; +import static org.antlr.v4.test.runtime.FileUtils.writeFile; + +public class CSharpRunner extends RuntimeRunner { + @Override + public String getLanguage() { return "CSharp"; } + + @Override + public String getTitleName() { return "C#"; } + + @Override + public String getExtension() { return "cs"; } + + @Override + public String getRuntimeToolName() { return "dotnet"; } + + @Override + public String getExecFileName() { return getTestFileName() + ".dll"; } + + private final static String testProjectFileName = "Antlr4.Test.csproj"; + private final static String cSharpAntlrRuntimeDllName = + Paths.get(getCachePath("CSharp"), "Antlr4.Runtime.Standard.dll").toString(); + + private final static String cSharpTestProjectContent; + + static { + ST projectTemplate = new ST(RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg")); + projectTemplate.add("runtimeLibraryPath", cSharpAntlrRuntimeDllName); + cSharpTestProjectContent = projectTemplate.render(); + } + + @Override + protected void initRuntime(RunOptions runOptions) throws Exception { + String cachePath = getCachePath(); + mkdir(cachePath); + String projectPath = Paths.get(getRuntimePath(), "src", "Antlr4.csproj").toString(); + String[] args = new String[]{getRuntimeToolPath(), "build", projectPath, "-c", "Release", "-o", cachePath}; + runCommand(args, cachePath, "build " + getTitleName() + " ANTLR runtime"); + } + + @Override + public CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + Exception exception = null; + try { + writeFile(getTempDirPath(), testProjectFileName, cSharpTestProjectContent); + runCommand(new 
String[]{getRuntimeToolPath(), "build", testProjectFileName, "-c", "Release"}, getTempDirPath(), + "build C# test binary"); + } catch (Exception e) { + exception = e; + } + return new CompiledState(generatedState, exception); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRuntimeTests.java new file mode 100644 index 0000000000..3d466ae259 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.csharp; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class CSharpRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new CSharpRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeLexers.java deleted file mode 100644 index 69ca0ca0f5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeLexers.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeParsers.java deleted file mode 100644 index 930d408d5d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeParsers.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestFullContextParsing.java deleted file mode 100644 index 543674a978..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestFullContextParsing.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLeftRecursion.java deleted file mode 100644 index b5e9994bc5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLeftRecursion.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LeftRecursionTests; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LeftRecursionTests.class) -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerErrors.java deleted file mode 100644 index a49ec51e9c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerErrors.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerExec.java deleted file mode 100644 index 7ba1801190..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerExec.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestListeners.java deleted file mode 100644 index e8d5999981..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestListeners.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParseTrees.java deleted file mode 100644 index 22110fa23a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParseTrees.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserErrors.java deleted file mode 100644 index b7a2463f05..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserErrors.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserExec.java deleted file mode 100644 index 2561dbf107..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserExec.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestPerformance.java deleted file mode 100644 index 108ea1878d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestPerformance.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalLexer.java deleted file mode 100644 index f2fa2d2a33..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalLexer.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalParser.java deleted file mode 100644 index 5ecab67c89..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalParser.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSets.java deleted file mode 100644 index 1b970e30b4..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSets.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRunner.java new file mode 100644 index 0000000000..9ef7889b5b --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRunner.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.dart; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.io.*; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.FileSeparator; + +public class DartRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Dart"; + } + + private static String cacheDartPackageConfig; + + @Override + protected void initRuntime(RunOptions runOptions) throws Exception { + String cachePath = getCachePath(); + mkdir(cachePath); + + ST projectTemplate = new ST(RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg")); + projectTemplate.add("runtimePath", getRuntimePath()); + + writeFile(cachePath, "pubspec.yaml", projectTemplate.render()); + + runCommand(new String[]{getRuntimeToolPath(), "pub", "get"}, cachePath); + + cacheDartPackageConfig = readFile(cachePath + FileSeparator + ".dart_tool", "package_config.json"); + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + String dartToolDirPath = new File(getTempDirPath(), ".dart_tool").getAbsolutePath(); + mkdir(dartToolDirPath); + writeFile(dartToolDirPath, "package_config.json", cacheDartPackageConfig); + + return new CompiledState(generatedState, null); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRuntimeTests.java new file mode 100644 index 0000000000..b049f7d0c3 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.dart; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class DartRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new DartRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeLexersDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeLexersDescriptors.java deleted file mode 100644 index 878b9e1f2b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeLexersDescriptors.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.test.runtime.BaseCompositeLexerTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -import java.util.ArrayList; -import java.util.List; - -public class CompositeLexersDescriptors { - public static class LexerDelegatorInvokesDelegateRule extends BaseCompositeLexerTestDescriptor { - public String input = "abc"; - /** - S.A - [@0,0:0='a',<3>,1:0] - [@1,1:1='b',<1>,1:1] - [@2,2:2='c',<4>,1:2] - [@3,3:2='',<-1>,1:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "M"; - - /** - lexer grammar M; - import S; - B : 'b'; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - lexer grammar S; - A : 'a' {}; - C : 'c' ; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",slaveGrammarS)); - - return slaves; - } - } - - public static class LexerDelegatorRuleOverridesDelegate extends BaseCompositeLexerTestDescriptor { - public String input = "ab"; - /** - M.A - [@0,0:1='ab',<1>,1:0] - [@1,2:1='',<-1>,1:2] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "M"; - - /** - lexer grammar M; - import S; - A : 'a' B {} ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - lexer grammar S; - A : 'a' {} ; - B : 'b' {} ; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",slaveGrammarS)); - - return slaves; - } - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeParsersDescriptors.java 
b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeParsersDescriptors.java deleted file mode 100644 index 5d2f398e82..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/CompositeParsersDescriptors.java +++ /dev/null @@ -1,587 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.test.runtime.BaseCompositeParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -import java.util.ArrayList; -import java.util.List; - -public class CompositeParsersDescriptors { - public static class BringInLiteralsFromDelegate extends BaseCompositeParserTestDescriptor { - public String input = "=a"; - public String output = "S.a\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import S; - s : a ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - a : '=' 'a' {}; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class CombinedImportsCombined extends BaseCompositeParserTestDescriptor { - public String input = "x 34 9"; - public String output = "S.x\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import S; - s : x INT; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - tokens { A, B, C } - x : 'x' INT {}; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String 
slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatesSeeSameTokenType extends BaseCompositeParserTestDescriptor { - public String input = "aa"; - /** - S.x - T.y - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - // The lexer will create rules to match letters a, b, c. - // The associated token types A, B, C must have the same value - // and all import'd parsers. Since ANTLR regenerates all imports - // for use with the delegator M, it can generate the same token type - // mapping in each parser: - // public static final int C=6; - // public static final int EOF=-1; - // public static final int B=5; - // public static final int WS=7; - // public static final int A=4; - grammar M; - import S,T; - s : x y ; // matches AA, which should be 'aa' - B : 'b' ; // another order: B, A, C - A : 'a' ; - C : 'c' ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar T; - tokens { C, B, A } // reverse order - y : A {}; - */ - @CommentHasStringValue - public String slaveGrammarT; - /** - parser grammar S; - tokens { A, B, C } - x : A {}; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("T",stringIndentation(slaveGrammarT))); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorAccessesDelegateMembers extends BaseCompositeParserTestDescriptor { - public String input = "b"; - public String output = "foo\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; // uses no rules from the import - import 
S; - s : 'b' {} ; // gS is import pointer - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - @parser::members { - - } - a : B; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorInvokesDelegateRule extends BaseCompositeParserTestDescriptor { - public String input = "b"; - public String output = "S.a\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import S; - s : a ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - a : B {}; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorInvokesDelegateRuleWithArgs extends BaseCompositeParserTestDescriptor { - public String input = "b"; - public String output = "S.a1000\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import S; - s : label=a[3] {} ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - a[] returns [] : B {} {$y=1000;} ; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorInvokesDelegateRuleWithReturnStruct extends 
BaseCompositeParserTestDescriptor { - public String input = "b"; - public String output = "S.ab\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import S; - s : a {} ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - a : B {} ; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorInvokesFirstVersionOfDelegateRule extends BaseCompositeParserTestDescriptor { - public String input = "b"; - public String output = "S.a\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import S,T; - s : a ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar T; - a : B {}; - */ - @CommentHasStringValue - public String slaveGrammarT; - /** - parser grammar S; - a : b {}; - b : B; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("T",stringIndentation(slaveGrammarT))); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorRuleOverridesDelegate extends BaseCompositeParserTestDescriptor { - public String input = "c"; - public String output = "S.a\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "M"; - - /** - grammar M; - import S; - b : 'b'|'c'; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - a : b {}; - b : B ; - 
*/ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorRuleOverridesDelegates extends BaseCompositeParserTestDescriptor { - public String input = "c"; - /** - M.b - S.a - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "a"; - public String grammarName = "M"; - - /** - grammar M; - import S, T; - b : 'b'|'c' {}|B|A; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar T; - tokens { A } - b : 'b' {}; - */ - @CommentHasStringValue - public String slaveGrammarT; - /** - parser grammar S; - a : b {}; - b : 'b' ; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("T",stringIndentation(slaveGrammarT))); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class DelegatorRuleOverridesLookaheadInDelegate extends BaseCompositeParserTestDescriptor { - public String input = "float x = 3;"; - public String output = "JavaDecl: floatx=3;\n"; - public String errors = null; - public String startRule = "prog"; - public String grammarName = "M"; - - /** - grammar M; - import S; - prog : decl ; - type_ : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - type_ : 'int' ; - decl : type_ ID ';' - | type_ ID init_ ';' {}; - init_ : '=' INT; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - /* - 
* This is a regression test for antlr/antlr4#248 "Including grammar with only - * fragments breaks generated lexer". - * https://github.com/antlr/antlr4/issues/248 - */ - public static class ImportLexerWithOnlyFragmentRules extends BaseCompositeParserTestDescriptor { - public String input = "test test"; - public String output = null; - public String errors = null; - public String startRule = "program"; - public String grammarName = "Test"; - - /** - grammar Test; - import Unicode; - - program : 'test' 'test'; - - WS : (UNICODE_CLASS_Zs)+ -> skip; - - */ - @CommentHasStringValue - public String grammar; - - /** - lexer grammar Unicode; - - fragment - UNICODE_CLASS_Zs : '\u0020' | '\u00A0' | '\u1680' | '\u180E' - | '\u2000'..'\u200A' - | '\u202F' | '\u205F' | '\u3000' - ; - - */ - @CommentHasStringValue - public String slaveGrammarUnicode; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("Unicode",stringIndentation(slaveGrammarUnicode))); - - return slaves; - } - } - - public static class ImportedGrammarWithEmptyOptions extends BaseCompositeParserTestDescriptor { - public String input = "b"; - public String output = null; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import S; - s : a ; - B : 'b' ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - options {} - a : B ; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class ImportedRuleWithAction extends BaseCompositeParserTestDescriptor { - public String input = "b"; - public String output = null; - public String errors = null; - public String startRule = "s"; - public String grammarName = "M"; - - /** - grammar M; - import 
S; - s : a; - B : 'b'; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - parser grammar S; - a @after {} : B; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } - - public static class KeywordVSIDOrder extends BaseCompositeParserTestDescriptor { - public String input = "abc"; - /** - M.A - M.a: [@0,0:2='abc',<1>,1:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "a"; - public String grammarName = "M"; - - /** - grammar M; - import S; - a : A {}; - A : 'abc' {}; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - /** - lexer grammar S; - ID : 'a'..'z'+; - */ - @CommentHasStringValue - public String slaveGrammarS; - - @Override - public List> getSlaveGrammars() { - List> slaves = new ArrayList>(); - slaves.add(new Pair("S",stringIndentation(slaveGrammarS))); - - return slaves; - } - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/FullContextParsingDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/FullContextParsingDescriptors.java deleted file mode 100644 index 3453c1ae01..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/FullContextParsingDescriptors.java +++ /dev/null @@ -1,425 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseDiagnosticParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class FullContextParsingDescriptors { - public static class AmbigYieldsCtxSensitiveDFA extends BaseDiagnosticParserTestDescriptor { - public String input = "abc"; - /** - Decision 0: - s0-ID->:s1^=>1 - */ - @CommentHasStringValue - public String output; - - public String errors = "line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} - : ID | ID {} ; - ID : 'a'..'z'+; - WS : (' '|'\t'|'\n')+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - } - - public static class AmbiguityNoLoop extends BaseDiagnosticParserTestDescriptor { - public String input = "a@"; - public String output = "alt 1\n"; - /** - line 1:2 reportAttemptingFullContext d=0 (prog), input='a@' - line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@' - line 1:2 reportAttemptingFullContext d=1 (expr), input='a@' - line 1:2 reportContextSensitivity d=1 (expr), input='a@' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "prog"; - public String grammarName = "T"; - - /** - grammar T; - prog - @init {} - : expr expr {} - | expr - ; - expr: '@' - | ID '@' - | ID - ; - ID : [a-z]+ ; - WS : [ \r\n\t]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - } - - public static class CtxSensitiveDFATwoDiffInput extends BaseDiagnosticParserTestDescriptor { - public String input = "$ 34 abc @ 34 abc"; - /** - Decision 2: - s0-INT->s1 - s1-ID->:s2^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:5 reportAttemptingFullContext d=2 (e), input='34abc' - line 1:2 reportContextSensitivity d=2 (e), input='34' - line 1:14 reportAttemptingFullContext d=2 (e), input='34abc' - line 1:14 reportContextSensitivity d=2 (e), 
input='34abc' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} - : ('$' a | '@' b)+ ; - a : e ID ; - b : e INT ID ; - e : INT | ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\t'|'\n')+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class CtxSensitiveDFA extends BaseDiagnosticParserTestDescriptor { - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} - : '$' a | '@' b ; - a : e ID ; - b : e INT ID ; - e : INT | ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\t'|'\n')+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class CtxSensitiveDFA_1 extends CtxSensitiveDFA { - public String input = "$ 34 abc"; - /** - Decision 1: - s0-INT->s1 - s1-ID->:s2^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:5 reportAttemptingFullContext d=1 (e), input='34abc' - line 1:2 reportContextSensitivity d=1 (e), input='34' - */ - @CommentHasStringValue - public String errors; - - } - - public static class CtxSensitiveDFA_2 extends CtxSensitiveDFA { - public String input = "@ 34 abc"; - /** - Decision 1: - s0-INT->s1 - s1-ID->:s2^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:5 reportAttemptingFullContext d=1 (e), input='34abc' - line 1:5 reportContextSensitivity d=1 (e), input='34abc' - */ - @CommentHasStringValue - public String errors; - } - - public static abstract class ExprAmbiguity extends BaseDiagnosticParserTestDescriptor { - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init {} - : expr[0] {}; - expr[] - : ID - ( - {5 >= $_p}? '*' expr[6] - | {4 >= $_p}? 
'+' expr[5] - )* - ; - ID : [a-zA-Z]+ ; - WS : [ \r\n\t]+ -> skip ; - - */ - @CommentHasStringValue - public String grammar; - } - - public static class ExprAmbiguity_1 extends ExprAmbiguity { - public String input = "a+b"; - public String output = "(expr a + (expr b))\n"; - /** - line 1:1 reportAttemptingFullContext d=1 (expr), input='+' - line 1:2 reportContextSensitivity d=1 (expr), input='+b' - */ - @CommentHasStringValue - public String errors; - } - - public static class ExprAmbiguity_2 extends ExprAmbiguity { - public String input = "a+b*c"; - public String output = "(expr a + (expr b * (expr c)))\n"; - /** - line 1:1 reportAttemptingFullContext d=1 (expr), input='+' - line 1:2 reportContextSensitivity d=1 (expr), input='+b' - line 1:3 reportAttemptingFullContext d=1 (expr), input='*' - line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c' - */ - @CommentHasStringValue - public String errors; - } - - public static abstract class FullContextIF_THEN_ELSEParse extends BaseDiagnosticParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init {} - @after {} - : '{' stat* '}' ; - stat: 'if' ID 'then' stat ('else' ID)? 
- | 'return' - ; - ID : 'a'..'z'+ ; - WS : (' '|'\t'|'\n')+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class FullContextIF_THEN_ELSEParse_1 extends FullContextIF_THEN_ELSEParse { - public String input = "{ if x then return }"; - /** - Decision 1: - s0-'}'->:s1=>2 - */ - @CommentHasStringValue - public String output; - } - - public static class FullContextIF_THEN_ELSEParse_2 extends FullContextIF_THEN_ELSEParse { - public String input = "{ if x then return else foo }"; - /** - Decision 1: - s0-'else'->:s1^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:19 reportAttemptingFullContext d=1 (stat), input='else' - line 1:19 reportContextSensitivity d=1 (stat), input='else' - */ - @CommentHasStringValue - public String errors; - } - - public static class FullContextIF_THEN_ELSEParse_3 extends FullContextIF_THEN_ELSEParse { - public String input = "{ if x then if y then return else foo }"; - /** - Decision 1: - s0-'}'->:s2=>2 - s0-'else'->:s1^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:29 reportAttemptingFullContext d=1 (stat), input='else' - line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}' - */ - @CommentHasStringValue - public String errors; - } - - public static class FullContextIF_THEN_ELSEParse_4 extends FullContextIF_THEN_ELSEParse { - public String input = "{ if x then if y then return else foo else bar }"; - /** - Decision 1: - s0-'else'->:s1^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:29 reportAttemptingFullContext d=1 (stat), input='else' - line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse' - line 1:38 reportAttemptingFullContext d=1 (stat), input='else' - line 1:38 reportContextSensitivity d=1 (stat), input='else' - */ - @CommentHasStringValue - public String errors; - } - - public static class FullContextIF_THEN_ELSEParse_5 extends FullContextIF_THEN_ELSEParse { - /** - { if x then return 
else foo - if x then if y then return else foo } - */ - @CommentHasStringValue - public String input; - - /** - Decision 1: - s0-'}'->:s2=>2 - s0-'else'->:s1^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:19 reportAttemptingFullContext d=1 (stat), input='else' - line 1:19 reportContextSensitivity d=1 (stat), input='else' - line 2:27 reportAttemptingFullContext d=1 (stat), input='else' - line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}' - */ - @CommentHasStringValue - public String errors; - } - - public static class FullContextIF_THEN_ELSEParse_6 extends FullContextIF_THEN_ELSEParse { - /** - { if x then return else foo - if x then if y then return else foo } - */ - @CommentHasStringValue - public String input; - - /** - Decision 1: - s0-'}'->:s2=>2 - s0-'else'->:s1^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:19 reportAttemptingFullContext d=1 (stat), input='else' - line 1:19 reportContextSensitivity d=1 (stat), input='else' - line 2:27 reportAttemptingFullContext d=1 (stat), input='else' - line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}' - */ - @CommentHasStringValue - public String errors; - } - - /* - * Tests predictions for the following case involving closures. 
- * http://www.antlr.org/wiki/display/~admin/2011/12/29/Flaw+in+ANTLR+v3+LL(*)+analysis+algorithm - */ - public static class LoopsSimulateTailRecursion extends BaseDiagnosticParserTestDescriptor { - public String input = "a(i)<-x"; - public String output = "pass: a(i)<-x\n"; - /** - line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)' - line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "prog"; - public String grammarName = "T"; - - /** - grammar T; - prog - @init {} - : expr_or_assign*; - expr_or_assign - : expr '++' {} - | expr {} - ; - expr: expr_primary ('\<-' ID)?; - expr_primary - : '(' ID ')' - | ID '(' ID ')' - | ID - ; - ID : [a-z]+ ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SLLSeesEOFInLLGrammar extends BaseDiagnosticParserTestDescriptor { - public String input = "34 abc"; - /** - Decision 0: - s0-INT->s1 - s1-ID->:s2^=>1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:3 reportAttemptingFullContext d=0 (e), input='34abc' - line 1:0 reportContextSensitivity d=0 (e), input='34' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} - : a; - a : e ID ; - b : e INT ID ; - e : INT | ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\t'|'\n')+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LeftRecursionDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LeftRecursionDescriptors.java deleted file mode 100644 index 43f0f66ec9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LeftRecursionDescriptors.java +++ /dev/null @@ -1,1151 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class LeftRecursionDescriptors { - public static abstract class AmbigLR extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "prog"; - public String grammarName = "Expr"; - - /** - grammar Expr; - prog: stat ; - stat: expr NEWLINE # printExpr - | ID '=' expr NEWLINE # assign - | NEWLINE # blank - ; - expr: expr ('*'|'/') expr # MulDiv - | expr ('+'|'-') expr # AddSub - | INT # int - | ID # id - | '(' expr ')' # parens - ; - - MUL : '*' ; // assigns token name to '*' used above in grammar - DIV : '/' ; - ADD : '+' ; - SUB : '-' ; - ID : [a-zA-Z]+ ; // match identifiers - INT : [0-9]+ ; // match integers - NEWLINE:'\r'? '\n' ; // return newlines to parser (is end-statement signal) - WS : [ \t]+ -> skip ; // toss out whitespace - */ - @CommentHasStringValue - public String grammar; - } - - public static class AmbigLR_1 extends AmbigLR { - public String input = "1\n"; - public String output = null; - } - - public static class AmbigLR_2 extends AmbigLR { - public String input = "a = 5\n"; - public String output = null; - } - - public static class AmbigLR_3 extends AmbigLR { - public String input = "b = 6\n"; - public String output = null; - } - - public static class AmbigLR_4 extends AmbigLR { - public String input = "a+b*2\n"; - public String output = null; - } - - public static class AmbigLR_5 extends AmbigLR { - public String input = "(1+2)*3\n"; - public String output = null; - } - - public static abstract class Declarations extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : declarator EOF ; // must 
indicate EOF can follow - declarator - : declarator '[' e ']' - | declarator '[' ']' - | declarator '(' ')' - | '*' declarator // binds less tight than suffixes - | '(' declarator ')' - | ID - ; - e : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Declarations_1 extends Declarations { - public String input = "a"; - public String output = "(s (declarator a) )\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : declarator EOF ; // must indicate EOF can follow - declarator - : declarator '[' e ']' - | declarator '[' ']' - | declarator '(' ')' - | '*' declarator // binds less tight than suffixes - | '(' declarator ')' - | ID - ; - e : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Declarations_2 extends Declarations { - public String input = "*a"; - public String output = "(s (declarator * (declarator a)) )\n"; - } - - public static class Declarations_3 extends Declarations { - public String input = "**a"; - public String output = "(s (declarator * (declarator * (declarator a))) )\n"; - } - - public static class Declarations_4 extends Declarations { - public String input = "a[3]"; - public String output = "(s (declarator (declarator a) [ (e 3) ]) )\n"; - } - - public static class Declarations_5 extends Declarations { - public String input = "b[]"; - public String output = "(s (declarator (declarator b) [ ]) )\n"; - } - - public static class Declarations_6 extends Declarations { - public String input = "(a)"; - public String output = "(s (declarator ( (declarator a) )) )\n"; - } - - public static class Declarations_7 extends Declarations { - public String input = "a[]()"; - public String output = "(s (declarator (declarator (declarator a) [ ]) ( )) )\n"; - } - - 
public static class Declarations_8 extends Declarations { - public String input = "a[][]"; - public String output = "(s (declarator (declarator (declarator a) [ ]) [ ]) )\n"; - } - - public static class Declarations_9 extends Declarations { - public String input = "*a[]"; - public String output = "(s (declarator * (declarator (declarator a) [ ])) )\n"; - } - - public static class Declarations_10 extends Declarations { - public String input = "(*a)[]"; - public String output = "(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )\n"; - } - - /* - * This is a regression test for "Support direct calls to left-recursive - * rules". - * https://github.com/antlr/antlr4/issues/161 - */ - public static abstract class DirectCallToLeftRecursiveRule extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a @after {} : a ID - | ID - ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class DirectCallToLeftRecursiveRule_1 extends DirectCallToLeftRecursiveRule { - public String input = "x"; - public String output = "(a x)\n"; - } - - public static class DirectCallToLeftRecursiveRule_2 extends DirectCallToLeftRecursiveRule { - public String input = "x y"; - public String output = "(a (a x) y)\n"; - } - - public static class DirectCallToLeftRecursiveRule_3 extends DirectCallToLeftRecursiveRule { - public String input = "x y z"; - public String output = "(a (a (a x) y) z)\n"; - } - - public static abstract class Expressions extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : e EOF ; // must indicate EOF can follow - e : e '.' ID - | e '.' 
'this' - | '-' e - | e '*' e - | e ('+'|'-') e - | INT - | ID - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Expressions_1 extends Expressions { - public String input = "a"; - public String output = "(s (e a) )\n"; - } - - public static class Expressions_2 extends Expressions { - public String input = "1"; - public String output = "(s (e 1) )\n"; - } - - public static class Expressions_3 extends Expressions { - public String input = "a-1"; - public String output = "(s (e (e a) - (e 1)) )\n"; - } - - public static class Expressions_4 extends Expressions { - public String input = "a.b"; - public String output = "(s (e (e a) . b) )\n"; - } - - public static class Expressions_5 extends Expressions { - public String input = "a.this"; - public String output = "(s (e (e a) . this) )\n"; - } - - public static class Expressions_6 extends Expressions { - public String input = "-a"; - public String output = "(s (e - (e a)) )\n"; - } - - public static class Expressions_7 extends Expressions { - public String input = "-a+b"; - public String output = "(s (e (e - (e a)) + (e b)) )\n"; - } - - public static abstract class JavaExpressions extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : e EOF ; // must indicate EOF can follow - expressionList - : e (',' e)* - ; - e : '(' e ')' - | 'this' - | 'super' - | INT - | ID - | typespec '.' 'class' - | e '.' ID - | e '.' 'this' - | e '.' 'super' '(' expressionList? ')' - | e '.' 'new' ID '(' expressionList? ')' - | 'new' typespec ( '(' expressionList? ')' | ('[' e ']')+) - | e '[' e ']' - | '(' typespec ')' e - | e ('++' | '--') - | e '(' expressionList? 
')' - | ('+'|'-'|'++'|'--') e - | ('~'|'!') e - | e ('*'|'/'|'%') e - | e ('+'|'-') e - | e ('\<\<' | '>>>' | '>>') e - | e ('\<=' | '>=' | '>' | '\<') e - | e 'instanceof' e - | e ('==' | '!=') e - | e '&' e - |\ e '^' e - | e '|' e - | e '&&' e - | e '||' e - | e '?' e ':' e - |\ - e ('=' - |'+=' - |'-=' - |'*=' - |'/=' - |'&=' - |'|=' - |'^=' - |'>>=' - |'>>>=' - |'\<\<=' - |'%=') e - ; - typespec - : ID - | ID '[' ']' - | 'int' - | 'int' '[' ']' - ; - ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class JavaExpressions_1 extends JavaExpressions { - public String input = "a|b&c"; - public String output = "(s (e (e a) | (e (e b) & (e c))) )\n"; - } - - public static class JavaExpressions_2 extends JavaExpressions { - public String input = "(a|b)&c"; - public String output = "(s (e (e ( (e (e a) | (e b)) )) & (e c)) )\n"; - } - - public static class JavaExpressions_3 extends JavaExpressions { - public String input = "a > b"; - public String output = "(s (e (e a) > (e b)) )\n"; - } - - public static class JavaExpressions_4 extends JavaExpressions { - public String input = "a >> b"; - public String output = "(s (e (e a) >> (e b)) )\n"; - } - - public static class JavaExpressions_5 extends JavaExpressions { - public String input = "a=b=c"; - public String output = "(s (e (e a) = (e (e b) = (e c))) )\n"; - } - - public static class JavaExpressions_6 extends JavaExpressions { - public String input = "a^b^c"; - public String output = "(s (e (e a) ^ (e (e b) ^ (e c))) )\n"; - } - - public static class JavaExpressions_7 extends JavaExpressions { - public String input = "(T)x"; - public String output = "(s (e ( (typespec T) ) (e x)) )\n"; - } - - public static class JavaExpressions_8 extends JavaExpressions { - public String input = "new A().b"; - public String output = "(s (e (e new (typespec A) ( )) . 
b) )\n"; - } - - public static class JavaExpressions_9 extends JavaExpressions { - public String input = "(T)t.f()"; - public String output = "(s (e (e ( (typespec T) ) (e (e t) . f)) ( )) )\n"; - } - - public static class JavaExpressions_10 extends JavaExpressions { - public String input = "a.f(x)==T.c"; - public String output = "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )\n"; - } - - public static class JavaExpressions_11 extends JavaExpressions { - public String input = "a.f().g(x,1)"; - public String output = "(s (e (e (e (e (e a) . f) ( )) . g) ( (expressionList (e x) , (e 1)) )) )\n"; - } - - public static class JavaExpressions_12 extends JavaExpressions { - public String input = "new T[((n-1) * x) + 1]"; - public String output = "(s (e new (typespec T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )\n"; - } - - public static abstract class LabelsOnOpSubrule extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : e; - e : a=e op=('*'|'/') b=e {} - | INT {} - | '(' x=e ')' {} - ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LabelsOnOpSubrule_1 extends LabelsOnOpSubrule { - public String input = "4"; - public String output = "(s (e 4))\n"; - } - - public static class LabelsOnOpSubrule_2 extends LabelsOnOpSubrule { - public String input = "1*2/3"; - public String output = "(s (e (e (e 1) * (e 2)) / (e 3)))\n"; - } - - public static class LabelsOnOpSubrule_3 extends LabelsOnOpSubrule { - public String input = "(1/2)*3"; - public String output = "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n"; - } - - /* - * This is a regression test for antlr/antlr4#625 "Duplicate action breaks - * operator precedence" - * https://github.com/antlr/antlr4/issues/625 - */ - public static abstract class MultipleActionsPredicatesOptions 
extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : e ; - e : a=e op=('*'|'/') b=e {}{}? - | a=e op=('+'|'-') b=e {}\{}?\ - | INT {}{} - | '(' x=e ')' {}{} - ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class MultipleActionsPredicatesOptions_1 extends MultipleActionsPredicatesOptions { - public String input = "4"; - public String output = "(s (e 4))\n"; - } - - public static class MultipleActionsPredicatesOptions_2 extends MultipleActionsPredicatesOptions { - public String input = "1*2/3"; - public String output = "(s (e (e (e 1) * (e 2)) / (e 3)))\n"; - } - - public static class MultipleActionsPredicatesOptions_3 extends MultipleActionsPredicatesOptions { - public String input = "(1/2)*3"; - public String output = "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n"; - } - - /* - * This is a regression test for antlr/antlr4#625 "Duplicate action breaks - * operator precedence" - * https://github.com/antlr/antlr4/issues/625 - */ - public static abstract class MultipleActions extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : e ; - e : a=e op=('*'|'/') b=e {}{} - | INT {}{} - | '(' x=e ')' {}{} - ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class MultipleActions_1 extends MultipleActions { - public String input = "4"; - public String output = "(s (e 4))\n"; - public String errors = null; - } - - public static class MultipleActions_2 extends MultipleActions { - public String input = "1*2/3"; - public String output = "(s (e (e (e 1) * (e 2)) / (e 3)))\n"; - } - - public static class MultipleActions_3 extends MultipleActions { - public String input = "(1/2)*3"; - public String 
output = "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n"; - } - - /* - * This is a regression test for antlr/antlr4#433 "Not all context accessor - * methods are generated when an alternative rule label is used for multiple - * alternatives". - * https://github.com/antlr/antlr4/issues/433 - */ - public static abstract class MultipleAlternativesWithCommonLabel extends BaseParserTestDescriptor { - public String startRule = "s"; - public String grammarName = "T"; - public String errors = null; - - /** - grammar T; - s : e {}; - e returns [] - : e '*' e {$v = (0)}, {})> * (1)}, {})>;} # binary - | e '+' e {$v = (0)}, {})> + (1)}, {})>;} # binary - | INT {$v = $INT.int;} # anInt - | '(' e ')' {$v = $e.v;} # parens - | left=e INC {$v = $left.v + 1;} # unary - | left=e DEC {$v = $left.v - 1;} # unary - | ID {} # anID - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - INC : '++' ; - DEC : '--' ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class MultipleAlternativesWithCommonLabel_1 extends MultipleAlternativesWithCommonLabel { - public String input = "4"; - public String output = "4\n"; - } - - public static class MultipleAlternativesWithCommonLabel_2 extends MultipleAlternativesWithCommonLabel { - public String input = "1+2"; - public String output = "3\n"; - } - - public static class MultipleAlternativesWithCommonLabel_3 extends MultipleAlternativesWithCommonLabel { - public String input = "1+2*3"; - public String output = "7\n"; - } - - public static class MultipleAlternativesWithCommonLabel_4 extends MultipleAlternativesWithCommonLabel { - public String input = "i++*3"; - public String output = "12\n"; - } - - /** Test for https://github.com/antlr/antlr4/issues/1295 in addition to #433. 
*/ - public static class MultipleAlternativesWithCommonLabel_5 extends MultipleAlternativesWithCommonLabel { - public String input = "(99)+3"; - public String output = "102\n"; - } - - /** - * This is a regression test for antlr/antlr4#509 "Incorrect rule chosen in - * unambiguous grammar". - * https://github.com/antlr/antlr4/issues/509 - */ - public static class PrecedenceFilterConsidersContext extends BaseParserTestDescriptor { - public String input = "aa"; - public String output = "(prog (statement (letterA a)) (statement (letterA a)) )\n"; - public String errors = null; - public String startRule = "prog"; - public String grammarName = "T"; - - /** - grammar T; - prog - @after {} - : statement* EOF {}; - statement: letterA | statement letterA 'b' ; - letterA: 'a'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class PrefixAndOtherAlt extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : expr EOF ; - expr : literal - | op expr - | expr op expr - ; - literal : '-'? 
Integer ; - op : '+' | '-' ; - Integer : [0-9]+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class PrefixAndOtherAlt_1 extends PrefixAndOtherAlt { - public String input = "-1"; - public String output = "(s (expr (literal - 1)) )\n"; - } - - public static class PrefixAndOtherAlt_2 extends PrefixAndOtherAlt { - public String input = "-1 + -1"; - public String output = "(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) )\n"; - } - - public static abstract class PrefixOpWithActionAndLabel extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : e {} ; - e returns [ result] - : ID '=' e1=e {$result = ;} - | ID {$result = $ID.text;} - | e1=e '+' e2=e {$result = ;} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class PrefixOpWithActionAndLabel_1 extends PrefixOpWithActionAndLabel { - public String input = "a"; - public String output = "a\n"; - } - - public static class PrefixOpWithActionAndLabel_2 extends PrefixOpWithActionAndLabel { - public String input = "a+b"; - public String output = "(a+b)\n"; - } - - public static class PrefixOpWithActionAndLabel_3 extends PrefixOpWithActionAndLabel { - public String input = "a=b+c"; - public String output = "((a=b)+c)\n"; - } - - public static abstract class ReturnValueAndActionsAndLabels extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : q=e {}; - e returns [] - : a=e op='*' b=e {$v = $a.v * $b.v;} # mult - | a=e '+' b=e {$v = $a.v + $b.v;} # add - | INT {$v = $INT.int;} # anInt - | '(' x=e ')' {$v = 0 + $x.v;} # parens - | x=e '++' {$v = $x.v+1;} # inc - | e '--' # dec - | ID {$v = 3;} # anID - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' 
'|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ReturnValueAndActionsAndLabels_1 extends ReturnValueAndActionsAndLabels { - public String input = "4"; - public String output = "4\n"; - } - - public static class ReturnValueAndActionsAndLabels_2 extends ReturnValueAndActionsAndLabels { - public String input = "1+2"; - public String output = "3\n"; - } - - public static class ReturnValueAndActionsAndLabels_3 extends ReturnValueAndActionsAndLabels { - public String input = "1+2*3"; - public String output = "7\n"; - } - - public static class ReturnValueAndActionsAndLabels_4 extends ReturnValueAndActionsAndLabels { - public String input = "i++*3"; - public String output = "12\n"; - } - - /* - * This is a regression test for antlr/antlr4#677 "labels not working in grammar - * file". - * https://github.com/antlr/antlr4/issues/677 - * - * This test treats `,` and `>>` as part of a single compound operator (similar - * to a ternary operator). 
- */ - public static abstract class ReturnValueAndActionsList extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : expr EOF; - expr: - a=expr '*' a=expr #Factor - | b+=expr (',' b+=expr)* '>>' c=expr #Send - | ID #JustId //semantic check on modifiers - ; - - ID : ('a'..'z'|'A'..'Z'|'_') - ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - - WS : [ \t\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ReturnValueAndActionsList1_1 extends ReturnValueAndActionsList { - public String input = "a*b"; - public String output = "(s (expr (expr a) * (expr b)) )\n"; - } - - public static class ReturnValueAndActionsList1_2 extends ReturnValueAndActionsList { - public String input = "a,c>>x"; - public String output = "(s (expr (expr a) , (expr c) >> (expr x)) )\n"; - } - - public static class ReturnValueAndActionsList1_3 extends ReturnValueAndActionsList { - public String input = "x"; - public String output = "(s (expr x) )\n"; - } - - public static class ReturnValueAndActionsList1_4 extends ReturnValueAndActionsList { - public String input = "a*b,c,x*y>>r"; - public String output = "(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) )\n"; - } - - /* - * This is a regression test for antlr/antlr4#677 "labels not working in grammar - * file". - * https://github.com/antlr/antlr4/issues/677 - * - * This test treats the `,` and `>>` operators separately. 
- */ - public static abstract class ReturnValueAndActionsList2 extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : expr EOF; - expr: - a=expr '*' a=expr #Factor - | b+=expr ',' b+=expr #Comma - | b+=expr '>>' c=expr #Send - | ID #JustId //semantic check on modifiers - ; - ID : ('a'..'z'|'A'..'Z'|'_') - ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - WS : [ \t\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ReturnValueAndActionsList2_1 extends ReturnValueAndActionsList2 { - public String input = "a*b"; - public String output = "(s (expr (expr a) * (expr b)) )\n"; - } - - public static class ReturnValueAndActionsList2_2 extends ReturnValueAndActionsList2 { - public String input = "a,c>>x"; - public String output = "(s (expr (expr (expr a) , (expr c)) >> (expr x)) )\n"; - } - - public static class ReturnValueAndActionsList2_3 extends ReturnValueAndActionsList2 { - public String input = "x"; - public String output = "(s (expr x) )\n"; - } - - public static class ReturnValueAndActionsList2_4 extends ReturnValueAndActionsList2 { - public String input = "a*b,c,x*y>>r"; - public String output = "(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) )\n"; - } - - public static abstract class ReturnValueAndActions extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : e {}; - e returns [, ignored] - : a=e '*' b=e {$v = $a.v * $b.v;} - | a=e '+' b=e {$v = $a.v + $b.v;} - | INT {$v = $INT.int;} - | '(' x=e ')' {$v = 0 + $x.v;} - ; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ReturnValueAndActions_1 extends ReturnValueAndActions { - public String input = "4"; - public String 
output = "4\n"; - } - - public static class ReturnValueAndActions_2 extends ReturnValueAndActions { - public String input = "1+2"; - public String output = "3\n"; - } - - public static class ReturnValueAndActions_3 extends ReturnValueAndActions { - public String input = "1+2*3"; - public String output = "7\n"; - } - - public static class ReturnValueAndActions_4 extends ReturnValueAndActions { - public String input = "(1+2)*3"; - public String output = "9\n"; - } - - public static class SemPred extends BaseParserTestDescriptor { - public String input = "x y z"; - public String output = "(s (a (a (a x) y) z))\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : a ; - a : a {}? ID - | ID - ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SemPredFailOption extends BaseParserTestDescriptor { - public String input = "x y z"; - public String output = "(s (a (a x) y z))\n"; - public String errors = "line 1:4 rule a custom message\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : a ; - a : a ID {}?\ - | ID - ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class Simple extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : a ; - a : a ID - | ID - ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Simple_1 extends Simple { - public String input = "x"; - public String output = "(s (a x))\n"; - } - - public static class Simple_2 extends Simple { - public String input = "x y"; - public String output = "(s (a (a x) y))\n"; - } - - public static class Simple_3 extends 
Simple { - public String input = "x y z"; - public String output = "(s (a (a (a x) y) z))\n"; - } - - /** - * This is a regression test for antlr/antlr4#542 "First alternative cannot - * be right-associative". - * https://github.com/antlr/antlr4/issues/542 - */ - public static abstract class TernaryExprExplicitAssociativity extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match - e :\ e '*' e - |\ e '+' e - |\ e '?' e ':' e - |\ e '=' e - | ID - ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TernaryExprExplicitAssociativity_1 extends TernaryExprExplicitAssociativity { - public String input = "a"; - public String output = "(s (e a) )\n"; - } - - public static class TernaryExprExplicitAssociativity_2 extends TernaryExprExplicitAssociativity { - public String input = "a+b"; - public String output = "(s (e (e a) + (e b)) )\n"; - } - - public static class TernaryExprExplicitAssociativity_3 extends TernaryExprExplicitAssociativity { - public String input = "a*b"; - public String output = "(s (e (e a) * (e b)) )\n"; - } - - public static class TernaryExprExplicitAssociativity_4 extends TernaryExprExplicitAssociativity { - public String input = "a?b:c"; - public String output = "(s (e (e a) ? (e b) : (e c)) )\n"; - } - - public static class TernaryExprExplicitAssociativity_5 extends TernaryExprExplicitAssociativity { - public String input = "a=b=c"; - public String output = "(s (e (e a) = (e (e b) = (e c))) )\n"; - } - - public static class TernaryExprExplicitAssociativity_6 extends TernaryExprExplicitAssociativity { - public String input = "a?b+c:d"; - public String output = "(s (e (e a) ? 
(e (e b) + (e c)) : (e d)) )\n"; - } - - public static class TernaryExprExplicitAssociativity_7 extends TernaryExprExplicitAssociativity { - public String input = "a?b=c:d"; - public String output = "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )\n"; - } - - public static class TernaryExprExplicitAssociativity_8 extends TernaryExprExplicitAssociativity { - public String input = "a? b?c:d : e"; - public String output = "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )\n"; - } - - public static class TernaryExprExplicitAssociativity_9 extends TernaryExprExplicitAssociativity { - public String input = "a?b: c?d:e"; - public String output = "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )\n"; - } - - public static abstract class TernaryExpr extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match - e : e '*' e - | e '+' e - |\ e '?' e ':' e - |\ e '=' e - | ID - ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TernaryExpr_1 extends TernaryExpr { - public String input = "a"; - public String output = "(s (e a) )\n"; - } - - public static class TernaryExpr_2 extends TernaryExpr { - public String input = "a+b"; - public String output = "(s (e (e a) + (e b)) )\n"; - } - - public static class TernaryExpr_3 extends TernaryExpr { - public String input = "a*b"; - public String output = "(s (e (e a) * (e b)) )\n"; - } - - public static class TernaryExpr_4 extends TernaryExpr { - public String input = "a?b:c"; - public String output = "(s (e (e a) ? 
(e b) : (e c)) )\n"; - } - - public static class TernaryExpr_5 extends TernaryExpr { - public String input = "a=b=c"; - public String output = "(s (e (e a) = (e (e b) = (e c))) )\n"; - } - - public static class TernaryExpr_6 extends TernaryExpr { - public String input = "a?b+c:d"; - public String output = "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )\n"; - } - - public static class TernaryExpr_7 extends TernaryExpr { - public String input = "a?b=c:d"; - public String output = "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )\n"; - } - - public static class TernaryExpr_8 extends TernaryExpr { - public String input = "a? b?c:d : e"; - public String output = "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )\n"; - } - - public static class TernaryExpr_9 extends TernaryExpr { - public String input = "a?b: c?d:e"; - public String output = "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )\n"; - } - - /* - * This is a regression test for #239 "recoursive parser using implicit tokens - * ignore white space lexer rule". - * https://github.com/antlr/antlr4/issues/239 - */ - public static abstract class WhitespaceInfluence extends BaseParserTestDescriptor { - public String input = "Test(1,3)"; - public String output = null; - public String errors = null; - public String startRule = "prog"; - public String grammarName = "Expr"; - - /** - grammar Expr; - prog : expression EOF; - expression - : ID '(' expression (',' expression)* ')' # doFunction - | '(' expression ')' # doParenthesis - | '!' 
expression # doNot - | '-' expression # doNegate - | '+' expression # doPositiv - | expression '^' expression # doPower - | expression '*' expression # doMultipy - | expression '/' expression # doDivide - | expression '%' expression # doModulo - | expression '-' expression # doMinus - | expression '+' expression # doPlus - | expression '=' expression # doEqual - | expression '!=' expression # doNotEqual - | expression '>' expression # doGreather - | expression '>=' expression # doGreatherEqual - | expression '\<' expression # doLesser - | expression '\<=' expression # doLesserEqual - | expression K_IN '(' expression (',' expression)* ')' # doIn - | expression ( '&' | K_AND) expression # doAnd - | expression ( '|' | K_OR) expression # doOr - | '[' expression (',' expression)* ']' # newArray - | K_TRUE # newTrueBoolean - | K_FALSE # newFalseBoolean - | NUMBER # newNumber - | DATE # newDateTime - | ID # newIdentifier - | SQ_STRING # newString - | K_NULL # newNull - ; - - // Fragments - fragment DIGIT : '0' .. '9'; - fragment UPPER : 'A' .. 'Z'; - fragment LOWER : 'a' .. 'z'; - fragment LETTER : LOWER | UPPER; - fragment WORD : LETTER | '_' | '$' | '#' | '.'; - fragment ALPHANUM : WORD | DIGIT; - - // Tokens - ID : LETTER ALPHANUM*; - NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? DIGIT+)?; - DATE : '\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\''; - SQ_STRING : '\'' ('\'\'' | ~'\'')* '\''; - DQ_STRING : '"' ('\\\\"' | ~'"')* '"'; - WS : [ \t\n\r]+ -> skip ; - COMMENTS : ('/*' .*? 
'*' '/' | '//' ~'\n'* '\n' ) -> skip; - */ - @CommentHasStringValue - public String grammar; - } - - public static class WhitespaceInfluence_1 extends WhitespaceInfluence { - public String input = "Test(1,3)"; - public String output = null; - } - - public static class WhitespaceInfluence_2 extends WhitespaceInfluence { - public String input = "Test(1, 3)"; - public String output = null; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerErrorsDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerErrorsDescriptors.java deleted file mode 100644 index a7bcc3d7d7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerErrorsDescriptors.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseLexerTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class LexerErrorsDescriptors { - public static class DFAToATNThatFailsBackToDFA extends BaseLexerTestDescriptor { - public String input = "ababx"; - /** - [@0,0:1='ab',<1>,1:0] - [@1,2:3='ab',<1>,1:2] - [@2,5:4='',<-1>,1:5] - */ - @CommentHasStringValue - public String output; - - public String errors = "line 1:4 token recognition error at: 'x'\n"; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'ab' ; - B : 'abc' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class DFAToATNThatMatchesThenFailsInATN extends BaseLexerTestDescriptor { - public String input = "ababcx"; - /** - [@0,0:1='ab',<1>,1:0] - [@1,2:4='abc',<2>,1:2] - [@2,6:5='',<-1>,1:6] - */ - @CommentHasStringValue - public String output; - - public String errors = "line 1:5 token recognition 
error at: 'x'\n"; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'ab' ; - B : 'abc' ; - C : 'abcd' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class EnforcedGreedyNestedBraces extends BaseLexerTestDescriptor { - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - ACTION : '{' (ACTION | ~[{}])* '}'; - WS : [ \r\n\t]+ -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class EnforcedGreedyNestedBraces_1 extends EnforcedGreedyNestedBraces { - public String input = "{ { } }"; - /** - [@0,0:6='{ { } }',<1>,1:0] - [@1,7:6='',<-1>,1:7] - */ - @CommentHasStringValue - public String output; - } - - public static class EnforcedGreedyNestedBraces_2 extends EnforcedGreedyNestedBraces { - public String input = "{ { }"; - public String output = "[@0,5:4='',<-1>,1:5]\n"; - public String errors = "line 1:0 token recognition error at: '{ { }'\n"; - } - - public static class ErrorInMiddle extends BaseLexerTestDescriptor { - public String input = "abx"; - public String output = "[@0,3:2='',<-1>,1:3]\n"; - public String errors = "line 1:0 token recognition error at: 'abx'\n"; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'abc' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class InvalidCharAtStart extends BaseLexerTestDescriptor { - public String input = "x"; - public String output = "[@0,1:0='',<-1>,1:1]\n"; - public String errors = "line 1:0 token recognition error at: 'x'\n"; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'a' 'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class InvalidCharAtStartAfterDFACache extends BaseLexerTestDescriptor { - public String input = "abx"; - /** - [@0,0:1='ab',<1>,1:0] 
- [@1,3:2='',<-1>,1:3] - */ - @CommentHasStringValue - public String output; - - public String errors = "line 1:2 token recognition error at: 'x'\n"; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'a' 'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class InvalidCharInToken extends BaseLexerTestDescriptor { - public String input = "ax"; - public String output = "[@0,2:1='',<-1>,1:2]\n"; - public String errors = "line 1:0 token recognition error at: 'ax'\n"; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'a' 'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class InvalidCharInTokenAfterDFACache extends BaseLexerTestDescriptor { - public String input = "abax"; - /** - [@0,0:1='ab',<1>,1:0] - [@1,4:3='',<-1>,1:4] - */ - @CommentHasStringValue - public String output; - - public String errors = "line 1:2 token recognition error at: 'ax'\n"; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'a' 'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for #45 "NullPointerException in LexerATNSimulator.execDFA". 
- * https://github.com/antlr/antlr4/issues/46 - */ - public static class LexerExecDFA extends BaseLexerTestDescriptor { - public String input = "x : x"; - /** - [@0,0:0='x',<3>,1:0] - [@1,2:2=':',<1>,1:2] - [@2,4:4='x',<3>,1:4] - [@3,5:4='',<-1>,1:5] - */ - @CommentHasStringValue - public String output; - - /** - line 1:1 token recognition error at: ' ' - line 1:3 token recognition error at: ' ' - */ - @CommentHasStringValue - public String errors; - - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - COLON : ':' ; - PTR : '->' ; - ID : [a-z]+; - */ - @CommentHasStringValue - public String grammar; - } - - public static abstract class StringsEmbeddedInActions extends BaseLexerTestDescriptor { - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - // ST interprets \\ as \ so we need \\\\ to get \\ - /** - lexer grammar L; - ACTION2 : '[' (STRING | ~'"')*? ']'; - STRING : '"' ('\\\\' '"' | .)*? '"'; - WS : [ \t\r\n]+ -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class StringsEmbeddedInActions_1 extends StringsEmbeddedInActions { - public String input = "[\"foo\"]"; - /** - [@0,0:6='["foo"]',<1>,1:0] - [@1,7:6='',<-1>,1:7] - */ - @CommentHasStringValue - public String output; - } - - public static class StringsEmbeddedInActions_2 extends StringsEmbeddedInActions { - public String input = "[\"foo]"; - public String output = "[@0,6:5='',<-1>,1:6]\n"; - public String errors = "line 1:0 token recognition error at: '[\"foo]'\n"; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java deleted file mode 100644 index 7c74e74cb0..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java +++ /dev/null @@ -1,1060 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR 
Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.test.runtime.BaseLexerTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Paths; - -public class LexerExecDescriptors { - public static class ActionPlacement extends BaseLexerTestDescriptor { - public String input = "ab"; - /** - stuff0: - stuff1: a - stuff2: ab - ab - [@0,0:1='ab',<1>,1:0] - [@1,2:1='',<-1>,1:2] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : ({} 'a' - | {} - 'a' {} - 'b' {}) - {} ; - WS : (' '|'\n') -> skip ; - J : .; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class CharSet extends BaseLexerTestDescriptor { - public String input = "34\n 34"; - /** - I - I - [@0,0:1='34',<1>,1:0] - [@1,4:5='34',<1>,2:1] - [@2,6:5='',<-1>,2:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : '0'..'9'+ {} ; - WS : [ \n\\u000D] -> skip ; - */ // needs escape on u000D otherwise java converts even in comment - @CommentHasStringValue - public String grammar; - - } - - /* regression test for antlr/antlr4#1925 */ - public static class UnicodeCharSet extends BaseLexerTestDescriptor { - public String input = "均"; - /** - [@0,0:0='均',<1>,1:0] - [@1,1:0='',<-1>,1:1] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - ID : ([A-Z_]|'\u0100'..'\uFFFE') 
([A-Z_0-9]|'\u0100'..'\uFFFE')*; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetInSet extends BaseLexerTestDescriptor { - public String input = "a x"; - /** - I - I - [@0,0:0='a',<1>,1:0] - [@1,2:2='x',<1>,1:2] - [@2,3:2='',<-1>,1:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : (~[ab \\n]|'a') {} ; - WS : [ \n\\u000D]+ -> skip ; - */ // needs escape on u000D otherwise java converts even in comment - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetNot extends BaseLexerTestDescriptor { - public String input = "xaf"; - /** - I - [@0,0:2='xaf',<1>,1:0] - [@1,3:2='',<-1>,1:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : ~[ab \n] ~[ \ncd]* {} ; - WS : [ \n\\u000D]+ -> skip ; - */ // needs escape on u000D otherwise java converts even in comment - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetPlus extends BaseLexerTestDescriptor { - public String input = "34\n 34"; - /** - I - I - [@0,0:1='34',<1>,1:0] - [@1,4:5='34',<1>,2:1] - [@2,6:5='',<-1>,2:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : '0'..'9'+ {} ; - WS : [ \n\\u000D]+ -> skip ; - */ // needs escape on u000D otherwise java converts even in comment - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetRange extends BaseLexerTestDescriptor { - public String input = "34\n 34 a2 abc \n "; - /** - I - I - ID - ID - [@0,0:1='34',<1>,1:0] - [@1,4:5='34',<1>,2:1] - [@2,7:8='a2',<2>,2:4] - [@3,10:12='abc',<2>,2:7] - [@4,18:17='',<-1>,3:3] - */ - 
@CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : [0-9]+ {} ; - ID : [a-zA-Z] [a-zA-Z0-9]* {} ; - WS : [ \n\\u0009\r]+ -> skip ; - */ // needs escape on u0009 otherwise java converts even in comment - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetWithEscapedChar extends BaseLexerTestDescriptor { - public String input = "- ] "; - /** - DASHBRACK - DASHBRACK - [@0,0:0='-',<1>,1:0] - [@1,2:2=']',<1>,1:2] - [@2,4:3='',<-1>,1:4] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - DASHBRACK : [\\-\]]+ {} ; - WS : [ \n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetWithMissingEscapeChar extends BaseLexerTestDescriptor { - public String input = "34 "; - /** - I - [@0,0:1='34',<1>,1:0] - [@1,3:2='',<-1>,1:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : [0-9]+ {} ; - WS : [ \n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetWithQuote1 extends BaseLexerTestDescriptor { - public String input = "b\"a"; - /** - A - [@0,0:2='b"a',<1>,1:0] - [@1,3:2='',<-1>,1:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : ["a-z]+ {} ; - WS : [ \n\t]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class CharSetWithQuote2 extends BaseLexerTestDescriptor { - public String input = "b\"\\a"; - /** - A - [@0,0:3='b"\a',<1>,1:0] - [@1,4:3='',<-1>,1:4] - */ - 
@CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : ["\\\\ab]+ {} ; - WS : [ \n\t]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class EOFByItself extends BaseLexerTestDescriptor { - public String input = ""; - /** - [@0,0:-1='',<1>,1:0] - [@1,0:-1='',<-1>,1:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - DONE : EOF ; - A : 'a'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class EOFSuffixInFirstRule extends BaseLexerTestDescriptor { - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : 'a' EOF ; - B : 'a'; - C : 'c'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class EOFSuffixInFirstRule_1 extends EOFSuffixInFirstRule { - public String input = ""; - public String output = "[@0,0:-1='',<-1>,1:0]\n"; - } - - public static class EOFSuffixInFirstRule_2 extends EOFSuffixInFirstRule { - public String input = "a"; - /** - [@0,0:0='a',<1>,1:0] - [@1,1:0='',<-1>,1:1] - */ - @CommentHasStringValue - public String output; - } - - public static class GreedyClosure extends BaseLexerTestDescriptor { - /** - //blah - //blah - */ - @CommentHasStringValue - public String input; - - /** - [@0,0:13='//blah\n//blah\n',<1>,1:0] - [@1,14:13='',<-1>,3:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : '//' .*? 
'\n' CMT*; - WS : (' '|'\t')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class GreedyConfigs extends BaseLexerTestDescriptor { - public String input = "ab"; - /** - ab - [@0,0:1='ab',<1>,1:0] - [@1,2:1='',<-1>,1:2] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : ('a' | 'ab') {} ; - WS : (' '|'\n') -> skip ; - J : .; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class GreedyOptional extends BaseLexerTestDescriptor { - /** - //blah - //blah - */ - @CommentHasStringValue - public String input; - - /** - [@0,0:13='//blah\n//blah\n',<1>,1:0] - [@1,14:13='',<-1>,3:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : '//' .*? '\n' CMT?; - WS : (' '|'\t')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class GreedyPositiveClosure extends BaseLexerTestDescriptor { - /** - //blah - //blah - */ - @CommentHasStringValue - public String input; - - /** - [@0,0:13='//blah\n//blah\n',<1>,1:0] - [@1,14:13='',<-1>,3:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : ('//' .*? 
'\n')+; - WS : (' '|'\t')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class HexVsID extends BaseLexerTestDescriptor { - public String input = "x 0 1 a.b a.l"; - /** - [@0,0:0='x',<5>,1:0] - [@1,1:1=' ',<6>,1:1] - [@2,2:2='0',<2>,1:2] - [@3,3:3=' ',<6>,1:3] - [@4,4:4='1',<2>,1:4] - [@5,5:5=' ',<6>,1:5] - [@6,6:6='a',<5>,1:6] - [@7,7:7='.',<4>,1:7] - [@8,8:8='b',<5>,1:8] - [@9,9:9=' ',<6>,1:9] - [@10,10:10='a',<5>,1:10] - [@11,11:11='.',<4>,1:11] - [@12,12:12='l',<5>,1:12] - [@13,13:12='',<-1>,1:13] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - HexLiteral : '0' ('x'|'X') HexDigit+ ; - DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ; - FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ; - DOT : '.' ; - ID : 'a'..'z'+ ; - fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ; - WS : (' '|'\n')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class KeywordID extends BaseLexerTestDescriptor { - public String input = "end eend ending a"; - /** - [@0,0:2='end',<1>,1:0] - [@1,3:3=' ',<3>,1:3] - [@2,4:7='eend',<2>,1:4] - [@3,8:8=' ',<3>,1:8] - [@4,9:14='ending',<2>,1:9] - [@5,15:15=' ',<3>,1:15] - [@6,16:16='a',<2>,1:16] - [@7,17:16='',<-1>,1:17] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - KEND : 'end' ; // has priority - ID : 'a'..'z'+ ; - WS : (' '|'\n')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NonGreedyClosure extends BaseLexerTestDescriptor { - /** - //blah - //blah - */ - @CommentHasStringValue - public String input; - - /** - [@0,0:6='//blah\n',<1>,1:0] - [@1,7:13='//blah\n',<1>,2:0] - [@2,14:13='',<-1>,3:0] - */ - @CommentHasStringValue - public String output; - - public String 
errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : '//' .*? '\n' CMT*?; - WS : (' '|'\t')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NonGreedyConfigs extends BaseLexerTestDescriptor { - public String input = "ab"; - /** - a - b - [@0,0:0='a',<1>,1:0] - [@1,1:1='b',<3>,1:1] - [@2,2:1='',<-1>,1:2] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - I : .*? ('a' | 'ab') {} ; - WS : (' '|'\n') -> skip ; - J : . {}; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NonGreedyOptional extends BaseLexerTestDescriptor { - /** - //blah - //blah - */ - @CommentHasStringValue - public String input; - - /** - [@0,0:6='//blah\n',<1>,1:0] - [@1,7:13='//blah\n',<1>,2:0] - [@2,14:13='',<-1>,3:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : '//' .*? '\n' CMT??; - WS : (' '|'\t')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NonGreedyPositiveClosure extends BaseLexerTestDescriptor { - /** - //blah - //blah - */ - @CommentHasStringValue - public String input; - - /** - [@0,0:6='//blah\n',<1>,1:0] - [@1,7:13='//blah\n',<1>,2:0] - [@2,14:13='',<-1>,3:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : ('//' .*? 
'\n')+?; - WS : (' '|'\t')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NonGreedyTermination1 extends BaseLexerTestDescriptor { - public String input = "\"hi\"\"mom\""; - /** - [@0,0:3='"hi"',<1>,1:0] - [@1,4:8='"mom"',<1>,1:4] - [@2,9:8='',<-1>,1:9] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - STRING : '"' ('""' | .)*? '"'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NonGreedyTermination2 extends BaseLexerTestDescriptor { - public String input = "\"\"\"mom\""; - /** - [@0,0:6='"""mom"',<1>,1:0] - [@1,7:6='',<-1>,1:7] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - STRING : '"' ('""' | .)+? '"'; - */ - @CommentHasStringValue - public String grammar; - - } - - /* - * This is a regression test for antlr/antlr4#224: "Parentheses without - * quantifier in lexer rules have unclear effect". 
- * https://github.com/antlr/antlr4/issues/224 - */ - public static class Parentheses extends BaseLexerTestDescriptor { - public String input = "-.-.-!"; - /** - [@0,0:4='-.-.-',<1>,1:0] - [@1,5:5='!',<3>,1:5] - [@2,6:5='',<-1>,1:6] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - START_BLOCK: '-.-.-'; - ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+; - fragment LETTER: L_A|L_K; - fragment L_A: '.-'; - fragment L_K: '-.-'; - SEPARATOR: '!'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class PositionAdjustingLexer extends BaseLexerTestDescriptor { - /** - tokens - tokens { - notLabel - label1 = - label2 += - notLabel - */ - @CommentHasStringValue - public String input; - - /** - [@0,0:5='tokens',<6>,1:0] - [@1,7:12='tokens',<4>,2:0] - [@2,14:14='{',<3>,2:7] - [@3,16:23='notLabel',<6>,3:0] - [@4,25:30='label1',<5>,4:0] - [@5,32:32='=',<1>,4:7] - [@6,34:39='label2',<5>,5:0] - [@7,41:42='+=',<2>,5:7] - [@8,44:51='notLabel',<6>,6:0] - [@9,53:52='',<-1>,7:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "PositionAdjustingLexer"; - - /** - lexer grammar PositionAdjustingLexer; - - @definitions { - - } - - @members { - - } - - ASSIGN : '=' ; - PLUS_ASSIGN : '+=' ; - LCURLY: '{'; - - // 'tokens' followed by '{' - TOKENS : 'tokens' IGNORED '{'; - - // IDENTIFIER followed by '+=' or '=' - LABEL - : IDENTIFIER IGNORED '+'? 
'=' - ; - - IDENTIFIER - : [a-zA-Z_] [a-zA-Z0-9_]* - ; - - fragment - IGNORED - : [ \t\r\n]* - ; - - NEWLINE - : [\r\n]+ -> skip - ; - - WS - : [ \t]+ -> skip - ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class QuoteTranslation extends BaseLexerTestDescriptor { - public String input = "\""; - /** - [@0,0:0='"',<1>,1:0] - [@1,1:0='',<-1>,1:1] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - QUOTE : '"' ; // make sure this compiles - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class RecursiveLexerRuleRefWithWildcardPlus extends BaseLexerTestDescriptor { - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : '/*' (CMT | .)+? '*' '/' ; - WS : (' '|'\n')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class RecursiveLexerRuleRefWithWildcardPlus_1 extends RecursiveLexerRuleRefWithWildcardPlus { - public String input = - "/* ick */\n"+ - "/* /* */\n"+ - "/* /*nested*/ */\n"; // stuff on end of comment matches another rule - - public String output = - "[@0,0:8='/* ick */',<1>,1:0]\n"+ - "[@1,9:9='\\n',<2>,1:9]\n"+ - "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n"+ - "[@3,35:35='\\n',<2>,3:16]\n"+ - "[@4,36:35='',<-1>,4:0]\n"; - } - - public static class RecursiveLexerRuleRefWithWildcardPlus_2 extends RecursiveLexerRuleRefWithWildcardPlus { - public String input = - "/* ick */x\n"+ - "/* /* */x\n"+ - "/* /*nested*/ */x\n"; - - public String output = - "[@0,0:8='/* ick */',<1>,1:0]\n"+ - "[@1,10:10='\\n',<2>,1:10]\n"+ - "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n"+ - "[@3,38:38='\\n',<2>,3:17]\n"+ - "[@4,39:38='',<-1>,4:0]\n"; - - /** - line 1:9 token recognition error at: 'x' - line 3:16 token recognition error at: 'x' - */ - 
@CommentHasStringValue - public String errors; - } - - public static abstract class RecursiveLexerRuleRefWithWildcardStar extends BaseLexerTestDescriptor { - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - CMT : '/*' (CMT | .)*? '*' '/' ; - WS : (' '|'\n')+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class RecursiveLexerRuleRefWithWildcardStar_1 extends RecursiveLexerRuleRefWithWildcardStar { - public String input = - "/* ick */\n"+ - "/* /* */\n"+ - "/* /*nested*/ */\n"; - - public String output = - "[@0,0:8='/* ick */',<1>,1:0]\n"+ - "[@1,9:9='\\n',<2>,1:9]\n"+ - "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n"+ - "[@3,35:35='\\n',<2>,3:16]\n"+ - "[@4,36:35='',<-1>,4:0]\n"; - } - - public static class RecursiveLexerRuleRefWithWildcardStar_2 extends RecursiveLexerRuleRefWithWildcardStar { - public String input = - "/* ick */x\n"+ - "/* /* */x\n"+ - "/* /*nested*/ */x\n"; - - public String output = - "[@0,0:8='/* ick */',<1>,1:0]\n"+ - "[@1,10:10='\\n',<2>,1:10]\n"+ - "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n"+ - "[@3,38:38='\\n',<2>,3:17]\n"+ - "[@4,39:38='',<-1>,4:0]\n"; - - /** - line 1:9 token recognition error at: 'x' - line 3:16 token recognition error at: 'x' - */ - @CommentHasStringValue - public String errors; - } - - public static class RefToRuleDoesNotSetTokenNorEmitAnother extends BaseLexerTestDescriptor { - public String input = "34 -21 3"; - - // EOF has no length so range is 8:7 not 8:8 - /** - [@0,0:1='34',<2>,1:0] - [@1,3:5='-21',<1>,1:3] - [@2,7:7='3',<2>,1:7] - [@3,8:7='',<-1>,1:8] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - A : '-' I ; - I : '0'..'9'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Slashes extends 
BaseLexerTestDescriptor { - public String input = "\\ / \\/ /\\"; - /** - [@0,0:0='\',<1>,1:0] - [@1,2:2='/',<2>,1:2] - [@2,4:5='\/',<3>,1:4] - [@3,7:8='/\',<4>,1:7] - [@4,9:8='',<-1>,1:9] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - Backslash : '\\\\'; - Slash : '/'; - Vee : '\\\\/'; - Wedge : '/\\\\'; - WS : [ \t] -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - /* - * This is a regression test for antlr/antlr4#687 "Empty zero-length tokens - * cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match - * zero-length tokens" - * https://github.com/antlr/antlr4/issues/687 - * https://github.com/antlr/antlr4/issues/688 - */ - public static class ZeroLengthToken extends BaseLexerTestDescriptor { - public String input = "'xxx'"; - /** - [@0,0:4=''xxx'',<1>,1:0] - [@1,5:4='',<-1>,1:5] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - BeginString - : '\'' -> more, pushMode(StringMode) - ; - mode StringMode; - StringMode_X : 'x' -> more; - StringMode_Done : -> more, mode(EndStringMode); - mode EndStringMode; - EndString : '\'' -> popMode; - */ - @CommentHasStringValue - public String grammar; - } - - /** - * This is a regression test for antlr/antlr4#76 "Serialized ATN strings - * should be split when longer than 2^16 bytes (class file limitation)" - * https://github.com/antlr/antlr4/issues/76 - */ - public static class LargeLexer extends BaseLexerTestDescriptor { - public String input = "KW400"; - /** - [@0,0:4='KW400',<402>,1:0] - [@1,5:4='',<-1>,1:5] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** Look for grammar as resource */ - @Override - public Pair 
getGrammar() { - String grammar = null; - - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL stuff = loader.getResource("org/antlr/v4/test/runtime/LargeLexer.g4"); - try { - grammar = new String(Files.readAllBytes(Paths.get(stuff.toURI()))); - } - catch (Exception e) { - System.err.println("Cannot find grammar org/antlr/v4/test/runtime/LarseLexer.g4"); - } - - return new Pair<>(grammarName, grammar); - } - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ListenersDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ListenersDescriptors.java deleted file mode 100644 index e4bafa7fcf..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ListenersDescriptors.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class ListenersDescriptors { - public static class Basic extends BaseParserTestDescriptor { - public String input = "1 2"; - /** - (a 1 2) - 1 - 2 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - - - - - s - @after { - - - } - : r=a ; - a : INT INT - | ID - ; - MULT: '*' ; - ADD : '+' ; - INT : [0-9]+ ; - ID : [a-z]+ ; - WS : [ \t\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LR extends BaseParserTestDescriptor { - public String input = "1+2*3"; - /** - (e (e 1) + (e (e 2) * (e 3))) - 1 - 2 - 3 - 2 3 2 - 1 2 1 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - - - - - s - @after { - - - } - : r=e ; - e : e op='*' e - | e op='+' e - | INT - ; - MULT: '*' ; - ADD : '+' ; - INT : [0-9]+ ; - ID : [a-z]+ ; - WS : [ \t\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LRWithLabels extends BaseParserTestDescriptor { - public String input = "1(2,3)"; - /** - (e (e 1) ( (eList (e 2) , (e 3)) )) - 1 - 2 - 3 - 1 [13 6] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - - - - - s - @after { - - - } - : r=e ; - e : e '(' eList ')' # Call - | INT # Int - ; - eList : e (',' e)* ; - MULT: '*' ; - ADD : '+' ; - INT : [0-9]+ ; - ID : [a-z]+ ; - WS : [ \t\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class RuleGetters extends BaseParserTestDescriptor { - public String 
errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - - - - - s - @after { - - - } - : r=a ; - a : b b // forces list - | b // a list still - ; - b : ID | INT; - MULT: '*' ; - ADD : '+' ; - INT : [0-9]+ ; - ID : [a-z]+ ; - WS : [ \t\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class RuleGetters_1 extends RuleGetters { - public String input = "1 2"; - /** - (a (b 1) (b 2)) - 1 2 1 - */ - @CommentHasStringValue - public String output; - } - - public static class RuleGetters_2 extends RuleGetters { - public String input = "abc"; - /** - (a (b abc)) - abc - */ - @CommentHasStringValue - public String output; - } - - public static abstract class TokenGetters extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - - - - - s - @after { - - - } - : r=a ; - a : INT INT - | ID - ; - MULT: '*' ; - ADD : '+' ; - INT : [0-9]+ ; - ID : [a-z]+ ; - WS : [ \t\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TokenGetters_1 extends TokenGetters { - public String input = "1 2"; - /** - (a 1 2) - 1 2 [1, 2] - */ - @CommentHasStringValue - public String output; - } - - public static class TokenGetters_2 extends TokenGetters { - public String input = "abc"; - /** - (a abc) - [@0,0:2='abc',<4>,1:0] - */ - @CommentHasStringValue - public String output; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParseTreesDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParseTreesDescriptors.java deleted file mode 100644 index 816f67b774..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParseTreesDescriptors.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class ParseTreesDescriptors { - public static class AltNum extends BaseParserTestDescriptor { - public String input = "xyz"; - public String output = "(a:3 x (b:2 y) z)\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - - options { contextSuperClass=MyRuleNode; } - - - - - s - @init { - - } - @after { - - } - : r=a ; - - a : 'f' - | 'g' - | 'x' b 'z' - ; - b : 'e' {} | 'y' - ; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean ignore(String targetName) { - return targetName.equals("Rust"); - } - } - - public static class ExtraToken extends BaseParserTestDescriptor { - public String input = "xzy"; - public String output = "(a x z y)\n"; - public String errors = "line 1:1 extraneous input 'z' expecting 'y'\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : 'x' 'y' - ; - Z : 'z' - ; - - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ExtraTokensAndAltLabels extends BaseParserTestDescriptor { - public String input = "${ ? a ?}"; - public String output = "(s ${ (v ? a) ? })\n"; - public String errors = - "line 1:3 extraneous input '?' expecting {'a', 'b'}\n"+ - "line 1:7 extraneous input '?' expecting '}'\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - - s - @init { - - } - @after { - - } - : '${' v '}' - ; - - v : A #altA - | B #altB - ; - - A : 'a' ; - B : 'b' ; - - WHITESPACE : [ \n\t\r]+ -> channel(HIDDEN) ; - - ERROR : . 
; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean ignore(String targetName) { - return !targetName.matches("Java|Python2|Python3|Node|Swift|CSharp|Rust"); - } - } - - public static class NoViableAlt extends BaseParserTestDescriptor { - public String input = "z"; - public String output = "(a z)\n"; - public String errors = "line 1:0 mismatched input 'z' expecting {'x', 'y'}\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : 'x' | 'y' - ; - Z : 'z' - ; - - */ - @CommentHasStringValue - public String grammar; - - } - - public static class RuleRef extends BaseParserTestDescriptor { - public String input = "yx"; - public String output = "(a (b y) x)\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : b 'x' - ; - b : 'y' - ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Sync extends BaseParserTestDescriptor { - public String input = "xzyy!"; - public String output = "(a x z y y !)\n"; - public String errors = "line 1:1 extraneous input 'z' expecting {'y', '!'}\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : 'x' 'y'* '!' 
- ; - Z : 'z' - ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Token2 extends BaseParserTestDescriptor { - public String input = "xy"; - public String output = "(a x y)\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : 'x' 'y' - ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TokenAndRuleContextString extends BaseParserTestDescriptor { - public String input = "x"; - /** - [a, s] - (a x) - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : 'x' { - - } ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TwoAltLoop extends BaseParserTestDescriptor { - public String input = "xyyxyxz"; - public String output = "(a x y y x y x z)\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : ('x' | 'y')* 'z' - ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TwoAlts extends BaseParserTestDescriptor { - public String input = "y"; - public String output = "(a y)\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s - @init { - - } - @after { - - } - : r=a ; - a : 'x' | 'y' - ; - */ - @CommentHasStringValue - public String grammar; - - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java deleted file mode 100644 index 4b860569d2..0000000000 --- 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java +++ /dev/null @@ -1,645 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class ParserErrorsDescriptors { - public static class ConjuringUpToken extends BaseParserTestDescriptor { - public String input = "ac"; - public String output = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; - public String errors = "line 1:1 missing 'b' at 'c'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' x='b' {} 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ConjuringUpTokenFromSet extends BaseParserTestDescriptor { - public String input = "ad"; - public String output = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; - public String errors = "line 1:1 missing {'b', 'c'} at 'd'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' x=('b'|'c') {} 'd' ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * Regression test for "Getter for context is not a list when it should be". 
- * https://github.com/antlr/antlr4/issues/19 - */ - public static class ContextListGetters extends BaseParserTestDescriptor { - public String input = "abab"; - public String output = "abab\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members{ - - } - s : (a | b)+; - a : 'a' {}; - b : 'b' {}; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class DuplicatedLeftRecursiveCall extends BaseParserTestDescriptor { - public String output = null; - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : expr EOF; - expr : 'x' - | expr expr - ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class DuplicatedLeftRecursiveCall_1 extends DuplicatedLeftRecursiveCall { - public String input = "x"; - } - - public static class DuplicatedLeftRecursiveCall_2 extends DuplicatedLeftRecursiveCall { - public String input = "xx"; - } - - public static class DuplicatedLeftRecursiveCall_3 extends DuplicatedLeftRecursiveCall { - public String input = "xxx"; - } - - public static class DuplicatedLeftRecursiveCall_4 extends DuplicatedLeftRecursiveCall { - public String input = "xxxx"; - } - - /** - * This is a regression test for #45 "NullPointerException in ATNConfig.hashCode". - * https://github.com/antlr/antlr4/issues/45 - *

    - * The original cause of this issue was an error in the tool's ATN state optimization, - * which is now detected early in {@link ATNSerializer} by ensuring that all - * serialized transitions point to states which were not removed. - */ - public static class InvalidATNStateRemoval extends BaseParserTestDescriptor { - public String input = "x:x"; - public String output = null; - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : ID ':' expr; - expr : primary expr? {} | expr '->' ID; - primary : ID; - ID : [a-z]+; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for #6 "NullPointerException in getMissingSymbol". - * https://github.com/antlr/antlr4/issues/6 - */ - public static class InvalidEmptyInput extends BaseParserTestDescriptor { - public String input = ""; - public String output = null; - public String errors = "line 1:0 mismatched input '' expecting ID\n"; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : ID+; - ID : [a-z]+; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LL1ErrorInfo extends BaseParserTestDescriptor { - public String input = "dog and software"; - public String output = "{'hardware', 'software'}\n"; - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : animal (AND acClass)? 
service EOF; - animal : (DOG | CAT ); - service : (HARDWARE | SOFTWARE) ; - AND : 'and'; - DOG : 'dog'; - CAT : 'cat'; - HARDWARE: 'hardware'; - SOFTWARE: 'software'; - WS : ' ' -> skip ; - acClass - @init - {} - : ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LL2 extends BaseParserTestDescriptor { - public String input = "ae"; - public String output = null; - public String errors = "line 1:1 no viable alternative at input 'ae'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b' - | 'a' 'c' - ; - q : 'e' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LL3 extends BaseParserTestDescriptor { - public String input = "abe"; - public String output = null; - public String errors = "line 1:2 no viable alternative at input 'abe'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b'* 'c' - | 'a' 'b' 'd' - ; - q : 'e' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LLStar extends BaseParserTestDescriptor { - public String input = "aaae"; - public String output = null; - public String errors = "line 1:3 no viable alternative at input 'aaae'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a'+ 'b' - | 'a'+ 'c' - ; - q : 'e' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class MultiTokenDeletionBeforeLoop extends BaseParserTestDescriptor { - public String input = "aacabc"; - public String output = null; - public String errors = "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b'* 'c'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class MultiTokenDeletionBeforeLoop2 extends BaseParserTestDescriptor { - public String input = "aacabc"; - 
public String output = null; - public String errors = "line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' ('b'|'z'{})* 'c'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class MultiTokenDeletionDuringLoop extends BaseParserTestDescriptor { - public String input = "abaaababc"; - public String output = null; - /** - line 1:2 extraneous input 'a' expecting {'b', 'c'} - line 1:6 extraneous input 'a' expecting {'b', 'c'} - */ - @CommentHasStringValue - public String errors; - - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b'* 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class MultiTokenDeletionDuringLoop2 extends BaseParserTestDescriptor { - public String input = "abaaababc"; - public String output = null; - /** - line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'} - line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'} - */ - @CommentHasStringValue - public String errors; - - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' ('b'|'z'{})* 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NoViableAltAvoidance extends BaseParserTestDescriptor { - public String input = "a."; - public String output = null; - public String errors = "line 1:1 mismatched input '.' expecting '!'\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : e '!' ; - e : 'a' 'b' - | 'a' - ; - DOT : '.' 
; - WS : [ \t\r\n]+ -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleSetInsertion extends BaseParserTestDescriptor { - public String input = "ad"; - public String output = null; - public String errors = "line 1:1 missing {'b', 'c'} at 'd'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' ('b'|'c') 'd' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleSetInsertionConsumption extends BaseParserTestDescriptor { - public String input = "ad"; - public String output = "[@0,0:0='a',<3>,1:0]\n"; - public String errors = "line 1:1 missing {'b', 'c'} at 'd'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - myset: ('b'|'c') ; - a: 'a' myset 'd' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletion extends BaseParserTestDescriptor { - public String input = "aab"; - public String output = null; - public String errors = "line 1:1 extraneous input 'a' expecting 'b'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletionBeforeAlt extends BaseParserTestDescriptor { - public String input = "ac"; - public String output = null; - public String errors = "line 1:0 extraneous input 'a' expecting {'b', 'c'}\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ('b' | 'c') - ; - q : 'a' - ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletionBeforeLoop extends BaseParserTestDescriptor { - public String input = "aabc"; - public String output = null; - /** - line 1:1 extraneous input 'a' expecting {, 'b'} - line 1:3 token recognition error at: 'c' - */ - @CommentHasStringValue - public String 
errors; - - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b'* EOF ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletionBeforeLoop2 extends BaseParserTestDescriptor { - public String input = "aabc"; - public String output = null; - /** - line 1:1 extraneous input 'a' expecting {, 'b', 'z'} - line 1:3 token recognition error at: 'c' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' ('b'|'z'{})* EOF ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletionBeforePredict extends BaseParserTestDescriptor { - public String input = "caaab"; - public String output = null; - public String errors = "line 1:0 extraneous input 'c' expecting 'a'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a'+ 'b' - | 'a'+ 'c' - ; - q : 'e' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletionConsumption extends BaseParserTestDescriptor { - public String input = "aabd"; - public String output = "[@2,2:2='b',<1>,1:2]\n"; - public String errors = "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - myset: ('b'|'c') ; - a: 'a' myset 'd' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletionDuringLoop extends BaseParserTestDescriptor { - public String input = "ababbc"; - public String output = null; - public String errors = "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b'* 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class 
SingleTokenDeletionDuringLoop2 extends BaseParserTestDescriptor { - public String input = "ababbc"; - public String output = null; - public String errors = "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' ('b'|'z'{})* 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenDeletionExpectingSet extends BaseParserTestDescriptor { - public String input = "aab"; - public String output = null; - public String errors = "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' ('b'|'c') ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SingleTokenInsertion extends BaseParserTestDescriptor { - public String input = "ac"; - public String output = null; - public String errors = "line 1:1 missing 'b' at 'c'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b' 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TokenMismatch extends BaseParserTestDescriptor { - public String input = "aa"; - public String output = null; - public String errors = "line 1:1 mismatched input 'a' expecting 'b'\n"; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : 'a' 'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TokenMismatch2 extends BaseParserTestDescriptor { - public String input = "( ~FORCE_ERROR~ "; - public String output = null; - public String errors = "line 1:2 mismatched input '~FORCE_ERROR~' expecting {')', ID}\n"; - public String startRule = "stat"; - public String grammarName = "T"; - - /** - grammar T; - - stat: ( '(' expr? ')' )? 
EOF ; - expr: ID '=' STR ; - - ERR : '~FORCE_ERROR~' ; - ID : [a-zA-Z]+ ; - STR : '"' ~["]* '"' ; - WS : [ \t\r\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TokenMismatch3 extends BaseParserTestDescriptor { - public String input = ""; - public String output = null; - public String errors = "line 1:0 mismatched input '' expecting {'(', BOOLEAN_LITERAL, ID, '$'}\n"; - public String startRule = "expression"; - public String grammarName = "T"; - - /** - grammar T; - - expression - : value - | expression op=AND expression - | expression op=OR expression - ; - value - : BOOLEAN_LITERAL - | ID - | ID1 - | '(' expression ')' - ; - - AND : '&&'; - OR : '||'; - - BOOLEAN_LITERAL : 'true' | 'false'; - - ID : [a-z]+; - ID1 : '$'; - - WS : [ \t\r\n]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ExtraneousInput extends BaseParserTestDescriptor { - public String input = "baa"; - public String output = null; - public String errors = "line 1:0 mismatched input 'b' expecting {, 'a'}\n"; - public String startRule = "file"; - public String grammarName = "T"; - - /** - grammar T; - - member : 'a'; - body : member*; - file : body EOF; - B : 'b'; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean ignore(String targetName) { - return !"Java".equals(targetName) && !"Swift".equals(targetName) && !"Rust".equals(targetName); - } - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java deleted file mode 100644 index 6fa27fc0d2..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java +++ /dev/null @@ -1,892 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class ParserExecDescriptors { - public static class APlus extends BaseParserTestDescriptor { - public String input = "a b c"; - public String output = "abc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ID+ { - - }; - ID : 'a'..'z'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AStar_1 extends BaseParserTestDescriptor { - public String input = ""; - public String output = "\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ID* { - - }; - ID : 'a'..'z'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AStar_2 extends BaseParserTestDescriptor { - public String input = "a b c"; - public String output = "abc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ID* { - - }; - ID : 'a'..'z'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AorAPlus extends BaseParserTestDescriptor { - public String input = "a b c"; - public String output = "abc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|ID)+ { - - }; - ID : 'a'..'z'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AorAStar_1 extends BaseParserTestDescriptor { - public String input = ""; - public String output = "\n"; - public String 
errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|ID)* { - - }; - ID : 'a'..'z'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AorAStar_2 extends BaseParserTestDescriptor { - public String input = "a b c"; - public String output = "abc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|ID)* { - - }; - ID : 'a'..'z'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AorB extends BaseParserTestDescriptor { - public String input = "34"; - public String output = "alt 2\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ID { - - } | INT { - - }; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AorBPlus extends BaseParserTestDescriptor { - public String input = "a 34 c"; - public String output = "a34c\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|INT{ - })+ { - - }; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AorBStar_1 extends BaseParserTestDescriptor { - public String input = ""; - public String output = "\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|INT{ - })* { - - }; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class AorBStar_2 extends BaseParserTestDescriptor { - public String input = "a 34 c"; - public String output = "a34c\n"; - 
public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|INT{ - })* { - - }; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Basic extends BaseParserTestDescriptor { - public String input = "abc 34"; - public String output = "abc34\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ID INT { - - }; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - /** Match assignments, ignore other tokens with wildcard. */ - public static class Wildcard extends BaseParserTestDescriptor { - public String input = "x=10; abc;;;; y=99;"; - public String output = "x=10;\ny=99;\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (assign|.)+ EOF ; - assign : ID '=' INT ';' { - - } ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - } - - /** - * This test ensures that {@link org.antlr.v4.runtime.atn.ParserATNSimulator} does not produce a - * {@link StackOverflowError} when it encounters an {@code EOF} transition - * inside a closure. 
- */ - public static class EOFInClosure extends BaseParserTestDescriptor { - public String input = "x"; - public String output = null; - public String errors = null; - public String startRule = "prog"; - public String grammarName = "T"; - - /** - grammar T; - prog : stat EOF; - stat : 'x' ('y' | EOF)*?; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class IfIfElseGreedyBinding1 extends BaseParserTestDescriptor { - public String input = "if y if y x else x"; - /** - if y x else x - if y if y x else x - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : statement+ ; - statement : 'x' | ifStatement; - ifStatement : 'if' 'y' statement ('else' statement)? { - - }; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> channel(HIDDEN); - */ - @CommentHasStringValue - public String grammar; - - } - - public static class IfIfElseGreedyBinding2 extends BaseParserTestDescriptor { - public String input = "if y if y x else x"; - /** - if y x else x - if y if y x else x - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : statement+ ; - statement : 'x' | ifStatement; - ifStatement : 'if' 'y' statement ('else' statement|) { - - }; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> channel(HIDDEN); - */ - @CommentHasStringValue - public String grammar; - - } - - public static class IfIfElseNonGreedyBinding1 extends BaseParserTestDescriptor { - public String input = "if y if y x else x"; - /** - if y x - if y if y x else x - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : statement+ ; - statement : 'x' | ifStatement; - ifStatement : 'if' 'y' statement ('else' 
statement)?? { - - }; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> channel(HIDDEN); - */ - @CommentHasStringValue - public String grammar; - - } - - public static class IfIfElseNonGreedyBinding2 extends BaseParserTestDescriptor { - public String input = "if y if y x else x"; - /** - if y x - if y if y x else x - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : statement+ ; - statement : 'x' | ifStatement; - ifStatement : 'if' 'y' statement (|'else' statement) { - - }; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> channel(HIDDEN); - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LL1OptionalBlock_1 extends BaseParserTestDescriptor { - public String input = ""; - public String output = "\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|{}INT)? { - - }; - ID : 'a'..'z'+; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LL1OptionalBlock_2 extends BaseParserTestDescriptor { - public String input = "a"; - public String output = "a\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|{}INT)? 
{ - - }; - ID : 'a'..'z'+; - INT : '0'..'9'+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for antlr/antlr4#195 "label 'label' type - * mismatch with previous definition: TOKEN_LABEL!=RULE_LABEL" - * https://github.com/antlr/antlr4/issues/195 - */ - public static class LabelAliasingAcrossLabeledAlternatives extends BaseParserTestDescriptor { - public String input = "xy"; - /** - x - y - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : a* EOF; - a - : label=subrule {} #One - | label='y' {} #Two - ; - subrule : 'x'; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Labels extends BaseParserTestDescriptor { - public String input = "abc 34;"; - public String output = null; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : b1=b b2+=b* b3+=';' ; - b : id_=ID val+=INT*; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for antlr/antlr4#299 "Repeating subtree not - * accessible in visitor". - * https://github.com/antlr/antlr4/issues/299 - */ - public static class ListLabelForClosureContext extends BaseParserTestDescriptor { - public String input = "a"; - public String output = null; - public String errors = null; - public String startRule = "expression"; - public String grammarName = "T"; - - /** - grammar T; - ifStatement - @after { - })> - } - : 'if' expression - ( ( 'then' - executableStatement* - elseIfStatement* // \<--- problem is here; should yield a list not node - elseStatement? 
- 'end' 'if' - ) | executableStatement ) - ; - - elseIfStatement - : 'else' 'if' expression 'then' executableStatement* - ; - expression : 'a' ; - executableStatement : 'a' ; - elseStatement : 'a' ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for #270 "Fix operator += applied to a set of - * tokens". - * https://github.com/antlr/antlr4/issues/270 - */ - public static class ListLabelsOnSet extends BaseParserTestDescriptor { - public String input = "abc 34;"; - public String output = null; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : b b* ';' ; - b : ID val+=(INT | FLOAT)*; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - FLOAT : [0-9]+ '.' [0-9]+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This test ensures that {@link ParserATNSimulator} produces a correct - * result when the grammar contains multiple explicit references to - * {@code EOF} inside of parser rules. - */ - public static class MultipleEOFHandling extends BaseParserTestDescriptor { - public String input = "x"; - public String output = null; - public String errors = null; - public String startRule = "prog"; - public String grammarName = "T"; - - /** - grammar T; - prog : ('x' | 'x' 'y') EOF EOF; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This test is meant to detect regressions of bug antlr/antlr4#41. 
- * https://github.com/antlr/antlr4/issues/41 - */ - public static class Optional_1 extends BaseParserTestDescriptor { - public String input = "x"; - public String output = null; - public String errors = null; - public String startRule = "stat"; - public String grammarName = "T"; - - /** - grammar T; - stat : ifstat | 'x'; - ifstat : 'if' stat ('else' stat)?; - WS : [ \n\t]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Optional_2 extends BaseParserTestDescriptor { - public String input = "if x"; - public String output = null; - public String errors = null; - public String startRule = "stat"; - public String grammarName = "T"; - - /** - grammar T; - stat : ifstat | 'x'; - ifstat : 'if' stat ('else' stat)?; - WS : [ \n\t]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Optional_3 extends BaseParserTestDescriptor { - public String input = "if x else x"; - public String output = null; - public String errors = null; - public String startRule = "stat"; - public String grammarName = "T"; - - /** - grammar T; - stat : ifstat | 'x'; - ifstat : 'if' stat ('else' stat)?; - WS : [ \n\t]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Optional_4 extends BaseParserTestDescriptor { - public String input = "if if x else x"; - public String output = null; - public String errors = null; - public String startRule = "stat"; - public String grammarName = "T"; - - /** - grammar T; - stat : ifstat | 'x'; - ifstat : 'if' stat ('else' stat)?; - WS : [ \n\t]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /* - * This is a regression test for antlr/antlr4#561 "Issue with parser - * generation in 4.2.2" - * https://github.com/antlr/antlr4/issues/561 - */ - public static class ParserProperty extends BaseParserTestDescriptor { - public String input = "abc"; - public String output = "valid\n"; - public String errors = null; 
- public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - - a : {}? ID {} - ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This test is meant to test the expected solution to antlr/antlr4#42. - * https://github.com/antlr/antlr4/issues/42 - */ - public static class PredicatedIfIfElse extends BaseParserTestDescriptor { - public String input = "if x if x a else b"; - public String output = null; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : stmt EOF ; - stmt : ifStmt | ID; - ifStmt : 'if' ID stmt ('else' stmt | { })> }?); - ELSE : 'else'; - ID : [a-zA-Z]+; - WS : [ \\n\\t]+ -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for antlr/antlr4#334 "BailErrorStrategy: bails - * out on proper input". - * https://github.com/antlr/antlr4/issues/334 - */ - public static class PredictionIssue334 extends BaseParserTestDescriptor { - public String input = "a"; - public String output = "(file_ (item a) )\n"; - public String errors = null; - public String startRule = "file_"; - public String grammarName = "T"; - - /** - grammar T; - file_ @init{ - - } - @after { - - } - : item (SEMICOLON item)* SEMICOLON? EOF ; - item : A B?; - SEMICOLON: ';'; - A : 'a'|'A'; - B : 'b'|'B'; - WS : [ \r\t\n]+ -> skip; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for antlr/antlr4#561 "Issue with parser - * generation in 4.2.2" - * https://github.com/antlr/antlr4/issues/561 - */ - public static class ReferenceToATN_1 extends BaseParserTestDescriptor { - public String input = ""; - public String output = "\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|ATN)* ATN? 
{} ; - ID : 'a'..'z'+ ; - ATN : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ReferenceToATN_2 extends BaseParserTestDescriptor { - public String input = "a 34 c"; - public String output = "a34c\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (ID|ATN)* ATN? {} ; - ID : 'a'..'z'+ ; - ATN : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - } - - /** - * This is a regression test for antlr/antlr4#1545, case 1. - */ - public static class OpenDeviceStatement_Case1 extends BaseParserTestDescriptor { - public String input = "OPEN DEVICE DEVICE"; - public String output = "OPEN DEVICE DEVICE\n"; - public String errors = null; - public String startRule = "statement"; - public String grammarName = "OpenDeviceStatement"; - - /** - grammar OpenDeviceStatement; - program : statement+ '.' ; - - statement : 'OPEN' ( 'DEVICE' ( OPT1 | OPT2 | OPT3 )? )+ {} ; - - OPT1 : 'OPT-1'; - OPT2 : 'OPT-2'; - OPT3 : 'OPT-3'; - - WS : (' '|'\n')+ -> channel(HIDDEN); - */ - @CommentHasStringValue - public String grammar; - } - - /** - * This is a regression test for antlr/antlr4#1545, case 2. - */ - public static class OpenDeviceStatement_Case2 extends BaseParserTestDescriptor { - public String input = "OPEN DEVICE DEVICE"; - public String output = "OPEN DEVICE DEVICE\n"; - public String errors = null; - public String startRule = "statement"; - public String grammarName = "OpenDeviceStatement"; - - /** - grammar OpenDeviceStatement; - program : statement+ '.' ; - - statement : 'OPEN' ( 'DEVICE' ( (OPT1) | OPT2 | OPT3 )? )+ {} ; - - OPT1 : 'OPT-1'; - OPT2 : 'OPT-2'; - OPT3 : 'OPT-3'; - - WS : (' '|'\n')+ -> channel(HIDDEN); - */ - @CommentHasStringValue - public String grammar; - } - - /** - * This is a regression test for antlr/antlr4#1545, case 3. 
- */ - public static class OpenDeviceStatement_Case3 extends BaseParserTestDescriptor { - public String input = "OPEN DEVICE DEVICE."; - public String output = "OPEN DEVICE DEVICE\n"; - public String errors = null; - public String startRule = "statement"; - public String grammarName = "OpenDeviceStatement"; - - /** - grammar OpenDeviceStatement; - program : statement+ '.' ; - - statement : 'OPEN' ( 'DEVICE' ( (OPT1) | OPT2 | OPT3 )? )+ {} ; - - OPT1 : 'OPT-1'; - OPT2 : 'OPT-2'; - OPT3 : 'OPT-3'; - - WS : (' '|'\n')+ -> channel(HIDDEN); - */ - @CommentHasStringValue - public String grammar; - } - - /** - * This is a regression test for antlr/antlr4#2301. - */ - public static class OrderingPredicates extends BaseParserTestDescriptor { - public String input = "POINT AT X"; - public String output = null; - public String errors = null; - public String startRule = "expr"; - public String grammarName = "Issue2301"; - - /** - grammar Issue2301; - - SPACES: [ \t\r\n]+ -> skip; - - AT: 'AT'; - X : 'X'; - Y : 'Y'; - - ID: [A-Z]+; - - constant - : 'DUMMY' - ; - - expr - : ID constant? - | expr AT X - | expr AT Y - ; - */ - @CommentHasStringValue - public String grammar; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/PerformanceDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/PerformanceDescriptors.java deleted file mode 100644 index f2ea13b5ee..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/PerformanceDescriptors.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -import java.util.Arrays; - -public class PerformanceDescriptors { - /* - * This is a regression test for antlr/antlr4#192 "Poor performance of - * expression parsing". - * https://github.com/antlr/antlr4/issues/192 - */ - public static abstract class ExpressionGrammar extends BaseParserTestDescriptor { - public String output = null; - public String errors = null; - public String startRule = "program"; - public String grammarName = "Expr"; - - /** - grammar Expr; - - program: expr EOF; - - expr - : ID - | 'not' expr - | expr 'and' expr - | expr 'or' expr - ; - - ID: [a-zA-Z_][a-zA-Z_0-9]*; - WS: [ \t\n\r\f]+ -> skip; - ERROR: .; - */ - @CommentHasStringValue - public String grammar; - } - - public static class ExpressionGrammar_1 extends ExpressionGrammar { - /** - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or - X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or - not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or - not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or - not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or - not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and 
not X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 - */ - @CommentHasStringValue - public String input; - } - - public static class ExpressionGrammar_2 extends ExpressionGrammar { - /** - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 
and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 
and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not 
X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and 
not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not 
X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not 
X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and 
not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and 
not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 - */ - @CommentHasStringValue - public String input; - } - - /** Test for https://github.com/antlr/antlr4/issues/1398. - * Seeing through a large expression takes 5 _minutes_ on - * my fast box to complete. 
After fix, it's instantaneous. - */ - public static abstract class DropLoopEntryBranchInLRRule extends BaseParserTestDescriptor { - public String grammarName = "Expr"; - public String startRule = "stat"; - - /** - grammar Expr; - - stat : expr ';' - | expr '.' - ; - - expr - : ID - | 'not' expr - | expr 'and' expr - | expr 'or' expr - | '(' ID ')' expr - | expr '?' expr ':' expr - | 'between' expr 'and' expr - ; - - ID: [a-zA-Z_][a-zA-Z_0-9]*; - WS: [ \t\n\r\f]+ -> skip; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean ignore(String targetName) { - return !Arrays.asList("Java", "CSharp", "Python2", "Python3", "Node", "Cpp", "Swift", "Rust").contains(targetName); - } - } - - public static class DropLoopEntryBranchInLRRule_1 extends DropLoopEntryBranchInLRRule { - /** - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 - ; - */ - @CommentHasStringValue - public String input; - } - - public static class DropLoopEntryBranchInLRRule_2 extends DropLoopEntryBranchInLRRule { - /** - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 or - X1 and X2 and X3 and X4 and X5 and X6 and X7 - . 
- */ // Different in final token - @CommentHasStringValue - public String input; - } - - public static class DropLoopEntryBranchInLRRule_3 extends DropLoopEntryBranchInLRRule { - /** - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 or - not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 - ; - */ - @CommentHasStringValue - public String input; - } - - public static class DropLoopEntryBranchInLRRule_4 extends DropLoopEntryBranchInLRRule { - /** - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between 
X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 and - between X1 and X2 or between X3 and X4 - ; - */ - @CommentHasStringValue - public String input; - - @Override - public boolean ignore(String targetName) { - // passes, but still too slow in Python and JavaScript - return !Arrays.asList("Java", "CSharp", "Cpp", "Swift", "Rust").contains(targetName); - } - } - - public static class DropLoopEntryBranchInLRRule_5 extends DropLoopEntryBranchInLRRule { - /** - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z or - X ? Y : Z - ; - */ - @CommentHasStringValue - public String input; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexerDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexerDescriptors.java deleted file mode 100644 index 3b91a50f23..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalLexerDescriptors.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseLexerTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class SemPredEvalLexerDescriptors { - // Test for https://github.com/antlr/antlr4/issues/958 - public static class RuleSempredFunction extends BaseLexerTestDescriptor { - public String input = "aaa"; - /** - [@0,0:0='a',<1>,1:0] - [@1,1:1='a',<1>,1:1] - [@2,2:2='a',<1>,1:2] - [@3,3:2='',<-1>,1:3] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - T : 'a' {}? ; - */ - @CommentHasStringValue - public String grammar; - } - - public static class DisableRule extends BaseLexerTestDescriptor { - public String input = "enum abc"; - /** - [@0,0:3='enum',<2>,1:0] - [@1,5:7='abc',<3>,1:5] - [@2,8:7='',<-1>,1:8] - s0-' '->:s5=>4 - s0-'a'->:s6=>3 - s0-'e'->:s1=>3 - :s1=>3-'n'->:s2=>3 - :s2=>3-'u'->:s3=>3 - :s6=>3-'b'->:s6=>3 - :s6=>3-'c'->:s6=>3 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - E1 : 'enum' { }? ; - E2 : 'enum' { }? ; // winner not E1 or ID - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDFA() { return true; } - } - - public static class EnumNotID extends BaseLexerTestDescriptor { - public String input = "enum abc enum"; - /** - [@0,0:3='enum',<1>,1:0] - [@1,5:7='abc',<2>,1:5] - [@2,9:12='enum',<1>,1:9] - [@3,13:12='',<-1>,1:13] - s0-' '->:s3=>3 - */ - @CommentHasStringValue // - - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - ENUM : [a-z]+ { }? 
; - ID : [a-z]+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDFA() { return true; } - } - - public static class IDnotEnum extends BaseLexerTestDescriptor { - public String input = "enum abc enum"; - /** - [@0,0:3='enum',<2>,1:0] - [@1,5:7='abc',<2>,1:5] - [@2,9:12='enum',<2>,1:9] - [@3,13:12='',<-1>,1:13] - s0-' '->:s2=>3 - */ - @CommentHasStringValue // - - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - ENUM : [a-z]+ { }? ; - ID : [a-z]+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDFA() { return true; } - } - - public static class IDvsEnum extends BaseLexerTestDescriptor { - public String input = "enum abc enum"; - - /** - [@0,0:3='enum',<2>,1:0] - [@1,5:7='abc',<2>,1:5] - [@2,9:12='enum',<2>,1:9] - [@3,13:12='',<-1>,1:13] - s0-' '->:s5=>3 - s0-'a'->:s4=>2 - s0-'e'->:s1=>2 - :s1=>2-'n'->:s2=>2 - :s2=>2-'u'->:s3=>2 - :s4=>2-'b'->:s4=>2 - :s4=>2-'c'->:s4=>2 - */ - @CommentHasStringValue // no 'm'-> transition...conflicts with pred - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - ENUM : 'enum' { }? 
; - ID : 'a'..'z'+ ; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDFA() { return true; } - } - - public static class Indent extends BaseLexerTestDescriptor { - public String input = "abc\n def \n"; - /** - INDENT - [@0,0:2='abc',<1>,1:0] - [@1,3:3='\n',<3>,1:3] - [@2,4:5=' ',<2>,2:0] - [@3,6:8='def',<1>,2:2] - [@4,9:10=' ',<4>,2:5] - [@5,11:11='\n',<3>,2:7] - [@6,12:11='',<-1>,3:0] - s0-' - '->:s2=>3 - s0-'a'->:s1=>1 - s0-'d'->:s1=>1 - :s1=>1-'b'->:s1=>1 - :s1=>1-'c'->:s1=>1 - :s1=>1-'e'->:s1=>1 - :s1=>1-'f'->:s1=>1 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - ID : [a-z]+ ; - INDENT : [ \t]+ { }? - { } ; - NL : '\n'; - WS : [ \t]+ ; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDFA() { return true; } - } - - public static class LexerInputPositionSensitivePredicates extends BaseLexerTestDescriptor { - public String input = "a cde\nabcde\n"; - /** - a - cde - ab - cde - [@0,0:0='a',<1>,1:0] - [@1,2:4='cde',<2>,1:2] - [@2,6:7='ab',<1>,2:0] - [@3,8:10='cde',<2>,2:2] - [@4,12:11='',<-1>,3:0] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - WORD1 : ID1+ { } ; - WORD2 : ID2+ { } ; - fragment ID1 : { \< 2 }? [a-zA-Z]; - fragment ID2 : { >= 2 }? [a-zA-Z]; - WS : (' '|'\n') -> skip; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDFA() { return true; } - } - - public static class PredicatedKeywords extends BaseLexerTestDescriptor { - public String input = "enum enu a"; - /** - enum! 
- ID enu - ID a - [@0,0:3='enum',<1>,1:0] - [@1,5:7='enu',<2>,1:5] - [@2,9:9='a',<2>,1:9] - [@3,10:9='',<-1>,1:10] - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = ""; - public String grammarName = "L"; - - /** - lexer grammar L; - ENUM : [a-z]+ { }? { } ; - ID : [a-z]+ { } ; - WS : [ \n] -> skip ; - */ - @CommentHasStringValue - public String grammar; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java deleted file mode 100644 index 3993c94106..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java +++ /dev/null @@ -1,752 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class SemPredEvalParserDescriptors { - public static class ActionHidesPreds extends BaseParserTestDescriptor { - public String input = "x x y"; - /** - alt 1 - alt 1 - alt 1 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members {} - s : a+ ; - a : {} ID {}? {} - | {} ID {}? {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** Regular non-forced actions can create side effects used by semantic - * predicates and so we cannot evaluate any semantic predicate - * encountered after having seen a regular action. This includes - * during global follow operations. 
- */ - public static class ActionsHidePredsInGlobalFOLLOW extends BaseParserTestDescriptor { - public String input = "a!"; - /** - eval=true - parse - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members { - - } - s : e {} {}? {} '!' ; - t : e {} {}? ID ; - e : ID | ; // non-LL(1) so we use ATN - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for antlr/antlr4#196 - * "element+ in expression grammar doesn't parse properly" - * https://github.com/antlr/antlr4/issues/196 - */ - public static class AtomWithClosureInTranslatedLRRule extends BaseParserTestDescriptor { - public String input = "a+b+a"; - public String output = null; - public String errors = null; - public String startRule = "start"; - public String grammarName = "T"; - - /** - grammar T; - start : e[0] EOF; - e[] - : ( 'a' | 'b'+ ) ( {3 >= $_p}? '+' e[4] )* - ; - - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * We cannot collect predicates that are dependent on local context if - * we are doing a global follow. They appear as if they were not there at all. - */ - public static class DependentPredsInGlobalFOLLOW extends BaseParserTestDescriptor { - public String input = "a!"; - /** - eval=true - parse - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members { - - } - s : a[99] ; - a[] : e {}? {} '!' ; - b[] : e {}? 
ID ; - e : ID | ; // non-LL(1) so we use ATN - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class DependentPredNotInOuterCtxShouldBeIgnored extends BaseParserTestDescriptor { - public String input = "a;"; - public String output = "alt 2\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : b[2] ';' | b[2] '.' ; // decision in s drills down to ctx-dependent pred in a; - b[] : a[] ; - a[] - : {}? ID {} - | {}? ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * This is a regression test for antlr/antlr4#218 "ANTLR4 EOF Related Bug". - * https://github.com/antlr/antlr4/issues/218 - */ - public static class DisabledAlternative extends BaseParserTestDescriptor { - public String input = "hello"; - public String output = null; - public String errors = null; - public String startRule = "cppCompilationUnit"; - public String grammarName = "T"; - - /** - grammar T; - cppCompilationUnit : content+ EOF; - content: anything | {}? .; - anything: ANY_CHAR; - ANY_CHAR: [_a-zA-Z0-9]; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class IndependentPredNotPassedOuterCtxToAvoidCastException extends BaseParserTestDescriptor { - public String input = "a;"; - public String output = "alt 2\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : b ';' | b '.' ; - b : a ; - a - : {}? ID {} - | {}? 
ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NoTruePredsThrowsNoViableAlt extends BaseParserTestDescriptor { - public String input = "y 3 x 4"; - public String output = null; - public String errors = "line 1:0 no viable alternative at input 'y'\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a a; - a : {}? ID INT {} - | {}? ID INT {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Order extends BaseParserTestDescriptor { - public String input = "x y"; - /** - alt 1 - alt 1 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a {} a; // do 2x: once in ATN, next in DFA; - // action blocks lookahead from falling off of 'a' - // and looking into 2nd 'a' ref. !ctx dependent pred - a : ID {} - | {}? ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** Loopback doesn't eval predicate at start of alt */ - public static abstract class PredFromAltTestedInLoopBack extends BaseParserTestDescriptor { - public String startRule = "file_"; - public String grammarName = "T"; - - /** - grammar T; - file_ - @after {} - : para para EOF ; - para: paraContent NL NL ; - paraContent : ('s'|'x'|{})>}? 
NL)+ ; - NL : '\n' ; - s : 's' ; - X : 'x' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class PredFromAltTestedInLoopBack_1 extends PredFromAltTestedInLoopBack { - public String input = "s\n\n\nx\n"; - public String output = "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n"; - /** - line 5:0 mismatched input '' expecting {'s', ' - ', 'x'} - */ - @CommentHasStringValue - public String errors; - - @Override - public boolean ignore(String targetName) { - return !"Java".equals(targetName) && !"Swift".equals(targetName) && !"Rust".equals(targetName); - } - } - - public static class PredFromAltTestedInLoopBack_2 extends PredFromAltTestedInLoopBack { - public String input = "s\n\n\nx\n\n"; - public String output = "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x) \\n \\n) )\n"; - public String errors = null; - } - - public static abstract class PredTestedEvenWhenUnAmbig extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "primary"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members {} - primary - : ID {} - | {}? 'enum' {} - ; - ID : [a-z]+ ; - WS : [ \t\n\r]+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class PredTestedEvenWhenUnAmbig_1 extends PredTestedEvenWhenUnAmbig { - public String input = "abc"; - public String output = "ID abc\n"; - } - - public static class PredTestedEvenWhenUnAmbig_2 extends PredTestedEvenWhenUnAmbig { - public String input = "enum"; - public String output = null; - public String errors = "line 1:0 no viable alternative at input 'enum'\n"; - } - - /** - * In this case, we're passing a parameter into a rule that uses that - * information to predict the alternatives. This is the special case - * where we know exactly which context we are in. The context stack - * is empty and we have not dipped into the outer context to make a decision. 
- */ - public static class PredicateDependentOnArg extends BaseParserTestDescriptor { - public String input = "a b"; - /** - alt 2 - alt 1 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members {} - s : a[2] a[1]; - a[] - : {}? ID {} - | {}? ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** - * In this case, we have to ensure that the predicates are not tested - * during the closure after recognizing the 1st ID. The closure will - * fall off the end of 'a' 1st time and reach into the a[1] rule - * invocation. It should not execute predicates because it does not know - * what the parameter is. The context stack will not be empty and so - * they should be ignored. It will not affect recognition, however. We - * are really making sure the ATN simulation doesn't crash with context - * object issues when it encounters preds during FOLLOW. - */ - public static class PredicateDependentOnArg2 extends BaseParserTestDescriptor { - public String input = "a b"; - public String output = null; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members {} - s : a[2] a[1]; - a[] - : {}? ID - | {}? 
ID - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** During a global follow operation, we still collect semantic - * predicates as long as they are not dependent on local context - */ - public static class PredsInGlobalFOLLOW extends BaseParserTestDescriptor { - public String input = "a!"; - /** - eval=true - parse - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members { - - } - s : e {}? {} '!' ; - t : e {}? ID ; - e : ID | ; // non-LL(1) so we use ATN - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class RewindBeforePredEval extends BaseParserTestDescriptor { - public String input = "y 3 x 4"; - /** - alt 2 - alt 1 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a a; - a : {}? ID INT {} - | {}? ID INT {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class Simple extends BaseParserTestDescriptor { - public String input = "x y 3"; - /** - alt 2 - alt 2 - alt 3 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a a a; // do 3x: once in ATN, next in DFA then INT in ATN - a : {}? ID {} - | {}? 
ID {} - | INT {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SimpleValidate extends BaseParserTestDescriptor { - public String input = "x"; - public String output = null; - public String errors = "line 1:0 no viable alternative at input 'x'\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a ; - a : {}? ID {} - | {}? INT {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SimpleValidate2 extends BaseParserTestDescriptor { - public String input = "3 4 x"; - /** - alt 2 - alt 2 - */ - @CommentHasStringValue - public String output; - - public String errors = "line 1:4 no viable alternative at input 'x'\n"; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a a a; - a : {}? ID {} - | {}? INT {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ToLeft extends BaseParserTestDescriptor { - public String input = "x x y"; - /** - alt 2 - alt 2 - alt 2 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a+ ; - a : {}? ID {} - | {}? ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - /** In this case, we use predicates that depend on global information - * like we would do for a symbol table. We simply execute - * the predicates assuming that all necessary information is available. - * The i++ action is done outside of the prediction and so it is executed. 
- */ - public static class ToLeftWithVaryingPredicate extends BaseParserTestDescriptor { - public String input = "x x y"; - /** - i=1 - alt 2 - i=2 - alt 1 - i=3 - alt 2 - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - @parser::members {} - s : ({ - - } a)+ ; - a : {}? ID {} - | {}? ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class TwoUnpredicatedAlts extends BaseParserTestDescriptor { - public String input = "x; y"; - /** - alt 1 - alt 1 - */ - @CommentHasStringValue - public String output; - - /** - line 1:0 reportAttemptingFullContext d=0 (a), input='x' - line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x' - line 1:3 reportAttemptingFullContext d=0 (a), input='y' - line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : {} a ';' a; // do 2x: once in ATN, next in DFA - a : ID {} - | ID {} - | {}? 
ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDiagnosticErrors() { return true; } - } - - public static class TwoUnpredicatedAltsAndOneOrthogonalAlt extends BaseParserTestDescriptor { - public String input = "34; x; y"; - /** - alt 1 - alt 2 - alt 2 - */ - @CommentHasStringValue - public String output; - - /** - line 1:4 reportAttemptingFullContext d=0 (a), input='x' - line 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x' - line 1:7 reportAttemptingFullContext d=0 (a), input='y' - line 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : {} a ';' a ';' a; - a : INT {} - | ID {} // must pick this one for ID since pred is false - | ID {} - | {}? ID {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - @Override - public boolean showDiagnosticErrors() { return true; } - } - - public static class UnpredicatedPathsInAlt extends BaseParserTestDescriptor { - public String input = "x 4"; - public String output = "alt 1\n"; - public String errors = null; - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a {} - | b {} - ; - a : {}? 
ID INT - | ID INT - ; - b : ID ID - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ValidateInDFA extends BaseParserTestDescriptor { - public String input = "x ; y"; - public String output = null; - /** - line 1:0 no viable alternative at input 'x' - line 1:4 no viable alternative at input 'y' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "s"; - public String grammarName = "T"; - - /** - grammar T; - s : a ';' a; - // ';' helps us to resynchronize without consuming - // 2nd 'a' reference. We our testing that the DFA also - // throws an exception if the validating predicate fails - a : {}? ID {} - | {}? INT {} - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SetsDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SetsDescriptors.java deleted file mode 100644 index e877558992..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SetsDescriptors.java +++ /dev/null @@ -1,683 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.descriptors; - -import org.antlr.v4.test.runtime.BaseParserTestDescriptor; -import org.antlr.v4.test.runtime.CommentHasStringValue; - -public class SetsDescriptors { - public static class CharSetLiteral extends BaseParserTestDescriptor { - public String input = "A a B b"; - /** - A - a - B - b - */ - @CommentHasStringValue - public String output; - - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : (A {})+ ; - A : [AaBb] ; - WS : (' '|'\n')+ -> skip ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ComplementSet extends BaseParserTestDescriptor { - public String input = "a"; - public String output = null; - /** - line 1:0 token recognition error at: 'a' - line 1:1 missing {} at '' - */ - @CommentHasStringValue - public String errors; - - public String startRule = "parse"; - public String grammarName = "T"; - - /** - grammar T; - parse : ~NEW_LINE; - NEW_LINE: '\\r'? '\\n'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LexerOptionalSet extends BaseParserTestDescriptor { - public String input = "ac"; - public String output = "ac\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : ('a'|'b')? 
'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LexerPlusSet extends BaseParserTestDescriptor { - public String input = "abaac"; - public String output = "abaac\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : ('a'|'b')+ 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class LexerStarSet extends BaseParserTestDescriptor { - public String input = "abaac"; - public String output = "abaac\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : ('a'|'b')* 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NotChar extends BaseParserTestDescriptor { - public String input = "x"; - public String output = "x\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : ~'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NotCharSet extends BaseParserTestDescriptor { - public String input = "x"; - public String output = "x\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : ~('b'|'c') ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NotCharSetWithLabel extends BaseParserTestDescriptor { - public String input = "x"; - public String output = "x\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : h=~('b'|'c') ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class NotCharSetWithRuleRef3 extends BaseParserTestDescriptor { - public String input = "x"; - public String output = "x\n"; - public String errors = null; - public 
String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : ('a'|B) ; // this doesn't collapse to set but works - fragment - B : ~('a'|'c') ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class OptionalLexerSingleElement extends BaseParserTestDescriptor { - public String input = "bc"; - public String output = "bc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : 'b'? 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class OptionalSet extends BaseParserTestDescriptor { - public String input = "ac"; - public String output = "ac\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ('a'|'b')? 'c' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class OptionalSingleElement extends BaseParserTestDescriptor { - public String input = "bc"; - public String output = "bc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A? 
'c' {} ; - A : 'b' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ParserNotSet extends BaseParserTestDescriptor { - public String input = "zz"; - public String output = "z\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : t=~('x'|'y') 'z' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ParserNotToken extends BaseParserTestDescriptor { - public String input = "zz"; - public String output = "zz\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ~'x' 'z' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ParserNotTokenWithLabel extends BaseParserTestDescriptor { - public String input = "zz"; - public String output = "z\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : t=~'x' 'z' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class ParserSet extends BaseParserTestDescriptor { - public String input = "x"; - public String output = "x\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : t=('x'|'y') {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class PlusLexerSingleElement extends BaseParserTestDescriptor { - public String input = "bbbbc"; - public String output = "bbbbc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : 'b'+ 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class PlusSet extends BaseParserTestDescriptor { - public String input = "abaac"; - public String output = "abaac\n"; - public String errors = null; - public String startRule 
= "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ('a'|'b')+ 'c' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class RuleAsSet extends BaseParserTestDescriptor { - public String input = "b"; - public String output = "b\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a @after {} : 'a' | 'b' |'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class SeqDoesNotBecomeSet extends BaseParserTestDescriptor { - public String input = "34"; - public String output = "34\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : C {} ; - fragment A : '1' | '2'; - fragment B : '3' '4'; - C : A | B; - */ - @CommentHasStringValue - public String grammar; - - } - - public static abstract class StarLexerSingleElement extends BaseParserTestDescriptor { - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : A {} ; - A : 'b'* 'c' ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class StarLexerSingleElement_1 extends StarLexerSingleElement { - public String input = "bbbbc"; - public String output = "bbbbc\n"; - } - - public static class StarLexerSingleElement_2 extends StarLexerSingleElement { - public String input = "c"; - public String output = "c\n"; - } - - public static class StarSet extends BaseParserTestDescriptor { - public String input = "abaac"; - public String output = "abaac\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : ('a'|'b')* 'c' {} ; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class UnicodeUnescapedBMPSet extends BaseParserTestDescriptor { - public String input = "a\u00E4\u3042\u4E9Cc"; - public String output = 
"a\u00E4\u3042\u4E9Cc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS {} ; - // These are actually not escaped -- Java passes the - // raw unescaped Unicode values to the grammar compiler. - LETTERS : ('a'|'\u00E4'|'\u4E9C'|'\u3042')* 'c'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class UnicodeUnescapedBMPRangeSet extends BaseParserTestDescriptor { - public String input = "a\u00E1\u00E4\u00E1\u00E2\u00E5d"; - public String output = "a\u00E1\u00E4\u00E1\u00E2\u00E5d\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS* 'd' {} ; - // These are actually not escaped -- Java passes the - // raw unescaped Unicode values to the grammar compiler. - LETTERS : ('a'|'\u00E0'..'\u00E5'); - */ - @CommentHasStringValue - public String grammar; - - } - - public static class UnicodeEscapedBMPSet extends BaseParserTestDescriptor { - public String input = "a\u00E4\u3042\u4E9Cc"; - public String output = "a\u00E4\u3042\u4E9Cc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS {} ; - // Note the double-backslash to avoid Java passing - // unescaped values as part of the grammar. - LETTERS : ('a'|'\\u00E4'|'\\u4E9C'|'\\u3042')* 'c'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class UnicodeEscapedBMPRangeSet extends BaseParserTestDescriptor { - public String input = "a\u00E1\u00E4\u00E1\u00E2\u00E5d"; - public String output = "a\u00E1\u00E4\u00E1\u00E2\u00E5d\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS* 'd' {} ; - // Note the double-backslash to avoid Java passing - // unescaped values as part of the grammar. 
- LETTERS : ('a'|'\\u00E0'..'\\u00E5'); - */ - @CommentHasStringValue - public String grammar; - - } - - // TODO(bhamiltoncx): This needs to be an error, the V3 - // runtime used by the tool doesn't really understand unescaped code points > - // U+FFFF. - // public static class UnicodeUnescapedSMPSet extends BaseParserTestDescriptor { - // public String input = new StringBuilder() - // .append("a") - // .appendCodePoint(0x1D5C2) - // .appendCodePoint(0x1D5CE) - // .appendCodePoint(0x1D5BA) - // .append("c") - // .toString(); - // public String output = new StringBuilder() - // .append("a") - // .appendCodePoint(0x1D5C2) - // .appendCodePoint(0x1D5CE) - // .appendCodePoint(0x1D5BA) - // .append("c\n") - // .toString(); - // public String errors = null; - // public String startRule = "a"; - // public String grammarName = "T"; - - // /** - // grammar T; - // a : LETTERS {} ; - // // These are actually not escaped -- Java passes the - // // raw unescaped Unicode values to the grammar compiler. - // // - // // Each sequence is the UTF-16 encoding of a raw Unicode - // // SMP code point. - // LETTERS : ('a'|'\uD835\uDDBA'|'\uD835\uDDBE'|'\uD835\uDDC2'|'\uD835\uDDC8'|'\uD835\uDDCE')* 'c'; - // */ - // @CommentHasStringValue - // public String grammar; - - // } - - public static class UnicodeEscapedSMPSet extends BaseParserTestDescriptor { - public String input = new StringBuilder() - .append("a") - .appendCodePoint(0x1D5C2) - .appendCodePoint(0x1D5CE) - .appendCodePoint(0x1D5BA) - .append("c") - .toString(); - public String output = new StringBuilder() - .append("a") - .appendCodePoint(0x1D5C2) - .appendCodePoint(0x1D5CE) - .appendCodePoint(0x1D5BA) - .append("c\n") - .toString(); - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS {} ; - // Note the double-backslash to avoid Java passing - // unescaped values as part of the grammar. 
- LETTERS : ('a'|'\\u{1D5BA}'|'\\u{1D5BE}'|'\\u{1D5C2}'|'\\u{1D5C8}'|'\\u{1D5CE}')* 'c'; - */ - @CommentHasStringValue - public String grammar; - - } - - // Turns out Tool.java uses ANTLR 3's runtime, which means it can't use - // CodePointCharStream to understand unescaped code points > U+FFFF. - // - // TODO(bhamiltoncx): This needs to be an error, since we don't currently plan - // to port Tool.java to use ANTLR 4's runtime. - - // public static class UnicodeUnescapedSMPRangeSet extends BaseParserTestDescriptor { - // public String input = new StringBuilder() - // .append("a") - // .appendCodePoint(0x1D5C2) - // .appendCodePoint(0x1D5CE) - // .appendCodePoint(0x1D5BA) - // .append("d") - // .toString(); - // public String output = new StringBuilder() - // .append("a") - // .appendCodePoint(0x1D5C2) - // .appendCodePoint(0x1D5CE) - // .appendCodePoint(0x1D5BA) - // .append("d\n") - // .toString(); - // public String errors = null; - // public String startRule = "a"; - // public String grammarName = "T"; - - // /** - // grammar T; - // a : LETTERS* 'd' {} ; - // // These are actually not escaped -- Java passes the - // // raw unescaped Unicode values to the grammar compiler. - // LETTERS : ('a'|'\uD83D\uDE00'..'\uD83E\uDD43'); - // */ - // @CommentHasStringValue - // public String grammar; - - // } - - public static class UnicodeEscapedSMPRangeSet extends BaseParserTestDescriptor { - public String input = new StringBuilder() - .append("a") - .appendCodePoint(0x1F609) - .appendCodePoint(0x1F942) - .appendCodePoint(0x1F700) - .append("d") - .toString(); - public String output = new StringBuilder() - .append("a") - .appendCodePoint(0x1F609) - .appendCodePoint(0x1F942) - .appendCodePoint(0x1F700) - .append("d\n") - .toString(); - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS* 'd' {} ; - // Note the double-backslash to avoid Java passing - // unescaped values as part of the grammar. 
- LETTERS : ('a'|'\\u{1F600}'..'\\u{1F943}'); - */ - @CommentHasStringValue - public String grammar; - - } - - public static class UnicodeEscapedSMPRangeSetMismatch extends BaseParserTestDescriptor { - // Test the code points just before and just after the range. - public String input = new StringBuilder() - .append("a") - .appendCodePoint(0x1F5FF) - .appendCodePoint(0x1F944) - .append("d") - .toString(); - public String output = "ad\n"; - public String errors = new StringBuilder() - .append("line 1:1 token recognition error at: '") - .appendCodePoint(0x1F5FF) - .append("'\n") - .append("line 1:2 token recognition error at: '") - .appendCodePoint(0x1F944) - .append("'\n") - .toString(); - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS* 'd' {} ; - // Note the double-backslash to avoid Java passing - // unescaped values as part of the grammar. - LETTERS : ('a'|'\\u{1F600}'..'\\u{1F943}'); - */ - @CommentHasStringValue - public String grammar; - - } - - public static class UnicodeNegatedBMPSetIncludesSMPCodePoints extends BaseParserTestDescriptor { - public String input = "a\uD83D\uDE33\uD83D\uDE21\uD83D\uDE1D\uD83E\uDD13c"; - public String output = "a\uD83D\uDE33\uD83D\uDE21\uD83D\uDE1D\uD83E\uDD13c\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS {} ; - LETTERS : 'a' ~('b')+ 'c'; - */ - @CommentHasStringValue - public String grammar; - - } - - public static class UnicodeNegatedSMPSetIncludesBMPCodePoints extends BaseParserTestDescriptor { - public String input = "abc"; - public String output = "abc\n"; - public String errors = null; - public String startRule = "a"; - public String grammarName = "T"; - - /** - grammar T; - a : LETTERS {} ; - LETTERS : 'a' ~('\\u{1F600}'..'\\u{1F943}')+ 'c'; - */ - @CommentHasStringValue - public String grammar; - - } -} diff --git 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/BaseGoTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/BaseGoTest.java deleted file mode 100644 index 48791461fa..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/BaseGoTest.java +++ /dev/null @@ -1,976 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import 
org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import java.io.File; -import java.io.FileFilter; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertNotNull; -import static junit.framework.TestCase.assertTrue; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; - -public class BaseGoTest implements RuntimeTestSupport { - public File overall_tmpdir = null; - public File tmpdir = null; // this is where the parser package is stored, typically inside the tmpdir - private static File tmpGopath = null; - private static final String GO_RUNTIME_IMPORT_PATH = "github.com/antlr/antlr4/runtime/Go/antlr"; // TODO: Change this before merging with upstream - - /** - * If error during parser execution, store stderr here; can't return stdout - * and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - /** - * Copies all files from go runtime to a temporary folder that is inside a valid GOPATH project structure. 
- */ - public static void groupSetUp() throws Exception { - tmpGopath = new File(System.getProperty("java.io.tmpdir"), "antlr-goruntime-tmpgopath-" - + Long.toHexString(System.currentTimeMillis())); - - ArrayList pathsegments = new ArrayList(); - pathsegments.add("src"); - pathsegments.addAll(Arrays.asList(GO_RUNTIME_IMPORT_PATH.split("/"))); - - File tmpPackageDir = tmpGopath; - for (String pathsegment : pathsegments) { - tmpPackageDir = new File(tmpPackageDir, pathsegment); - } - if (!tmpPackageDir.mkdirs()) { - throw new Exception("Could not create temp go runtime package dirs!"); - } - - File[] runtimeFiles = locateRuntime().listFiles(); - if (runtimeFiles == null) { - throw new Exception("Go runtime file list is empty."); - } - for (File runtimeFile : runtimeFiles) { - File dest = new File(tmpPackageDir, runtimeFile.getName()); - copyFile(runtimeFile, dest); - } - - cacheGoRuntime(tmpPackageDir); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir.getPath(); - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - public static void groupTearDown() throws Exception { - eraseDirectory(tmpGopath); - } - - private static void cacheGoRuntime(File tmpPackageDir) throws Exception { - String goExecutable = locateGo(); - ProcessBuilder pb = new ProcessBuilder(goExecutable, "install", "-x"); - pb.directory(tmpPackageDir); - pb.environment().put("GOPATH", tmpGopath.getPath()); - pb.redirectErrorStream(true); - Process process = pb.start(); - StreamVacuum sucker = new StreamVacuum(process.getInputStream()); - sucker.start(); - int exit = process.waitFor(); - sucker.join(); - if (exit != 0) { - throw new Exception("Non-zero exit while caching go 
runtime, output: " + sucker.toString()); - } - } - - private static void copyFile(File source, File dest) throws IOException { - InputStream is = new FileInputStream(source); - OutputStream os = new FileOutputStream(dest); - byte[] buf = new byte[4 << 10]; - int l; - while ((l = is.read(buf)) > -1) { - os.write(buf, 0, l); - } - is.close(); - os.close(); - } - - public void testSetUp() throws Exception { - // new output dir for each test - String prop = System.getProperty("antlr-go-test-dir"); - if (prop != null && prop.length() > 0) { - overall_tmpdir = new File(prop); - } - else { - String threadName = Thread.currentThread().getName(); - overall_tmpdir = new File(System.getProperty("java.io.tmpdir"), - getClass().getSimpleName()+"-"+threadName+"-"+System.currentTimeMillis()); - } - - if ( overall_tmpdir.exists()) - this.eraseDirectory(overall_tmpdir); - - tmpdir = new File(overall_tmpdir, "parser"); - - if ( tmpdir.exists()) { - this.eraseDirectory(tmpdir); - } - antlrToolErrors = new StringBuilder(); - } - - protected org.antlr.v4.Tool newTool(String[] args) { - return new Tool(args); - } - - protected Tool newTool() { - return new Tool(new String[]{"-o", tmpdir.getPath()}); - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if (g.atn == null) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if (g.isLexer()) { - f = new LexerATNFactory((LexerGrammar) g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if (g.ast != null && !g.ast.hasErrors) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if 
(g.getImportedGrammars() != null) { // process imported grammars - // (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if (expecting != null && !expecting.trim().isEmpty()) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, - LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while (ttype != Token.EOF); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, ATN atn, CharStream input) { - LexerATNSimulator interp = new LexerATNSimulator(atn, - new DFA[] { new DFA( - atn.modeToStartState.get(Lexer.DEFAULT_MODE)) }, null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if (hitEOF) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if (ttype == Token.EOF) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if (t == IntStream.EOF) { - hitEOF = true; - } - } while (ttype != Token.EOF); - return tokenTypes; - } - - protected String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input, boolean showDFA) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, null, lexerName, "-no-listener"); - assertTrue(success); - 
writeFile(overall_tmpdir.toString(), "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execModule("Test.go"); - return output; - } -// -// public String execParser(String grammarFileName, String grammarStr, -// String parserName, String lexerName, String listenerName, -// String visitorName, String startRuleName, String input, -// boolean debug) -// { -// boolean success = rawGenerateAndBuildRecognizer(grammarFileName, -// grammarStr, parserName, lexerName, "-visitor"); -// assertTrue(success); -// writeFile(overall_tmpdir, "input", input); -// rawBuildRecognizerTestFile(parserName, lexerName, listenerName, -// visitorName, startRuleName, debug); -// return execRecognizer(); -// } - - @Override - public String execParser(String grammarFileName, String grammarStr, - String parserName, String lexerName, String listenerName, - String visitorName, String startRuleName, String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, parserName, lexerName, "-visitor"); - assertTrue(success); - writeFile(overall_tmpdir.toString(), "input", input); - rawBuildRecognizerTestFile(parserName, lexerName, listenerName, - visitorName, startRuleName, showDiagnosticErrors); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - String... extraOptions) { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, - parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - boolean defaultListener, String... 
extraOptions) { - ErrorQueue equeue = antlrOnString(getTmpDir(), "Go", grammarFileName, grammarStr, - defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - return true; - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - this.stderrDuringParse = null; - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, lexerName, listenerName, - visitorName, parserStartRuleName, debug); - } - } - - public String execRecognizer() { - return execModule("Test.go"); - } - - public String execModule(String fileName) { - String goExecutable = locateGo(); - String modulePath = new File(overall_tmpdir, fileName).getAbsolutePath(); - String inputPath = new File(overall_tmpdir, "input").getAbsolutePath(); - try { - ProcessBuilder builder = new ProcessBuilder(goExecutable, "run", modulePath, inputPath); - builder.environment().put("GOPATH", tmpGopath.getPath()); - builder.directory(overall_tmpdir); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if (stderrVacuum.toString().length() > 0) { - this.stderrDuringParse = stderrVacuum.toString(); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private static String locateTool(String tool) { - ArrayList paths = new ArrayList(); // default cap is about right - - // GOROOT should have priority if set - String goroot = System.getenv("GOROOT"); - if (goroot != null) { - paths.add(goroot 
+ File.separatorChar + "bin"); - } - - String pathEnv = System.getenv("PATH"); - if (pathEnv != null) { - paths.addAll(Arrays.asList(pathEnv.split(File.pathSeparator))); - } - - // OS specific default locations of binary dist as last resort - paths.add("/usr/local/go/bin"); - paths.add("c:\\Go\\bin"); - - for (String path : paths) { - File candidate = new File(new File(path), tool); - if (candidate.exists()) { - return candidate.getPath(); - } - candidate = new File(new File(path), tool+".exe"); - if (candidate.exists()) { - return candidate.getPath(); - } - } - return null; - } - - private static String locateGo() { - String propName = "antlr-go"; - String prop = System.getProperty(propName); - if (prop == null || prop.length() == 0) { - prop = locateTool("go"); - } - if (prop == null) { - throw new RuntimeException("Missing system property:" + propName); - } - return prop; - } - - private static File locateRuntime() { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource("Go"); - if ( runtimeSrc==null ) { - throw new RuntimeException("Cannot find Go ANTLR runtime"); - } - File runtimeDir = new File(runtimeSrc.getPath(), "antlr"); - if (!runtimeDir.exists()) { - throw new RuntimeException("Cannot find Go ANTLR runtime"); - } - return runtimeDir; - } - - // void ambig(List msgs, int[] expectedAmbigAlts, String - // expectedAmbigInput) - // throws Exception - // { - // ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); - // } - - // void ambig(List msgs, int i, int[] expectedAmbigAlts, String - // expectedAmbigInput) - // throws Exception - // { - // List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); - // AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); - // if ( a==null ) assertNull(expectedAmbigAlts); - // else { - // assertEquals(a.conflictingAlts.toString(), - // Arrays.toString(expectedAmbigAlts)); - // } - // assertEquals(expectedAmbigInput, a.input); - // } - - // void 
unreachable(List msgs, int[] expectedUnreachableAlts) - // throws Exception - // { - // unreachable(msgs, 0, expectedUnreachableAlts); - // } - - // void unreachable(List msgs, int i, int[] - // expectedUnreachableAlts) - // throws Exception - // { - // List amsgs = getMessagesOfType(msgs, - // UnreachableAltsMessage.class); - // UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); - // if ( u==null ) assertNull(expectedUnreachableAlts); - // else { - // assertEquals(u.conflictingAlts.toString(), - // Arrays.toString(expectedUnreachableAlts)); - // } - // } - - List getMessagesOfType(List msgs, - Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if (m.getClass() == c) - filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - System.out - .println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - // System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, - String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if (g.ast != null && !g.ast.hasErrors) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if (g.isLexer()) - factory = new LexerATNFactory((LexerGrammar) g); - g.atn = 
factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - // System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start + b.length(), end); - assertEquals(expected, snippet); - } - if (equeue.size() > 0) { - System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) throws Exception { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertNotNull("no error; " + expectedMessage.getErrorType() - + " expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), - Arrays.toString(foundMsg.getArgs())); - if (equeue.size() != 1) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) throws Exception { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertNotNull("no error; " + expectedMessage.getErrorType() - + " expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), - Arrays.toString(foundMsg.getArgs())); - if (equeue.size() != 1) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, ANTLRMessage expectedMessage) - throws Exception { - // 
System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertTrue("no error; " + expectedMessage.getErrorType() + " expected", - !equeue.errors.isEmpty()); - assertTrue("too many errors; " + equeue.errors, - equeue.errors.size() <= 1); - assertNotNull( - "couldn't find expected error: " - + expectedMessage.getErrorType(), foundMsg); - /* - * assertTrue("error is not a GrammarSemanticsMessage", foundMsg - * instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { - super(src); - } - - Set hide = new HashSet(); - - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if (hide.contains(t.getType())) { - ((WritableToken) t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(File dir) { - dir.mkdirs(); - } - - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - ST outputFileST = new ST( - "package main\n" + - "import (\n" - +" \"github.com/antlr/antlr4/runtime/Go/antlr\"\n" - +" \"./parser\"\n" - +" \"fmt\"\n" - +" \"os\"\n" - +")\n" - + "\n" - + "type TreeShapeListener struct {\n" - + " *parser.Base\n" - + "}\n" - + "\n" - + "func NewTreeShapeListener() *TreeShapeListener {\n" - + " return new(TreeShapeListener)\n" - + "}\n" - + "\n" - + "func (this *TreeShapeListener) EnterEveryRule(ctx antlr.ParserRuleContext) {\n" - + " for i := 0; i\\(input)\n" - + " stream := antlr.NewCommonTokenStream(lexer,0)\n" - + "" - + " 
p.BuildParseTrees = true\n" - + " tree := p.()\n" - + " antlr.ParseTreeWalkerDefault.Walk(NewTreeShapeListener(), tree)\n" - + "}\n"); - - ST createParserST = new ST( - " p := parser.New(stream)\n"); - if (debug) { - createParserST = new ST( - " p := parser.New(stream)\n" - + " p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName.substring(0, 1).toUpperCase() + parserStartRuleName.substring(1) ); - writeFile(overall_tmpdir.toString(), "Test.go", outputFileST.render()); - } - - - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "package main\n" + - "import (\n" - + " \"github.com/antlr/antlr4/runtime/Go/antlr\"\n" - + " \"./parser\"\n" - + " \"os\"\n" - + " \"fmt\"\n" - + ")\n" - + "\n" - + "func main() {\n" - + " input, err := antlr.NewFileStream(os.Args[1])\n" - + " if err != nil {\n" - + " fmt.Printf(\"Failed to find file: %v\", err)\n" - + " return\n" - + " }\n" - + " lexer := parser.New(input)\n" - + " stream := antlr.NewCommonTokenStream(lexer,0)\n" - + " stream.Fill()\n" - + " for _, t := range stream.GetAllTokens() {\n" - + " fmt.Println(t)\n" - + " }\n" - + (showDFA ? 
"fmt.Print(lexer.GetInterpreter().DecisionToDFA()[antlr.LexerDefaultMode].ToLexerString())\n" - : "") - + "}\n" - + "\n"); - outputFileST.add("lexerName", lexerName); - writeFile(overall_tmpdir.toString(), "Test.go", outputFileST.render()); - } - - public void writeRecognizer(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - if (parserName == null) { - writeLexerTestFile(lexerName, debug); - } - else { - writeParserTestFile(parserName, lexerName, listenerName, - visitorName, parserStartRuleName, debug); - } - } - - protected void eraseFilesEndingWith(final String filesEndingWith) { - File[] files = overall_tmpdir.listFiles(new FileFilter() { - @Override - public boolean accept(File pathname) { - return pathname.getName().endsWith(filesEndingWith); - } - }); - for (File file : files) { - file.delete(); - } - } - - protected static void eraseDirectory(File dir) { - File[] files = dir.listFiles(); - if (files != null) { - for (File file : files) { - if (file.isDirectory()) { - eraseDirectory(file); - } - else { - file.delete(); - } - } - } - dir.delete(); - } - - public void eraseTempDir() { - boolean doErase = true; - String propName = "antlr-go-erase-test-dir"; - String prop = System.getProperty(propName); - if (prop != null && prop.length() > 0) - doErase = Boolean.getBoolean(prop); - if (doErase) { - if ( overall_tmpdir.exists()) { - eraseDirectory(overall_tmpdir); - } - } - } - - public String getFirstLineOfException() { - if (this.stderrDuringParse == null) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix = "Exception in thread \"main\" "; - return lines[0].substring(prefix.length(), lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable we cannot - * rely on the output order, as the hashing algorithm or other aspects of - * the implementation may be different on differnt JDKs or platforms. 
Hence - * we take the Map, convert the keys to a List, sort them and Stringify the - * Map, which is a bit of a hack, but guarantees that we get the same order - * on all systems. We assume that the keys are strings. - * - * @param m - * The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. - */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p = 0; - - public IntTokenStream(IntegerList types) { - this.types = types; - } - - @Override - public void consume() { - p++; - } - - @Override - public int LA(int i) { - return LT(i).getType(); - } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { - return p; - } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return null; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if (rawIndex >= types.size()) - t = new CommonToken(Token.EOF); - else - t = new CommonToken(types.get(rawIndex)); - 
t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - @Override - public String getText() { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Interval interval) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public , V> LinkedHashMap sort( - Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRunner.java new file mode 100644 index 0000000000..69943126e9 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRunner.java @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.go; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.FileSeparator; + +public class GoRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Go"; + } + + @Override + public String getLexerSuffix() { + return "_lexer"; + } + + @Override + public String getParserSuffix() { + return "_parser"; + } + + @Override + public String getBaseListenerSuffix() { + return "_base_listener"; + } + + @Override + public String getListenerSuffix() { + return "_listener"; + } + + @Override + public String getBaseVisitorSuffix() { + return "_base_visitor"; + } + + @Override + public String getVisitorSuffix() { + return "_visitor"; + } + + @Override + protected String grammarNameToFileName(String grammarName) { + return grammarName.toLowerCase(); + } + + @Override + public String[] getExtraRunArgs() { + return new String[]{"run"}; + } + + private static final String GoRuntimeImportPath = "github.com/antlr4-go/antlr/v4"; + + private final static Map environment; + + private static String cachedGoMod; + private static String cachedGoSum; + private static ArrayList options = new ArrayList<>(); + + static { + environment = new HashMap<>(); + environment.put("GOWORK", "off"); + } + + @Override + protected void initRuntime(RunOptions runOptions) throws Exception { + String cachePath = getCachePath(); + mkdir(cachePath); + Path runtimeFilesPath = Paths.get(getRuntimePath("Go"), "antlr", "v4"); + String runtimeToolPath = getRuntimeToolPath(); + File goModFile = new File(cachePath, "go.mod"); + if (goModFile.exists()) + if (!goModFile.delete()) + throw new IOException("Can't delete " + 
goModFile); + Processor.run(new String[]{runtimeToolPath, "mod", "init", "test"}, cachePath, environment); + Processor.run(new String[]{runtimeToolPath, "mod", "edit", + "-replace=" + GoRuntimeImportPath + "=" + runtimeFilesPath}, cachePath, environment); + Processor.run(new String[]{runtimeToolPath, "mod", "edit", + "-require=" + GoRuntimeImportPath + "@v4.0.0"}, cachePath, environment); + cachedGoMod = readFile(cachePath + FileSeparator, "go.mod"); + } + + @Override + protected String grammarParseRuleToRecognizerName(String startRuleName) { + if (startRuleName == null || startRuleName.length() == 0) { + return null; + } + + // The rule name start is now translated to Start_ at runtime to avoid clashes with labels. + // Some tests use start as the first rule name, and we must cater for that + // + String rn = startRuleName.substring(0, 1).toUpperCase() + startRuleName.substring(1); + switch (rn) { + case "Start": + case "End": + case "Exception": + rn += "_"; + default: + } + return rn; + } + + @Override + protected List getTargetToolOptions(RunOptions ro) { + // Unfortunately this cannot be cached because all the synchronization is out of whack, and + // we end up return the options before they are populated. I prefer to make this small change + // at the expense of an object rather than try to change teh synchronized initialization, which is + // very fragile. + // Also, the options may need to change in the future according to the test options. This is safe + ArrayList options = new ArrayList<>(); + options.add("-o"); + options.add(tempTestDir.resolve("parser").toString()); + return options; + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + // We have already created a suitable go.mod file, though it may need to have go mod tidy run on it one time + // + writeFile(getTempDirPath(), "go.mod", cachedGoMod); + + // We need to run a go mod tidy once, now that we have source code. 
This will generate a valid go.sum file and + // recognize the indirect requirements in the go.mod file. Then we re-cache the go.mod and cache + // the go.sum and therefore save sparking a new process for all the remaining go tests. This is probably + // a race condition as these tests are run in parallel, but it does not matter as they are all going to + // generate the same go.mod and go.sum file anyway. + // + Exception ex = null; + if (cachedGoSum == null) { + try { + Processor.run(new String[]{getRuntimeToolPath(), "mod", "tidy"}, getTempDirPath(), environment); + } catch (InterruptedException | IOException e) { + ex = e; + } + cachedGoMod = readFile(getTempDirPath() + FileSeparator, "go.mod"); + cachedGoSum = readFile(getTempDirPath() + FileSeparator, "go.sum"); + } + + // We can now write the go.sum file, which will allow the go compiler to build the module + // + writeFile(getTempDirPath(), "go.sum", cachedGoSum); + + return new CompiledState(generatedState, ex); + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRuntimeTests.java new file mode 100644 index 0000000000..d09dffbc21 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.go; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class GoRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new GoRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeLexers.java deleted file mode 100644 index cd1bcc2742..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeLexers.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeParsers.java deleted file mode 100644 
index ce29ab1a00..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeParsers.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestFullContextParsing.java deleted file mode 100644 index e90645b39c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestFullContextParsing.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLeftRecursion.java deleted file mode 100644 index ab492c4aef..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLeftRecursion.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerErrors.java deleted file mode 100644 index c4c967e80a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerErrors.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerExec.java deleted file mode 100644 index da01840ddb..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerExec.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestListeners.java deleted file mode 100644 index 012c0e2ac7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestListeners.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParseTrees.java deleted file mode 100644 index 9b93978227..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParseTrees.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserErrors.java deleted file mode 100644 index 4b339cccc5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserErrors.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserExec.java deleted file mode 100644 index 5a453fcc2d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserExec.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestPerformance.java deleted file mode 100644 index d73b49cad3..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestPerformance.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalLexer.java deleted file mode 100644 index 1b632cd334..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalLexer.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalParser.java deleted file mode 100644 index 4e487e6589..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalParser.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSets.java deleted file mode 100644 index 860cbf081d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSets.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/BaseJavaTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/BaseJavaTest.java deleted file mode 100644 index c94e8e7a6a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/BaseJavaTest.java +++ /dev/null @@ -1,1199 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.Tool; -import org.antlr.v4.analysis.AnalysisPipeline; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.DecisionState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import javax.tools.JavaCompiler; -import javax.tools.JavaFileObject; -import 
javax.tools.StandardJavaFileManager; -import javax.tools.ToolProvider; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.PipedInputStream; -import java.io.PipedOutputStream; -import java.io.PrintStream; -import java.io.StringReader; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.URL; -import java.net.URLClassLoader; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertNotNull; -import static junit.framework.TestCase.assertTrue; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; - -public class BaseJavaTest implements RuntimeTestSupport { - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - /** - * When the {@code antlr.testinprocess} runtime property is set to - * {@code true}, the test suite will attempt to load generated classes into - * the test process for direct execution rather than invoking the JVM in a - * new process for testing. - *

    - *

    - * In-process testing results in a substantial performance improvement, but - * some test environments created by IDEs do not support the mechanisms - * currently used by the tests to dynamically load compiled code. Therefore, - * the default behavior (used in all other cases) favors reliable - * cross-system test execution by executing generated test code in a - * separate process.

    - */ - public static final boolean TEST_IN_SAME_PROCESS = Boolean.parseBoolean(System.getProperty("antlr.testinprocess")); - - /** - * When the {@code antlr.preserve-test-dir} runtime property is set to - * {@code true}, the temporary directories created by the test run will not - * be removed at the end of the test run, even for tests that completed - * successfully. - *

    - *

    - * The default behavior (used in all other cases) is removing the temporary - * directories for all tests which completed successfully, and preserving - * the directories for tests which failed.

    - */ - public static final boolean PRESERVE_TEST_DIR = true; //Boolean.parseBoolean(System.getProperty("antlr.preserve-test-dir")); - - /** - * The base test directory is the directory where generated files get placed - * during unit test execution. - *

    - *

    - * The default value for this property is the {@code java.io.tmpdir} system - * property, and can be overridden by setting the - * {@code antlr.java-test-dir} property to a custom location. Note that the - * {@code antlr.java-test-dir} property directly affects the - * {@link #CREATE_PER_TEST_DIRECTORIES} value as well.

    - */ - public static final String BASE_TEST_DIR; - - /** - * When {@code true}, a temporary directory will be created for each test - * executed during the test run. - *

    - *

    - * This value is {@code true} when the {@code antlr.java-test-dir} system - * property is set, and otherwise {@code false}.

    - */ - public static final boolean CREATE_PER_TEST_DIRECTORIES; - - static { - String baseTestDir = System.getProperty("antlr.java-test-dir"); - boolean perTestDirectories = false; - if ( baseTestDir==null || baseTestDir.isEmpty() ) { - baseTestDir = System.getProperty("java.io.tmpdir"); - perTestDirectories = true; - } - - if ( !new File(baseTestDir).isDirectory() ) { - throw new UnsupportedOperationException("The specified base test directory does not exist: "+baseTestDir); - } - - BASE_TEST_DIR = baseTestDir; - CREATE_PER_TEST_DIRECTORIES = perTestDirectories; - } - - /** - * Build up the full classpath we need, including the surefire path (if present) - */ - public static final String CLASSPATH = System.getProperty("java.class.path"); - - public String tmpdir = null; - - /** - * If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** - * Errors found while running antlr - */ - protected StringBuilder antlrToolErrors; - - @Override - public void testSetUp() throws Exception { -// STGroup.verbose = true; -// System.err.println("testSetUp "+Thread.currentThread().getName()); - if ( CREATE_PER_TEST_DIRECTORIES ) { - // new output dir for each test - String threadName = Thread.currentThread().getName(); - String testDirectory = getClass().getSimpleName()+"-"+threadName+"-"+System.nanoTime(); - tmpdir = new File(BASE_TEST_DIR, testDirectory).getAbsolutePath(); - } - else { - tmpdir = new File(BASE_TEST_DIR).getAbsolutePath(); - if ( !PRESERVE_TEST_DIR && new File(tmpdir).exists() ) { - eraseFiles(); - } - } - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - 
public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if ( g.atn==null ) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if ( g.isLexer() ) { - f = new LexerATNFactory((LexerGrammar) g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if ( useSerializer ) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { -// System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - public DFA createDFA(Grammar g, DecisionState s) { -// PredictionDFAFactory conv = new PredictionDFAFactory(g, s); -// DFA dfa = conv.createDFA(); -// conv.issueAmbiguityWarnings(); -// System.out.print("DFA="+dfa); -// return dfa; - return null; - } - -// public void minimizeDFA(DFA dfa) { -// DFAMinimizer dmin = new DFAMinimizer(dfa); -// dfa.minimized = dmin.minimize(); -// } - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if ( expecting!=null && !expecting.trim().isEmpty() ) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList 
getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while ( ttype!=Token.EOF ); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, - ATN atn, - CharStream input) { - LexerATNSimulator interp = new LexerATNSimulator(atn, new DFA[]{new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE))}, null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if ( hitEOF ) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if ( ttype==Token.EOF ) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if ( t==IntStream.EOF ) { - hitEOF = true; - } - } while ( ttype!=Token.EOF ); - return tokenTypes; - } - - List checkRuleDFA(String gtext, String ruleName, String expecting) - throws Exception { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - ATNState s = atn.ruleToStartState[g.getRule(ruleName).index]; - if ( s==null ) { - System.err.println("no such rule: "+ruleName); - return null; - } - ATNState t = s.transition(0).target; - if ( !(t instanceof DecisionState) ) { - System.out.println(ruleName+" has no decision"); - return null; - } - DecisionState blk = (DecisionState) t; - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - List checkRuleDFA(String gtext, int decision, String expecting) - throws Exception { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - DecisionState blk = atn.decisionToState.get(decision); - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - void checkRuleDFA(Grammar g, DecisionState blk, String expecting) - throws Exception { - DFA dfa = 
createDFA(g, blk); - String result = null; - if ( dfa!=null ) result = dfa.toString(); - assertEquals(expecting, result); - } - - List checkLexerDFA(String gtext, String expecting) - throws Exception { - return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); - } - - List checkLexerDFA(String gtext, String modeName, String expecting) - throws Exception { - ErrorQueue equeue = new ErrorQueue(); - LexerGrammar g = new LexerGrammar(gtext, equeue); - g.atn = createATN(g, false); -// LexerATNToDFAConverter conv = new LexerATNToDFAConverter(g); -// DFA dfa = conv.createDFA(modeName); -// g.setLookaheadDFA(0, dfa); // only one decision to worry about -// -// String result = null; -// if ( dfa!=null ) result = dfa.toString(); -// assertEquals(expecting, result); -// -// return equeue.all; - return null; - } - - protected String load(String fileName, String encoding) - throws IOException { - if ( fileName==null ) { - return null; - } - - String fullFileName = getClass().getPackage().getName().replace('.', '/')+'/'+fileName; - int size = 65000; - InputStreamReader isr; - InputStream fis = getClass().getClassLoader().getResourceAsStream(fullFileName); - if ( encoding!=null ) { - isr = new InputStreamReader(fis, encoding); - } - else { - isr = new InputStreamReader(fis); - } - try { - char[] data = new char[size]; - int n = isr.read(data); - return new String(data, 0, n); - } finally { - isr.close(); - } - } - - /** - * Wow! much faster than compiling outside of VM. Finicky though. - * Had rules called r and modulo. Wouldn't compile til I changed to 'a'. - */ - protected boolean compile(String... 
fileNames) { - List files = new ArrayList(); - for (String fileName : fileNames) { - File f = new File(tmpdir, fileName); - files.add(f); - } - - JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); -// DiagnosticCollector diagnostics = -// new DiagnosticCollector(); - - StandardJavaFileManager fileManager = - compiler.getStandardFileManager(null, null, null); - - Iterable compilationUnits = - fileManager.getJavaFileObjectsFromFiles(files); - - Iterable compileOptions = - Arrays.asList("-g", "-source", "1.6", "-target", "1.6", "-implicit:class", "-Xlint:-options", "-d", tmpdir, "-cp", tmpdir+pathSep+CLASSPATH); - - JavaCompiler.CompilationTask task = - compiler.getTask(null, fileManager, null, compileOptions, null, - compilationUnits); - boolean ok = task.call(); - - try { - fileManager.close(); - } catch (IOException ioe) { - ioe.printStackTrace(System.err); - } - - return ok; - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - compile("Test.java"); - String output = execClass("Test"); - return output; - } - - public ParseTree execParser(String startRuleName, String input, - String parserName, String lexerName) - throws Exception - { - Pair pl = getParserAndLexer(input, parserName, lexerName); - Parser parser = pl.a; - return execStartRule(startRuleName, parser); - } - - public ParseTree execStartRule(String startRuleName, Parser parser) - throws IllegalAccessException, InvocationTargetException, - NoSuchMethodException { - Method startRule = null; - Object[] args = null; 
- try { - startRule = parser.getClass().getMethod(startRuleName); - } catch (NoSuchMethodException nsme) { - // try with int _p arg for recursive func - startRule = parser.getClass().getMethod(startRuleName, int.class); - args = new Integer[]{0}; - } - ParseTree result = (ParseTree) startRule.invoke(parser, args); -// System.out.println("parse tree = "+result.toStringTree(parser)); - return result; - } - - public Pair getParserAndLexer(String input, - String parserName, String lexerName) - throws Exception { - final Class lexerClass = loadLexerClassFromTempDir(lexerName); - final Class parserClass = loadParserClassFromTempDir(parserName); - - ANTLRInputStream in = new ANTLRInputStream(new StringReader(input)); - - Class c = lexerClass.asSubclass(Lexer.class); - Constructor ctor = c.getConstructor(CharStream.class); - Lexer lexer = ctor.newInstance(in); - - Class pc = parserClass.asSubclass(Parser.class); - Constructor pctor = pc.getConstructor(TokenStream.class); - CommonTokenStream tokens = new CommonTokenStream(lexer); - Parser parser = pctor.newInstance(tokens); - return new Pair(parser, lexer); - } - - public Class loadClassFromTempDir(String name) throws Exception { - ClassLoader loader = - new URLClassLoader(new URL[]{new File(tmpdir).toURI().toURL()}, - ClassLoader.getSystemClassLoader()); - return loader.loadClass(name); - } - - public Class loadLexerClassFromTempDir(String name) throws Exception { - return loadClassFromTempDir(name).asSubclass(Lexer.class); - } - - public Class loadParserClassFromTempDir(String name) throws Exception { - return loadClassFromTempDir(name).asSubclass(Parser.class); - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) - { - return execParser(grammarFileName, grammarStr, parserName, lexerName, - listenerName, visitorName, 
startRuleName, input, showDiagnosticErrors, false); - } - - /** ANTLR isn't thread-safe to process grammars so we use a global lock for testing */ - public static final Object antlrLock = new Object(); - - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors, - boolean profile) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(tmpdir, "input", input); - return rawExecRecognizer(parserName, - lexerName, - startRuleName, - showDiagnosticErrors, - profile); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... 
extraOptions) - { - ErrorQueue equeue = - BaseRuntimeTest.antlrOnString(getTmpDir(), "Java", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".java"); - } - if ( parserName!=null ) { - files.add(parserName+".java"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarName+"Listener.java"); - files.add(grammarName+"BaseListener.java"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarName+"Visitor.java"); - files.add(grammarName+"BaseVisitor.java"); - } - } - boolean allIsWell = compile(files.toArray(new String[files.size()])); - return allIsWell; - } - - protected String rawExecRecognizer(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - this.stderrDuringParse = null; - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - compile("Test.java"); - return execClass("Test"); - } - - public String execRecognizer() { - return execClass("Test"); - } - - public String execClass(String className) { - if (TEST_IN_SAME_PROCESS) { - try { - ClassLoader loader = new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, ClassLoader.getSystemClassLoader()); - final Class mainClass = (Class)loader.loadClass(className); - final Method mainMethod = mainClass.getDeclaredMethod("main", String[].class); - PipedInputStream stdoutIn = new PipedInputStream(); - PipedInputStream stderrIn = new PipedInputStream(); - PipedOutputStream stdoutOut = new PipedOutputStream(stdoutIn); - PipedOutputStream stderrOut = new PipedOutputStream(stderrIn); - StreamVacuum stdoutVacuum = new 
StreamVacuum(stdoutIn); - StreamVacuum stderrVacuum = new StreamVacuum(stderrIn); - - PrintStream originalOut = System.out; - System.setOut(new PrintStream(stdoutOut)); - try { - PrintStream originalErr = System.err; - try { - System.setErr(new PrintStream(stderrOut)); - stdoutVacuum.start(); - stderrVacuum.start(); - mainMethod.invoke(null, (Object)new String[] { new File(tmpdir, "input").getAbsolutePath() }); - } - finally { - System.setErr(originalErr); - } - } - finally { - System.setOut(originalOut); - } - - stdoutOut.close(); - stderrOut.close(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if ( stderrVacuum.toString().length()>0 ) { - this.stderrDuringParse = stderrVacuum.toString(); - } - return output; - } - catch (Exception ex) { - throw new RuntimeException(ex); - } - } - - try { - String[] args = new String[] { - "java", "-classpath", tmpdir+pathSep+CLASSPATH, - "-Dfile.encoding=UTF-8", - className, new File(tmpdir, "input").getAbsolutePath() - }; -// String cmdLine = Utils.join(args, " "); -// System.err.println("execParser: "+cmdLine); - Process process = - Runtime.getRuntime().exec(args, null, new File(tmpdir)); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if ( stderrVacuum.toString().length()>0 ) { - this.stderrDuringParse = stderrVacuum.toString(); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - -// void ambig(List msgs, int[] expectedAmbigAlts, String expectedAmbigInput) -// throws Exception -// { -// ambig(msgs, 0, expectedAmbigAlts, 
expectedAmbigInput); -// } - -// void ambig(List msgs, int i, int[] expectedAmbigAlts, String expectedAmbigInput) -// throws Exception -// { -// List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); -// AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); -// if ( a==null ) assertNull(expectedAmbigAlts); -// else { -// assertEquals(a.conflictingAlts.toString(), Arrays.toString(expectedAmbigAlts)); -// } -// assertEquals(expectedAmbigInput, a.input); -// } - -// void unreachable(List msgs, int[] expectedUnreachableAlts) -// throws Exception -// { -// unreachable(msgs, 0, expectedUnreachableAlts); -// } - -// void unreachable(List msgs, int i, int[] expectedUnreachableAlts) -// throws Exception -// { -// List amsgs = getMessagesOfType(msgs, UnreachableAltsMessage.class); -// UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); -// if ( u==null ) assertNull(expectedUnreachableAlts); -// else { -// assertEquals(u.conflictingAlts.toString(), Arrays.toString(expectedUnreachableAlts)); -// } -// } - - List getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - public void checkRuleATN(Grammar g, String ruleName, String expecting) { -// DOTGenerator dot = new DOTGenerator(g); -// System.out.println(dot.getDOT(g.atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = g.getATN().ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - 
st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); - g.atn = factory.createATN(); - - AnalysisPipeline anal = new AnalysisPipeline(g); - anal.process(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(false); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { -// System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - 
assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, - ANTLRMessage expectedMessage) - throws Exception - { - //System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertTrue("no error; "+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); - assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); - assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); - /* - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if ( hide.contains(t.getType()) ) { - ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void writeTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - ST outputFileST = new ST( - "import org.antlr.v4.runtime.*;\n" + - "import org.antlr.v4.runtime.tree.*;\n" + - "import org.antlr.v4.runtime.atn.*;\n" + - "import java.nio.file.Paths;\n"+ - "import java.util.Arrays;\n"+ - "\n" 
+ - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = CharStreams.fromPath(Paths.get(args[0]));\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " \n"+ - " parser.setBuildParseTree(true);\n" + - " \n"+ - " ParserRuleContext tree = parser.();\n" + - " System.out.println(Arrays.toString(profiler.getDecisionInfo()));\n" + - " ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" + - " }\n" + - "\n" + - " static class TreeShapeListener implements ParseTreeListener {\n" + - " @Override public void visitTerminal(TerminalNode node) { }\n" + - " @Override public void visitErrorNode(ErrorNode node) { }\n" + - " @Override public void exitEveryRule(ParserRuleContext ctx) { }\n" + - "\n" + - " @Override\n" + - " public void enterEveryRule(ParserRuleContext ctx) {\n" + - " for (int i = 0; i \\< ctx.getChildCount(); i++) {\n" + - " ParseTree parent = ctx.getChild(i).getParent();\n" + - " if (!(parent instanceof RuleNode) || ((RuleNode)parent).getRuleContext() != ctx) {\n" + - " throw new IllegalStateException(\"Invalid parse tree shape detected.\");\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - ST createParserST = new ST(" parser = new (tokens);\n"); - if ( debug ) { - createParserST = - new ST( - " parser = new (tokens);\n" + - " parser.addErrorListener(new DiagnosticErrorListener());\n"); - } - if ( profile ) { - outputFileST.add("profile", - "ProfilingATNSimulator profiler = new ProfilingATNSimulator(parser);\n" + - "parser.setInterpreter(profiler);"); - } - else { - outputFileST.add("profile", new ArrayList()); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.java", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, 
boolean showDFA) { - ST outputFileST = new ST( - "import java.nio.file.Paths;\n" + - "import org.antlr.v4.runtime.*;\n" + - "\n" + - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = CharStreams.fromPath(Paths.get(args[0]));\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " tokens.fill();\n" + - " for (Object t : tokens.getTokens()) System.out.println(t);\n" + - (showDFA?"System.out.print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString());\n":"")+ - " }\n" + - "}" - ); - - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "Test.java", outputFileST.render()); - } - - public void writeRecognizerAndCompile(String parserName, String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) { - if ( parserName==null ) { - writeLexerTestFile(lexerName, debug); - } - else { - writeTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - compile("Test.java"); - } - - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseFiles() { - if (tmpdir == null) { - return; - } - - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - - public void eraseTempDir() { - File tmpdirF = new File(tmpdir); - if ( tmpdirF.exists() ) { - eraseFiles(); - tmpdirF.delete(); - } - } - - public String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return 
lines[0].substring(prefix.length(),lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable - * we cannot rely on the output order, as the hashing algorithm or other aspects - * of the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a - * bit of a hack, but guarantees that we get the same order on all systems. We assume that - * the keys are strings. - * - * @param m The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. - */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - public IntegerList types; - int p=0; - public IntTokenStream(IntegerList types) { this.types = types; } - - @Override - public void consume() { p++; } - - @Override - public int LA(int i) { return LT(i).getType(); } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { return p; } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public 
String getSourceName() { - return UNKNOWN_SOURCE_NAME; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); - else t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - - @Override - public String getText() { - throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(Interval interval) { - throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRunner.java new file mode 100644 index 0000000000..fd8dbce398 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRunner.java @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.java; + +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.ProfilingATNSimulator; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.ParseTreeWalker; +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.java.helpers.CustomStreamErrorListener; +import org.antlr.v4.test.runtime.java.helpers.RuntimeTestLexer; +import org.antlr.v4.test.runtime.java.helpers.RuntimeTestParser; +import org.antlr.v4.test.runtime.java.helpers.TreeShapeListener; +import org.antlr.v4.test.runtime.states.*; + +import javax.tools.JavaCompiler; +import javax.tools.JavaFileObject; +import javax.tools.StandardJavaFileManager; +import javax.tools.ToolProvider; +import java.io.*; +import java.lang.reflect.Method; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.antlr.v4.test.runtime.FileUtils.replaceInFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.PathSeparator; + +public class JavaRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Java"; + } + + public static final String classPath = System.getProperty("java.class.path"); + + public static final String runtimeTestLexerName = "org.antlr.v4.test.runtime.java.helpers.RuntimeTestLexer"; + public static final String runtimeTestParserName = "org.antlr.v4.test.runtime.java.helpers.RuntimeTestParser"; + + public static final String runtimeHelpersPath = Paths.get(RuntimeTestUtils.runtimeTestsuitePath.toString(), + "test", "org", "antlr", "v4", "test", "runtime", "java", "helpers").toString(); + + private static JavaCompiler compiler; + + private final static DiagnosticErrorListener DiagnosticErrorListenerInstance = new DiagnosticErrorListener(); + + public JavaRunner(Path tempDir, boolean 
saveTestDir) { + super(tempDir, saveTestDir); + } + + public JavaRunner() { + super(); + } + + @Override + protected void initRuntime(RunOptions runOptions) { + compiler = ToolProvider.getSystemJavaCompiler(); + } + + @Override + protected String getCompilerName() { + return "javac"; + } + + @Override + protected void writeInputFile(RunOptions runOptions) {} + + @Override + protected void writeRecognizerFile(RunOptions runOptions) {} + + @Override + protected JavaCompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + String tempTestDir = getTempDirPath(); + + List generatedFiles = generatedState.generatedFiles; + GeneratedFile firstFile = generatedFiles.get(0); + + if (!firstFile.isParser) { + try { + // superClass for combined grammar generates the same extends base class for Lexer and Parser + // So, for lexer it should be replaced on correct base lexer class + replaceInFile(Paths.get(getTempDirPath(), firstFile.name), + "extends " + runtimeTestParserName + " {", + "extends " + runtimeTestLexerName + " {"); + } catch (IOException e) { + return new JavaCompiledState(generatedState, null, null, null, e); + } + } + + ClassLoader loader = null; + Class lexer = null; + Class parser = null; + Exception exception = null; + + try { + StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, null); + + ClassLoader systemClassLoader = ClassLoader.getSystemClassLoader(); + + List files = new ArrayList<>(); + if (runOptions.lexerName != null) { + files.add(new File(tempTestDir, runOptions.lexerName + ".java")); + } + if (runOptions.parserName != null) { + files.add(new File(tempTestDir, runOptions.parserName + ".java")); + } + + Iterable compilationUnits = fileManager.getJavaFileObjectsFromFiles(files); + + Iterable compileOptions = + Arrays.asList("-g", "-source", "1.8", "-target", "1.8", "-implicit:class", "-Xlint:-options", "-d", + tempTestDir, "-cp", tempTestDir + PathSeparator + runtimeHelpersPath + PathSeparator + 
classPath); + + JavaCompiler.CompilationTask task = + compiler.getTask(null, fileManager, null, compileOptions, null, + compilationUnits); + task.call(); + + loader = new URLClassLoader(new URL[]{new File(tempTestDir).toURI().toURL()}, systemClassLoader); + if (runOptions.lexerName != null) { + lexer = loader.loadClass(runOptions.lexerName).asSubclass(Lexer.class); + } + if (runOptions.parserName != null) { + parser = loader.loadClass(runOptions.parserName).asSubclass(Parser.class); + } + } catch (Exception ex) { + exception = ex; + } + + return new JavaCompiledState(generatedState, loader, lexer, parser, exception); + } + + @Override + protected ExecutedState execute(RunOptions runOptions, CompiledState compiledState) { + JavaCompiledState javaCompiledState = (JavaCompiledState) compiledState; + String output = null; + String errors = null; + ParseTree parseTree = null; + Exception exception = null; + + try { + InMemoryStreamHelper outputStreamHelper = InMemoryStreamHelper.initialize(); + InMemoryStreamHelper errorsStreamHelper = InMemoryStreamHelper.initialize(); + + PrintStream outStream = new PrintStream(outputStreamHelper.pipedOutputStream); + CustomStreamErrorListener errorListener = new CustomStreamErrorListener(new PrintStream(errorsStreamHelper.pipedOutputStream)); + + CommonTokenStream tokenStream; + RuntimeTestLexer lexer; + if (runOptions.lexerName != null) { + lexer = (RuntimeTestLexer) javaCompiledState.initializeLexer(runOptions.input); + lexer.setOutStream(outStream); + lexer.removeErrorListeners(); + lexer.addErrorListener(errorListener); + tokenStream = new CommonTokenStream(lexer); + } else { + lexer = null; + tokenStream = null; + } + + if (runOptions.parserName != null) { + RuntimeTestParser parser = (RuntimeTestParser) javaCompiledState.initializeParser(tokenStream); + parser.setOutStream(outStream); + parser.removeErrorListeners(); + parser.addErrorListener(errorListener); + + if (runOptions.showDiagnosticErrors) { + 
parser.addErrorListener(DiagnosticErrorListenerInstance); + } + + if (runOptions.traceATN) { + // Setting trace_atn_sim isn't thread-safe, + // But it's used only in helper TraceATN that is not integrated into tests infrastructure + ParserATNSimulator.trace_atn_sim = true; + } + + ProfilingATNSimulator profiler = null; + if (runOptions.profile) { + profiler = new ProfilingATNSimulator(parser); + parser.setInterpreter(profiler); + } + parser.getInterpreter().setPredictionMode(runOptions.predictionMode); + parser.setBuildParseTree(runOptions.buildParseTree); + + Method startRule; + Object[] args = null; + try { + startRule = javaCompiledState.parser.getMethod(runOptions.startRuleName); + } catch (NoSuchMethodException noSuchMethodException) { + // try with int _p arg for recursive func + startRule = javaCompiledState.parser.getMethod(runOptions.startRuleName, int.class); + args = new Integer[]{0}; + } + + parseTree = (ParserRuleContext) startRule.invoke(parser, args); + + if (runOptions.profile) { + outStream.println(Arrays.toString(profiler.getDecisionInfo())); + } + + ParseTreeWalker.DEFAULT.walk(TreeShapeListener.INSTANCE, parseTree); + } + else { + assert tokenStream != null; + tokenStream.fill(); + for (Object t : tokenStream.getTokens()) { + outStream.println(t); + } + if (runOptions.showDFA) { + outStream.print(lexer.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString()); + } + } + + output = outputStreamHelper.close(); + errors = errorsStreamHelper.close(); + } catch (Exception ex) { + exception = ex; + } + return new JavaExecutedState(javaCompiledState, output, errors, parseTree, exception); + } + + static class InMemoryStreamHelper { + private final PipedOutputStream pipedOutputStream; + private final StreamReader streamReader; + + private InMemoryStreamHelper(PipedOutputStream pipedOutputStream, StreamReader streamReader) { + this.pipedOutputStream = pipedOutputStream; + this.streamReader = streamReader; + } + + public static InMemoryStreamHelper 
initialize() throws IOException { + PipedInputStream pipedInputStream = new PipedInputStream(); + PipedOutputStream pipedOutputStream = new PipedOutputStream(pipedInputStream); + StreamReader stdoutReader = new StreamReader(pipedInputStream); + stdoutReader.start(); + return new InMemoryStreamHelper(pipedOutputStream, stdoutReader); + } + + public String close() throws InterruptedException, IOException { + pipedOutputStream.close(); + streamReader.join(); + return streamReader.toString(); + } + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRuntimeTests.java new file mode 100644 index 0000000000..a6a83d8d2a --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.java; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class JavaRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new JavaRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java index f5bb891ea7..cbe1a71e0d 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java @@ -8,11 +8,10 @@ import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CharStreams; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.io.InputStream; import java.io.Reader; import java.nio.channels.SeekableByteChannel; @@ -23,15 +22,10 @@ import java.nio.file.Files; import java.nio.file.Path; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; public class TestCharStreams { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Test public void fromBMPStringHasExpectedSize() { CharStream s = CharStreams.fromString("hello"); @@ -50,19 +44,19 @@ public void fromSMPStringHasExpectedSize() { } @Test - public void fromBMPUTF8PathHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); - Files.write(p, "hello".getBytes(StandardCharsets.UTF_8)); - CharStream s = CharStreams.fromPath(p); + public void fromBMPUTF8PathHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path test = new 
File(tempDir.toString(), "test").toPath(); + Files.write(test, "hello".getBytes(StandardCharsets.UTF_8)); + CharStream s = CharStreams.fromPath(test); assertEquals(5, s.size()); assertEquals(0, s.index()); assertEquals("hello", s.toString()); - assertEquals(p.toString(), s.getSourceName()); + assertEquals(test.toString(), s.getSourceName()); } @Test - public void fromSMPUTF8PathHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8PathHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); CharStream s = CharStreams.fromPath(p); assertEquals(7, s.size()); @@ -72,8 +66,8 @@ public void fromSMPUTF8PathHasExpectedSize() throws Exception { } @Test - public void fromBMPUTF8InputStreamHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromBMPUTF8InputStreamHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello".getBytes(StandardCharsets.UTF_8)); try (InputStream is = Files.newInputStream(p)) { CharStream s = CharStreams.fromStream(is); @@ -84,8 +78,8 @@ public void fromBMPUTF8InputStreamHasExpectedSize() throws Exception { } @Test - public void fromSMPUTF8InputStreamHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8InputStreamHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (InputStream is = Files.newInputStream(p)) { CharStream s = CharStreams.fromStream(is); @@ -96,8 +90,8 @@ public void fromSMPUTF8InputStreamHasExpectedSize() throws Exception { } @Test - public void fromBMPUTF8ChannelHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromBMPUTF8ChannelHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = 
getTestFile(tempDir); Files.write(p, "hello".getBytes(StandardCharsets.UTF_8)); try (SeekableByteChannel c = Files.newByteChannel(p)) { CharStream s = CharStreams.fromChannel( @@ -110,8 +104,8 @@ public void fromBMPUTF8ChannelHasExpectedSize() throws Exception { } @Test - public void fromSMPUTF8ChannelHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8ChannelHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (SeekableByteChannel c = Files.newByteChannel(p)) { CharStream s = CharStreams.fromChannel( @@ -124,9 +118,9 @@ public void fromSMPUTF8ChannelHasExpectedSize() throws Exception { } @Test - public void fromInvalidUTF8BytesChannelReplacesWithSubstCharInReplaceMode() + public void fromInvalidUTF8BytesChannelReplacesWithSubstCharInReplaceMode(@TempDir Path tempDir) throws Exception { - Path p = folder.newFile().toPath(); + Path p = getTestFile(tempDir); byte[] toWrite = new byte[] { (byte)0xCA, (byte)0xFE, (byte)0xFE, (byte)0xED }; Files.write(p, toWrite); try (SeekableByteChannel c = Files.newByteChannel(p)) { @@ -139,19 +133,21 @@ public void fromInvalidUTF8BytesChannelReplacesWithSubstCharInReplaceMode() } @Test - public void fromInvalidUTF8BytesThrowsInReportMode() throws Exception { - Path p = folder.newFile().toPath(); + public void fromInvalidUTF8BytesThrowsInReportMode(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); byte[] toWrite = new byte[] { (byte)0xCA, (byte)0xFE }; Files.write(p, toWrite); try (SeekableByteChannel c = Files.newByteChannel(p)) { - thrown.expect(CharacterCodingException.class); - CharStreams.fromChannel(c, 4096, CodingErrorAction.REPORT, "foo"); + assertThrows( + CharacterCodingException.class, + () -> CharStreams.fromChannel(c, 4096, CodingErrorAction.REPORT, "foo") + ); } } @Test - public void fromSMPUTF8SequenceStraddlingBufferBoundary() throws 
Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8SequenceStraddlingBufferBoundary(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (SeekableByteChannel c = Files.newByteChannel(p)) { CharStream s = CharStreams.fromChannel( @@ -168,8 +164,8 @@ public void fromSMPUTF8SequenceStraddlingBufferBoundary() throws Exception { } @Test - public void fromFileName() throws Exception { - Path p = folder.newFile().toPath(); + public void fromFileName(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); CharStream s = CharStreams.fromFileName(p.toString()); assertEquals(7, s.size()); @@ -180,20 +176,19 @@ public void fromFileName() throws Exception { } @Test - public void fromFileNameWithLatin1() throws Exception { - Path p = folder.newFile().toPath(); + public void fromFileNameWithLatin1(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \u00CA\u00FE".getBytes(StandardCharsets.ISO_8859_1)); CharStream s = CharStreams.fromFileName(p.toString(), StandardCharsets.ISO_8859_1); assertEquals(8, s.size()); assertEquals(0, s.index()); assertEquals("hello \u00CA\u00FE", s.toString()); assertEquals(p.toString(), s.getSourceName()); - } @Test - public void fromReader() throws Exception { - Path p = folder.newFile().toPath(); + public void fromReader(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (Reader r = Files.newBufferedReader(p, StandardCharsets.UTF_8)) { CharStream s = CharStreams.fromReader(r); @@ -204,8 +199,8 @@ public void fromReader() throws Exception { } @Test - public void fromSMPUTF16LEPathSMPHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void 
fromSMPUTF16LEPathSMPHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_16LE)); CharStream s = CharStreams.fromPath(p, StandardCharsets.UTF_16LE); assertEquals(7, s.size()); @@ -215,8 +210,8 @@ public void fromSMPUTF16LEPathSMPHasExpectedSize() throws Exception { } @Test - public void fromSMPUTF32LEPathSMPHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF32LEPathSMPHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); // UTF-32 isn't popular enough to have an entry in StandardCharsets. Charset c = Charset.forName("UTF-32LE"); Files.write(p, "hello \uD83C\uDF0E".getBytes(c)); @@ -226,4 +221,8 @@ public void fromSMPUTF32LEPathSMPHasExpectedSize() throws Exception { assertEquals("hello \uD83C\uDF0E", s.toString()); assertEquals(p.toString(), s.getSourceName()); } + + private Path getTestFile(Path dir) { + return new File(dir.toString(), "test").toPath(); + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeLexers.java deleted file mode 100644 index 58b316c0e9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeParsers.java deleted file mode 100644 index 2b3a1b153a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestFullContextParsing.java deleted file mode 100644 index ca6ec83d6d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java index 909a4f3b69..3d1492ce1a 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java @@ -7,16 +7,12 @@ package org.antlr.v4.test.runtime.java; import org.antlr.v4.runtime.misc.IntegerList; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; public class TestIntegerList { - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Test public void emptyListToEmptyCharArray() { IntegerList l = new IntegerList(); @@ -27,8 +23,10 @@ public void emptyListToEmptyCharArray() { public void negativeIntegerToCharArrayThrows() { IntegerList l = new IntegerList(); l.add(-42); - thrown.expect(IllegalArgumentException.class); - l.toCharArray(); + assertThrows( + IllegalArgumentException.class, + l::toCharArray + ); } @Test @@ 
-37,7 +35,7 @@ public void surrogateRangeIntegerToCharArray() { // Java allows dangling surrogates, so (currently) we do // as well. We could change this if desired. l.add(0xDC00); - char expected[] = new char[] { 0xDC00 }; + char[] expected = new char[] { 0xDC00 }; assertArrayEquals(expected, l.toCharArray()); } @@ -45,8 +43,10 @@ public void surrogateRangeIntegerToCharArray() { public void tooLargeIntegerToCharArrayThrows() { IntegerList l = new IntegerList(); l.add(0x110000); - thrown.expect(IllegalArgumentException.class); - l.toCharArray(); + assertThrows( + IllegalArgumentException.class, + l::toCharArray + ); } @Test @@ -55,7 +55,7 @@ public void unicodeBMPIntegerListToCharArray() { l.add(0x35); l.add(0x4E94); l.add(0xFF15); - char expected[] = new char[] { 0x35, 0x4E94, 0xFF15 }; + char[] expected = new char[] { 0x35, 0x4E94, 0xFF15 }; assertArrayEquals(expected, l.toCharArray()); } @@ -65,7 +65,7 @@ public void unicodeSMPIntegerListToCharArray() { l.add(0x104A5); l.add(0x116C5); l.add(0x1D7FB); - char expected[] = new char[] { 0xD801, 0xDCA5, 0xD805, 0xDEC5, 0xD835, 0xDFFB }; + char[] expected = new char[] { 0xD801, 0xDCA5, 0xD805, 0xDEC5, 0xD835, 0xDFFB }; assertArrayEquals(expected, l.toCharArray()); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestInterpreterDataReader.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestInterpreterDataReader.java new file mode 100644 index 0000000000..45a3fb1c1f --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestInterpreterDataReader.java @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.java; + +import org.antlr.v4.Tool; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ATNSerializer; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.runtime.misc.InterpreterDataReader; +import org.antlr.v4.tool.Grammar; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.*; + +/** This file represents a simple sanity checks on the parsing of the .interp file + * available to the Java runtime for interpreting rather than compiling and executing parsers. + */ +public class TestInterpreterDataReader { + @Test + public void testParseFile() throws IOException, NoSuchFieldException, IllegalAccessException, org.antlr.runtime.RecognitionException { + Grammar g = new Grammar( + "grammar Calc;\n" + + "s : expr EOF\n" + + " ;\n" + + "expr\n" + + " : INT # number\n" + + " | expr (MUL | DIV) expr # multiply\n" + + " | expr (ADD | SUB) expr # add\n" + + " ;\n" + + "\n" + + "INT : [0-9]+;\n" + + "MUL : '*';\n" + + "DIV : '/';\n" + + "ADD : '+';\n" + + "SUB : '-';\n" + + "WS : [ \\t]+ -> channel(HIDDEN);"); + String interpString = Tool.generateInterpreterData(g); + Path interpFile = Files.createTempFile(null, null); + Files.write(interpFile, interpString.getBytes(StandardCharsets.UTF_8)); + + InterpreterDataReader.InterpreterData interpreterData = InterpreterDataReader.parseFile(interpFile.toString()); + Field atnField = interpreterData.getClass().getDeclaredField("atn"); + Field vocabularyField = interpreterData.getClass().getDeclaredField("vocabulary"); + Field ruleNamesField = 
interpreterData.getClass().getDeclaredField("ruleNames"); + Field channelsField = interpreterData.getClass().getDeclaredField("channels"); + Field modesField = interpreterData.getClass().getDeclaredField("modes"); + + atnField.setAccessible(true); + vocabularyField.setAccessible(true); + ruleNamesField.setAccessible(true); + channelsField.setAccessible(true); + modesField.setAccessible(true); + + ATN atn = (ATN) atnField.get(interpreterData); + Vocabulary vocabulary = (Vocabulary) vocabularyField.get(interpreterData); + String[] literalNames = ((VocabularyImpl) vocabulary).getLiteralNames(); + String[] symbolicNames = ((VocabularyImpl) vocabulary).getSymbolicNames(); + List ruleNames = castList(ruleNamesField.get(interpreterData), String.class); + List channels = castList(channelsField.get(interpreterData), String.class); + List modes = castList(modesField.get(interpreterData), String.class); + + assertEquals(6, vocabulary.getMaxTokenType()); + assertArrayEquals(new String[]{"s","expr"}, ruleNames.toArray()); + assertArrayEquals(new String[]{"", "", "'*'", "'/'", "'+'", "'-'", ""}, literalNames); + assertArrayEquals(new String[]{"", "INT", "MUL", "DIV", "ADD", "SUB", "WS"}, symbolicNames); + assertNull(channels); + assertNull(modes); + + IntegerList serialized = ATNSerializer.getSerialized(atn); + assertEquals(ATNDeserializer.SERIALIZED_VERSION, serialized.get(0)); + } + + private List castList(Object obj, Class clazz) { + List result = new ArrayList(); + if (obj instanceof List) { + for (Object o : (List) obj) { + result.add(clazz.cast(o)); + } + return result; + } + return null; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLeftRecursion.java deleted file mode 100644 index 90b51ac72c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR 
Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerErrors.java deleted file mode 100644 index 3ff3741f34..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerExec.java deleted file mode 100644 index adef511c09..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestListeners.java deleted file mode 100644 index 643f562189..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParseTrees.java deleted file mode 100644 index 4a293cc700..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserErrors.java deleted file mode 100644 index 3c6870f95d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserExec.java deleted file mode 100644 index 1bf39a4219..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestPerformance.java deleted file mode 100644 index 097365816e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalLexer.java deleted file mode 100644 index 0cd917b1a9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalParser.java deleted file mode 100644 index 2cbb91b676..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSets.java deleted file mode 100644 index bc43a44c02..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java index f1758813c4..9123d1d3b5 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java @@ -9,14 +9,16 @@ import org.antlr.v4.runtime.RuleContext; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.misc.IntervalSet; -import org.antlr.v4.test.runtime.java.BaseJavaTest; +import org.antlr.v4.test.runtime.RuntimeTestUtils; +import org.antlr.v4.test.runtime.java.JavaRunner; import org.antlr.v4.tool.Grammar; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; -public class TestExpectedTokens extends BaseJavaTest { - @Test public void testEpsilonAltSubrule() throws Exception { +public class TestExpectedTokens extends JavaRunner { + @Test + public void testEpsilonAltSubrule() throws Exception { String gtext = "parser grammar T;\n" + "a : A (B | ) C ;\n"; @@ -32,7 +34,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s7-C->s8\n"+ "s8->RuleStop_a_1\n"+ "RuleStop_a_1-EOF->s9\n"; 
- checkRuleATN(g, "a", atnText); + RuntimeTestUtils.checkRuleATN(g, "a", atnText); ATN atn = g.getATN(); int blkStartStateNumber = 5; @@ -55,7 +57,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s6-C->s7\n"+ "s7->RuleStop_a_1\n"+ "RuleStop_a_1-EOF->s8\n"; - checkRuleATN(g, "a", atnText); + RuntimeTestUtils.checkRuleATN(g, "a", atnText); ATN atn = g.getATN(); int blkStartStateNumber = 4; @@ -75,7 +77,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s5-A->s6\n"+ "s6->RuleStop_a_1\n"+ "RuleStop_a_1-EOF->s11\n"; - checkRuleATN(g, "a", atnText); + RuntimeTestUtils.checkRuleATN(g, "a", atnText); atnText = "RuleStart_b_2->BlockStart_9\n"+ "BlockStart_9->s7\n"+ @@ -84,13 +86,13 @@ public class TestExpectedTokens extends BaseJavaTest { "s8->BlockEnd_10\n"+ "BlockEnd_10->RuleStop_b_3\n"+ "RuleStop_b_3->s5\n"; - checkRuleATN(g, "b", atnText); + RuntimeTestUtils.checkRuleATN(g, "b", atnText); ATN atn = g.getATN(); // From the start of 'b' with empty stack, can only see B and EOF int blkStartStateNumber = 9; - IntervalSet tokens = atn.getExpectedTokens(blkStartStateNumber, RuleContext.EMPTY); + IntervalSet tokens = atn.getExpectedTokens(blkStartStateNumber, ParserRuleContext.EMPTY); assertEquals("{, B}", tokens.toString(g.getTokenNames())); // Now call from 'a' @@ -132,7 +134,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s17-expr->RuleStart_expr_2\n"+ "BlockEnd_19->StarLoopBack_22\n"+ "StarLoopBack_22->StarLoopEntry_20\n"; - checkRuleATN(g, "expr", atnText); + RuntimeTestUtils.checkRuleATN(g, "expr", atnText); ATN atn = g.getATN(); diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java index 9f1f29b193..84951180ff 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java @@ -10,8 +10,9 @@ import 
org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.TokenStream; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; /** * This class contains tests for specific API functionality in {@link TokenStream} and derived types. @@ -26,17 +27,16 @@ public void testBufferedTokenStreamReuseAfterFill() { CharStream firstInput = new ANTLRInputStream("A"); BufferedTokenStream tokenStream = new BufferedTokenStream(new VisitorBasicLexer(firstInput)); tokenStream.fill(); - Assert.assertEquals(2, tokenStream.size()); - Assert.assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); - Assert.assertEquals(Token.EOF, tokenStream.get(1).getType()); + assertEquals(2, tokenStream.size()); + assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); + assertEquals(Token.EOF, tokenStream.get(1).getType()); CharStream secondInput = new ANTLRInputStream("AA"); tokenStream.setTokenSource(new VisitorBasicLexer(secondInput)); tokenStream.fill(); - Assert.assertEquals(3, tokenStream.size()); - Assert.assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); - Assert.assertEquals(VisitorBasicLexer.A, tokenStream.get(1).getType()); - Assert.assertEquals(Token.EOF, tokenStream.get(2).getType()); + assertEquals(3, tokenStream.size()); + assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); + assertEquals(VisitorBasicLexer.A, tokenStream.get(1).getType()); + assertEquals(Token.EOF, tokenStream.get(2).getType()); } - } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java index d8344de21a..dc6c4b1d4d 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java @@ 
-10,27 +10,20 @@ import org.antlr.v4.runtime.LexerInterpreter; import org.antlr.v4.runtime.TokenStreamRewriter; import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.test.runtime.java.BaseJavaTest; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; -public class TestTokenStreamRewriter extends BaseJavaTest { +public class TestTokenStreamRewriter { /** Public default constructor used by TestRig */ public TestTokenStreamRewriter() { } - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - @Test public void testInsertBeforeIndex0() throws Exception { + @Test + public void testInsertBeforeIndex0() throws Exception { LexerGrammar g = new LexerGrammar( "lexer grammar T;\n"+ "A : 'a';\n" + diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java index 5eafca122b..4999e8074e 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java @@ -14,12 +14,13 @@ import org.antlr.v4.runtime.tree.ErrorNode; import org.antlr.v4.runtime.tree.RuleNode; import org.antlr.v4.runtime.tree.TerminalNode; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.List; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class TestVisitors { /** @@ -33,7 +34,7 @@ public void testVisitTerminalNode() { VisitorBasicParser parser = new VisitorBasicParser(new CommonTokenStream(lexer)); VisitorBasicParser.SContext context = parser.s(); - 
Assert.assertEquals("(s A )", context.toStringTree(parser)); + assertEquals("(s A )", context.toStringTree(parser)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -56,7 +57,7 @@ protected String aggregateResult(String aggregate, String nextResult) { String expected = "[@0,0:0='A',<1>,1:0]\n" + "[@1,1:0='',<-1>,1:1]\n"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** @@ -79,9 +80,9 @@ public void syntaxError(Recognizer recognizer, Object offendingSymbol, int }); VisitorBasicParser.SContext context = parser.s(); - Assert.assertEquals("(s )", context.toStringTree(parser)); - Assert.assertEquals(1, errors.size()); - Assert.assertEquals("line 1:0 missing 'A' at ''", errors.get(0)); + assertEquals("(s )", context.toStringTree(parser)); + assertEquals(1, errors.size()); + assertEquals("line 1:0 missing 'A' at ''", errors.get(0)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -102,12 +103,13 @@ protected String aggregateResult(String aggregate, String nextResult) { String result = listener.visit(context); String expected = "Error encountered: [@-1,-1:-1='',<1>,1:0]"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** * This test verifies that {@link AbstractParseTreeVisitor#visitChildren} does not call - * {@link ParseTreeVisitor#visit} after {@link AbstractParseTreeVisitor#shouldVisitNextChild} returns + * {@link org.antlr.v4.runtime.tree.ParseTreeVisitor#visit} after + * {@link org.antlr.v4.runtime.tree.AbstractParseTreeVisitor#shouldVisitNextChild} returns * {@code false}. 
*/ @Test @@ -117,7 +119,7 @@ public void testShouldNotVisitEOF() { VisitorBasicParser parser = new VisitorBasicParser(new CommonTokenStream(lexer)); VisitorBasicParser.SContext context = parser.s(); - Assert.assertEquals("(s A )", context.toStringTree(parser)); + assertEquals("(s A )", context.toStringTree(parser)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -133,7 +135,7 @@ protected boolean shouldVisitNextChild(RuleNode node, String currentResult) { String result = listener.visit(context); String expected = "[@0,0:0='A',<1>,1:0]\n"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** @@ -148,7 +150,7 @@ public void testShouldNotVisitTerminal() { VisitorBasicParser parser = new VisitorBasicParser(new CommonTokenStream(lexer)); VisitorBasicParser.SContext context = parser.s(); - Assert.assertEquals("(s A )", context.toStringTree(parser)); + assertEquals("(s A )", context.toStringTree(parser)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -169,7 +171,7 @@ protected boolean shouldVisitNextChild(RuleNode node, String currentResult) { String result = listener.visit(context); String expected = "default result"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** @@ -182,7 +184,7 @@ public void testCalculatorVisitor() { VisitorCalcParser parser = new VisitorCalcParser(new CommonTokenStream(lexer)); VisitorCalcParser.SContext context = parser.s(); - Assert.assertEquals("(s (expr (expr 2) + (expr (expr 8) / (expr 2))) )", context.toStringTree(parser)); + assertEquals("(s (expr (expr 2) + (expr (expr 8) / (expr 2))) )", context.toStringTree(parser)); VisitorCalcVisitor listener = new VisitorCalcBaseVisitor() { @Override @@ -232,7 +234,7 @@ protected Integer aggregateResult(Integer aggregate, Integer nextResult) { int result = listener.visit(context); int expected = 6; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } } diff 
--git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/TimeLexerSpeed.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/TimeLexerSpeed.java index 5d612d089a..e653f8b59f 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/TimeLexerSpeed.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/TimeLexerSpeed.java @@ -55,7 +55,7 @@ * * Sample output on OS X with 4 GHz Intel Core i7 (us == microseconds, 1/1000 of a millisecond): * - Java VM args: -Xms2G -Xmx2G + Java VM args: -Xms2G -Xmx8g Warming up Java compiler.... load_legacy_java_ascii_file average time 53us size 58384b over 3500 loads of 29038 symbols from Parser.java load_legacy_java_ascii_file average time 27us size 15568b over 3500 loads of 7625 symbols from RuleContext.java @@ -381,7 +381,7 @@ public void lex_legacy_java_utf8(int n, boolean clearLexerDFACache) throws Excep public void lex_new_java_utf8(int n, boolean clearLexerDFACache) throws Exception { ClassLoader loader = TimeLexerSpeed.class.getClassLoader(); - try (InputStream is = loader.getResourceAsStream(Parser_java_file);) { + try (InputStream is = loader.getResourceAsStream(Parser_java_file)) { long size = getResourceSize(loader, Parser_java_file); CharStream input = CharStreams.fromStream(is, StandardCharsets.UTF_8, size); JavaLexer lexer = new JavaLexer(input); @@ -490,7 +490,7 @@ public static String dirname(Path path) { return path.getName(0).toString(); } - public static final long getResourceSize(ClassLoader loader, String resourceName) throws IOException { + public static long getResourceSize(ClassLoader loader, String resourceName) throws IOException { URLConnection uc = null; try { // Sadly, URLConnection is not AutoCloseable, but it leaks resources if diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/graphemes.g4 b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/graphemes.g4 index 361360563f..5e5def4b59 100644 --- 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/graphemes.g4 +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/perf/graphemes.g4 @@ -10,8 +10,12 @@ fragment TextPresentationCharacter: [\p{EmojiPresentation=TextDefault}]; fragment EmojiPresentationCharacter: [\p{EmojiPresentation=EmojiDefault}]; fragment TextPresentationSequence: EmojiPresentationCharacter VS15; fragment EmojiPresentationSequence: TextPresentationCharacter VS16; + +/* No Longer supported; see https://github.com/antlr/antlr4/pull/3261 fragment EmojiModifierSequence: [\p{Grapheme_Cluster_Break=E_Base}\p{Grapheme_Cluster_Break=E_Base_GAZ}] [\p{Grapheme_Cluster_Break=E_Modifier}]; +*/ + fragment EmojiFlagSequence: [\p{Grapheme_Cluster_Break=Regional_Indicator}] [\p{Grapheme_Cluster_Break=Regional_Indicator}]; fragment ExtendedPictographic: [\p{Extended_Pictographic}]; @@ -22,12 +26,10 @@ fragment EmojiCombiningSequence: | EmojiPresentationCharacter ) NonspacingMark*; EmojiCoreSequence: - EmojiModifierSequence - | EmojiCombiningSequence + EmojiCombiningSequence | EmojiFlagSequence; fragment EmojiZWJElement: - EmojiModifierSequence - | EmojiPresentationSequence + EmojiPresentationSequence | EmojiPresentationCharacter | ExtendedPictographic | EmojiNRK; @@ -52,4 +54,4 @@ grapheme_cluster: CRLF | Prepend* ( emoji_sequence | HangulSyllable | NonControl ) ( Extend | ZWJ | SpacingMark )*; -graphemes: grapheme_cluster* EOF; \ No newline at end of file +graphemes: grapheme_cluster* EOF; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/CustomStreamErrorListener.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/CustomStreamErrorListener.java new file mode 100644 index 0000000000..89cc25a034 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/CustomStreamErrorListener.java @@ -0,0 +1,25 @@ +package org.antlr.v4.test.runtime.java.helpers; + +import org.antlr.v4.runtime.BaseErrorListener; +import 
org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.Recognizer; + +import java.io.PrintStream; + +public class CustomStreamErrorListener extends BaseErrorListener { + private final PrintStream printStream; + + public CustomStreamErrorListener(PrintStream printStream){ + this.printStream = printStream; + } + + @Override + public void syntaxError(Recognizer recognizer, + Object offendingSymbol, + int line, + int charPositionInLine, + String msg, + RecognitionException e) { + printStream.println("line " + line + ":" + charPositionInLine + " " + msg); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/RuntimeTestLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/RuntimeTestLexer.java new file mode 100644 index 0000000000..521159bb1f --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/RuntimeTestLexer.java @@ -0,0 +1,12 @@ +package org.antlr.v4.test.runtime.java.helpers; + +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Lexer; + +public abstract class RuntimeTestLexer extends Lexer { + protected java.io.PrintStream outStream = System.out; + + public RuntimeTestLexer(CharStream input) { super(input); } + + public void setOutStream(java.io.PrintStream outStream) { this.outStream = outStream; } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/RuntimeTestParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/RuntimeTestParser.java new file mode 100644 index 0000000000..43e121887d --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/RuntimeTestParser.java @@ -0,0 +1,16 @@ +package org.antlr.v4.test.runtime.java.helpers; + +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.TokenStream; + +public abstract class RuntimeTestParser extends Parser { + protected java.io.PrintStream outStream = System.out; + + public RuntimeTestParser(TokenStream input) { + 
super(input); + } + + public void setOutStream(java.io.PrintStream outStream) { + this.outStream = outStream; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/TreeShapeListener.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/TreeShapeListener.java new file mode 100644 index 0000000000..fb6bbe0480 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/helpers/TreeShapeListener.java @@ -0,0 +1,22 @@ +package org.antlr.v4.test.runtime.java.helpers; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.tree.*; + +public class TreeShapeListener implements ParseTreeListener { + public static final TreeShapeListener INSTANCE = new TreeShapeListener(); + + @Override public void visitTerminal(TerminalNode node) { } + @Override public void visitErrorNode(ErrorNode node) { } + @Override public void exitEveryRule(ParserRuleContext ctx) { } + + @Override + public void enterEveryRule(ParserRuleContext ctx) { + for (int i = 0; i < ctx.getChildCount(); i++) { + ParseTree parent = ctx.getChild(i).getParent(); + if (!(parent instanceof RuleNode) || ((RuleNode)parent).getRuleContext() != ctx) { + throw new IllegalStateException("Invalid parse tree shape detected."); + } + } + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/JavaScriptRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/JavaScriptRuntimeTests.java new file mode 100644 index 0000000000..0173db7429 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/JavaScriptRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.javascript; + +import org.antlr.v4.test.runtime.RuntimeRunner; +import org.antlr.v4.test.runtime.RuntimeTests; + +public class JavaScriptRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new NodeRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/NodeRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/NodeRunner.java new file mode 100644 index 0000000000..4b0ceff17f --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/NodeRunner.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +package org.antlr.v4.test.runtime.javascript; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.List; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; + +public class NodeRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "JavaScript"; + } + + @Override + public String getExtension() { return "js"; } + + @Override + public String getBaseListenerSuffix() { return null; } + + @Override + public String getBaseVisitorSuffix() { return null; } + + @Override + public String getRuntimeToolName() { return "node"; } + + private final static String normalizedRuntimePath = getRuntimePath("JavaScript").replace('\\', '/'); + private final static String newImportAntlrString = + "import antlr4 from 'file://" + normalizedRuntimePath + "/src/antlr4/index.node.js'"; + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + List generatedFiles = 
generatedState.generatedFiles; + for (GeneratedFile generatedFile : generatedFiles) { + try { + FileUtils.replaceInFile(Paths.get(getTempDirPath(), generatedFile.name), + "import antlr4 from 'antlr4';", + newImportAntlrString); + } catch (IOException e) { + return new CompiledState(generatedState, e); + } + } + + writeFile(getTempDirPath(), "package.json", + RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/package_js.json")); + return new CompiledState(generatedState, null); + } + + @Override + protected void addExtraRecognizerParameters(ST template) { + template.add("runtimePath", normalizedRuntimePath); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/browser/BaseBrowserTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/browser/BaseBrowserTest.java deleted file mode 100644 index 3796e51544..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/browser/BaseBrowserTest.java +++ /dev/null @@ -1,967 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.browser; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.eclipse.jetty.server.Handler; -import org.eclipse.jetty.server.Server; -import org.eclipse.jetty.server.handler.DefaultHandler; -import org.eclipse.jetty.server.handler.HandlerList; -import org.eclipse.jetty.server.handler.ResourceHandler; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.openqa.selenium.By.ById; -import org.openqa.selenium.WebDriver; -import org.stringtemplate.v4.ST; -import 
org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import java.io.File; -import java.net.BindException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public abstract class BaseBrowserTest implements RuntimeTestSupport { - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - // private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName()); - - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - public String httpdir = null; - public String tmpdir = null; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - @org.junit.Rule - public final TestRule testWatcher = new TestWatcher() { - - @Override - protected void succeeded(Description description) { - // remove tmpdir if no error. 
- eraseTempDir(); - } - - }; - - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String prop = System.getProperty("antlr-javascript-test-dir"); - if(prop!=null && prop.length()>0) { - httpdir = prop; - } - else { - httpdir = new File(System.getProperty("java.io.tmpdir"), getClass().getSimpleName()+"-"+Thread.currentThread().getName()+"-"+System.currentTimeMillis()).getAbsolutePath(); - } - File dir = new File(httpdir); - if(dir.exists()) - this.eraseFiles(dir); - tmpdir = new File(httpdir, "parser").getAbsolutePath(); - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - return null; - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if ( g.atn==null ) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if ( g.isLexer() ) { - f = new LexerATNFactory((LexerGrammar)g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - 
antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if ( expecting!=null && !expecting.trim().isEmpty() ) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while ( ttype!= Token.EOF ); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, - ATN atn, - CharStream input) - { - LexerATNSimulator interp = new LexerATNSimulator(atn,new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if ( hitEOF ) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if ( ttype == Token.EOF ) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if ( t==IntStream.EOF ) { - hitEOF = true; - } - } while ( ttype!=Token.EOF ); - return tokenTypes; - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, boolean showDFA) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName,"-no-listener"); - assertTrue(success); - writeLexerTestFile(lexerName, showDFA); - String output = null; - try { - output = execHtmlPage("Test.html", input); - } - catch (Exception e) { - e.printStackTrace(System.err); - } - return output; - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String 
parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - rawBuildRecognizerTestFile(parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - showDiagnosticErrors); - String result = null; - try { - result = execRecognizer(input); - } - catch (Exception e) { - e.printStackTrace(System.err); - } - return result; - } - - @Override - public void testTearDown() throws Exception { - - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... 
extraOptions) - { - ErrorQueue equeue = - antlrOnString(getTmpDir(), "JavaScript", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".js"); - } - if ( parserName!=null ) { - files.add(parserName+".js"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.js"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.js"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, boolean debug) - { - this.stderrDuringParse = null; - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug); - } - } - - public String execRecognizer(String input) throws Exception { - return execHtmlPage("Test.html", input); - } - - static int httpPort = 8080; - - class ServerThread extends Thread { - - Server server; - String runtimePath; - String fileName; - Exception ex; - - public ServerThread(String fileName) { - this.runtimePath = locateRuntime(); - this.fileName = fileName; - } - - @Override - public void run() { - try { - Server server = new Server(httpPort); - ResourceHandler rh1 = new ResourceHandler(); - rh1.setDirectoriesListed(false); - rh1.setResourceBase(httpdir); - rh1.setWelcomeFiles(new String[] { fileName }); - ResourceHandler rh2 = new ResourceHandler(); - rh2.setDirectoriesListed(false); - rh2.setResourceBase(runtimePath); - HandlerList handlers = new HandlerList(); - handlers.setHandlers(new Handler[] { rh1, rh2, new 
DefaultHandler() }); - server.setHandler(handlers); - server.start(); - this.server = server; - this.server.join(); - } catch(BindException e) { - httpPort++; - run(); - } catch (Exception e) { - ex = e; - } - } - } - - protected static WebDriver driver; - - public String execHtmlPage(String fileName, String input) throws Exception { - // 'file' protocol is not supported by Selenium drivers - // so we run an embedded Jetty server - ServerThread thread = new ServerThread(fileName); - thread.start(); - try { - while(thread.server==null && thread.ex==null) - Thread.sleep(10); - if(thread.ex!=null) - throw thread.ex; - while(thread.server.isStarting()) - Thread.sleep(10); - Thread.sleep(400); // despite all the above precautions, driver.get often fails if you don't give time to Jetty - driver.get("http://localhost:" + httpPort + "/" + fileName); - driver.findElement(new ById("input")).clear(); - driver.findElement(new ById("output")).clear(); - driver.findElement(new ById("errors")).clear(); - driver.navigate().refresh(); - driver.findElement(new ById("input")).sendKeys(input); - driver.findElement(new ById("load")).click(); - driver.findElement(new ById("submit")).click(); - String errors = driver.findElement(new ById("errors")).getAttribute("value"); - if(errors!=null && errors.length()>0) { - this.stderrDuringParse = errors; - System.err.print(errors); - } - String value = driver.findElement(new ById("output")).getAttribute("value"); - // mimic stdout which adds a NL - if(value.length()>0 && !value.endsWith("\n")) - value = value + "\n"; - return value; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } finally { - if(thread.server!=null) { - thread.server.stop(); - while(!thread.server.isStopped()) - Thread.sleep(10); - Thread.sleep(100); // ensure the port is freed - } - } - return null; - } - - private String locateRuntime() { - String propName = "antlr-javascript-runtime"; - String prop = 
System.getProperty(propName); - if(prop==null || prop.length()==0) - prop = "../runtime/JavaScript/src"; - File file = new File(prop); - System.out.println(file.getAbsolutePath()); - if(!file.exists()) - throw new RuntimeException("Missing system property:" + propName); - return file.getAbsolutePath(); - } - - List getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + 
actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { - System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, - ANTLRMessage expectedMessage) - throws Exception - { - //System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertTrue("no error; 
"+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); - assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); - assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); - /* - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if ( hide.contains(t.getType()) ) { - ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeParserTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, boolean debug) { - String html = "\r\n" + - "\r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - "
    \r\n" + - "
    \r\n" + - "
    \r\n" + - "
    \r\n" + - "
    \r\n" + - " \r\n" + - "\r\n"; - writeFile(httpdir, "Test.html", html); - }; - - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - String html = "\r\n" + - "\r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - "
    \r\n" + - "
    \r\n" + - "
    \r\n" + - "
    \r\n" + - "
    \r\n" + - " \r\n" + - "\r\n"; - writeFile(httpdir, "Test.html", html); - } - - public void writeRecognizer(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - if ( parserName==null ) - writeLexerTestFile(lexerName, debug); - else - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug); - } - - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(httpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(httpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseFiles(File dir) { - String[] files = dir.list(); - for(int i = 0; files!=null && i < files.length; i++) { - new File(dir,files[i]).delete(); - } - } - - @Override - public void eraseTempDir() { - boolean doErase = true; - String propName = "antlr-javascript-erase-test-dir"; - String prop = System.getProperty(propName); - if(prop!=null && prop.length()>0) - doErase = Boolean.getBoolean(prop); - if(doErase) { - File tmpdirF = new File(httpdir); - if ( tmpdirF.exists() ) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - } - - public String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return lines[0].substring(prefix.length(),lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable - * we cannot rely on the output order, as the hashing algorithm or other aspects - * of the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a - * bit of a hack, but guarantees that we get the same order on all systems. We assume that - * the keys are strings. 
- * - * @param m The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. - */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p=0; - public IntTokenStream(IntegerList types) { this.types = types; } - - @Override - public void consume() { p++; } - - @Override - public int LA(int i) { return LT(i).getType(); } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { return p; } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return null; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); - else t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - - @Override - public String getText() { - 
throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(Interval interval) { - throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/BaseChromeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/BaseChromeTest.java deleted file mode 100644 index 93733ca8b7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/BaseChromeTest.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -public class BaseChromeTest extends BaseBrowserTest { - @BeforeClass - public static void initWebDriver() { - driver = SharedWebDriver.init(); - } - - @AfterClass - public static void closeWebDriver() { - SharedWebDriver.close(); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/SharedWebDriver.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/SharedWebDriver.java deleted file mode 100644 index 0078561e5c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/SharedWebDriver.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.openqa.selenium.WebDriver; -import org.openqa.selenium.chrome.ChromeDriver; - -import java.io.File; -import java.net.URL; -import java.util.Timer; -import java.util.TimerTask; - -import static org.junit.Assert.assertTrue; - -public class SharedWebDriver { - - static WebDriver driver; - static Timer timer; - - public static WebDriver init() { - if(driver==null) { - String path = SharedWebDriver.class.getPackage().getName().replace(".", "/") + "/chromedriver.bin"; - URL url = Thread.currentThread().getContextClassLoader().getResource(path); - File file = new File(url.toExternalForm().substring(5)); // skip 'file:' - assertTrue(file.exists()); - System.setProperty("webdriver.chrome.driver", file.getAbsolutePath()); - driver = new ChromeDriver(); - } else if(timer!=null) { - timer.cancel(); - timer = null; - } - - return driver; - } - - public static void close() { - if(driver!=null) { - if(timer!=null) { - timer.cancel(); - timer = null; - } - timer = new Timer(); - timer.schedule(new TimerTask() { - @Override public void run() { - driver.quit(); - driver = null; - } - }, 2000); // close with delay to allow next Test to start - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeLexers.java deleted file mode 100644 index 2249599dd7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeParsers.java deleted file mode 100644 index 1786d2a56c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestFullContextParsing.java deleted file mode 100644 index 12d8ec76d8..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLeftRecursion.java deleted file mode 100644 index dfd1ccbadc..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerErrors.java deleted file mode 100644 index 3eff25ca2e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerExec.java deleted file mode 100644 index e07e020b86..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestListeners.java deleted file mode 100644 index 89cdfb40ef..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParseTrees.java deleted file mode 100644 index 487ddbbb4f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserErrors.java deleted file mode 100644 index 1eab06b52a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserExec.java deleted file mode 100644 index c8e2ff6a31..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestPerformance.java deleted file mode 100644 index 44aa4e9ed9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalLexer.java deleted file mode 100644 index d0374c8815..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalParser.java deleted file mode 100644 index cb8e530ff3..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSets.java deleted file mode 100644 index 331ca08587..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/BaseExplorerTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/BaseExplorerTest.java deleted file mode 100644 index 3345166acc..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/BaseExplorerTest.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.After; -import org.junit.Before; -import org.openqa.selenium.ie.InternetExplorerDriver; - -public class BaseExplorerTest extends BaseBrowserTest { - - @Before - public void initWebDriver() { - System.setProperty("webdriver.ie.driver", "C:\\Program Files (x86)\\Selenium\\IEDriverServer.exe"); - driver = new InternetExplorerDriver(); - } - - @After - public void closeWebDriver() { - if(driver!=null) { - driver.quit(); - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeLexers.java deleted file mode 100644 index fa22c5d916..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeParsers.java deleted file mode 100644 index 202c179710..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestFullContextParsing.java deleted file mode 100644 index 1e397c5f67..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLeftRecursion.java deleted file mode 100644 index 0f6cd02748..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerErrors.java deleted file mode 100644 index 4e75227850..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerExec.java deleted file mode 100644 index 1ca62e783c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestListeners.java deleted file mode 100644 index c12edd0ec5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParseTrees.java deleted file mode 100644 index 25e6305b25..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserErrors.java deleted file mode 100644 index 68b003cd44..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserExec.java deleted file mode 100644 index 9a524c54fd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestPerformance.java deleted file mode 100644 index 23526657f2..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalLexer.java deleted file mode 100644 index e412c96610..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalParser.java deleted file mode 100644 index 22ad655a8c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSets.java deleted file mode 100644 index df359cd344..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/BaseFirefoxTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/BaseFirefoxTest.java deleted file mode 100644 index 2f43a75628..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/BaseFirefoxTest.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -public class BaseFirefoxTest extends BaseBrowserTest { - - @BeforeClass - public static void initWebDriver() { - driver = SharedWebDriver.init(); - } - - @AfterClass - public static void closeWebDriver() { - SharedWebDriver.close(); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/SharedWebDriver.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/SharedWebDriver.java deleted file mode 100644 index b9e1daf6e4..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/SharedWebDriver.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.openqa.selenium.WebDriver; -import org.openqa.selenium.firefox.FirefoxDriver; - -import java.util.Timer; -import java.util.TimerTask; - -public class SharedWebDriver { - - static WebDriver driver; - static Timer timer; - - public static WebDriver init() { - if(driver==null) { - driver = new FirefoxDriver(); - } else if(timer!=null) { - timer.cancel(); - timer = null; - } - - return driver; - } - - public static void close() { - if(driver!=null) { - if(timer!=null) { - timer.cancel(); - timer = null; - } - timer = new Timer(); - timer.schedule(new TimerTask() { - @Override public void run() { - driver.quit(); - driver = null; - } - }, 2000); // close with delay to allow next Test to start - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeLexers.java deleted file mode 100644 index a7b4a13cd8..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeParsers.java deleted file mode 100644 index e7fd00723a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestFullContextParsing.java deleted file mode 100644 index 40aef55059..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLeftRecursion.java deleted file mode 100644 index 29679cd3bc..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerErrors.java deleted file mode 100644 index 9fd9fac5d3..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerExec.java deleted file mode 100644 index b7b04e837e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestListeners.java deleted file mode 100644 index 950dd75cad..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParseTrees.java deleted file mode 100644 index 5fa612d6bf..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserErrors.java deleted file mode 100644 index af8dbeb367..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserExec.java deleted file mode 100644 index 019b15e368..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestPerformance.java deleted file mode 100644 index af1eacd011..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalLexer.java deleted file mode 100644 index cf5aaf9a51..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalParser.java deleted file mode 100644 index 3e16eae7dd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSets.java deleted file mode 100644 index 5a967c307b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/BaseNodeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/BaseNodeTest.java deleted file mode 100644 index 7246f4ec26..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/BaseNodeTest.java +++ /dev/null @@ -1,906 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import java.io.File; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static 
org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public class BaseNodeTest implements RuntimeTestSupport { - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - // private static final Logger LOGGER = - // Logger.getLogger(BaseTest.class.getName()); - - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - public String tmpdir = null; - - /** - * If error during parser execution, store stderr here; can't return stdout - * and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String prop = System.getProperty("antlr-javascript-test-dir"); - if (prop != null && prop.length() > 0) { - tmpdir = prop; - } - else { - tmpdir = new File(System.getProperty("java.io.tmpdir"), getClass() - .getSimpleName()+"-"+Thread.currentThread().getName()+"-"+System.currentTimeMillis()) - .getAbsolutePath(); - } - File dir = new File(tmpdir); - if (dir.exists()) - this.eraseFiles(dir); - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - protected 
org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(new String[] { "-o", tmpdir }); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if (g.atn == null) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if (g.isLexer()) { - f = new LexerATNFactory((LexerGrammar) g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if (g.ast != null && !g.ast.hasErrors) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if (g.getImportedGrammars() != null) { // process imported grammars - // (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if (expecting != null && !expecting.trim().isEmpty()) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, - LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while (ttype != Token.EOF); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, ATN atn, CharStream input) { - LexerATNSimulator interp = new 
LexerATNSimulator(atn, - new DFA[] { new DFA( - atn.modeToStartState.get(Lexer.DEFAULT_MODE)) }, null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if (hitEOF) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if (ttype == Token.EOF) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if (t == IntStream.EOF) { - hitEOF = true; - } - } while (ttype != Token.EOF); - return tokenTypes; - } - - protected String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input, boolean showDFA) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, null, lexerName, "-no-listener"); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execModule("Test.js"); - if ( output.length()==0 ) { - output = null; - } - return output; - } - - @Override - public String execParser(String grammarFileName, String grammarStr, - String parserName, String lexerName, String listenerName, - String visitorName, String startRuleName, String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, parserName, lexerName, "-visitor"); - assertTrue(success); - writeFile(tmpdir, "input", input); - rawBuildRecognizerTestFile(parserName, lexerName, listenerName, - visitorName, startRuleName, showDiagnosticErrors); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - String... 
extraOptions) { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, - parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - boolean defaultListener, String... extraOptions) { - ErrorQueue equeue = antlrOnString(getTmpDir(), "JavaScript", grammarFileName, grammarStr, - defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if (lexerName != null) { - files.add(lexerName + ".js"); - } - if (parserName != null) { - files.add(parserName + ".js"); - Set optionsSet = new HashSet( - Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, - grammarFileName.lastIndexOf('.')) - + "Listener.js"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, - grammarFileName.lastIndexOf('.')) - + "Visitor.js"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - this.stderrDuringParse = null; - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, lexerName, listenerName, - visitorName, parserStartRuleName, debug); - } - } - - public String execRecognizer() { - return execModule("Test.js"); - } - - public String execModule(String fileName) { - String nodejsPath = locateNodeJS(); - String runtimePath = locateRuntime(); - String modulePath = new File(new File(tmpdir), fileName) - .getAbsolutePath(); - String inputPath = new File(new File(tmpdir), "input") - .getAbsolutePath(); - try { - ProcessBuilder builder = new ProcessBuilder(nodejsPath, modulePath, - inputPath); - builder.environment().put("NODE_PATH", - runtimePath 
+ File.pathSeparator + tmpdir); - builder.directory(new File(tmpdir)); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum( - process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum( - process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if (stderrVacuum.toString().length() > 0) { - this.stderrDuringParse = stderrVacuum.toString(); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String locateTool(String tool) { - String[] roots = { "/usr/bin/", "/usr/local/bin/" }; - for (String root : roots) { - if (new File(root + tool).exists()) { - return root + tool; - } - } - return null; - } - - private boolean canExecute(String tool) { - try { - ProcessBuilder builder = new ProcessBuilder(tool, "--version"); - builder.redirectErrorStream(true); - Process process = builder.start(); - StreamVacuum vacuum = new StreamVacuum(process.getInputStream()); - vacuum.start(); - process.waitFor(); - vacuum.join(); - return process.exitValue() == 0; - } - catch (Exception e) { - ; - } - return false; - } - - private String locateNodeJS() { - // typically /usr/local/bin/node - String propName = "antlr-javascript-nodejs"; - String prop = System.getProperty(propName); - - if ( prop!=null && prop.length()!=0 ) { - return prop; - } - if (canExecute("nodejs")) { - return "nodejs"; // nodejs on Debian without node-legacy package - } - return "node"; // everywhere else - } - - private String locateRuntime() { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource("JavaScript/src"); - if ( runtimeSrc==null ) { - throw new RuntimeException("Cannot find JavaScript runtime"); - } - 
if(isWindows()){ - return runtimeSrc.getPath().replaceFirst("/", ""); - } - return runtimeSrc.getPath(); - } - - private boolean isWindows() { - return System.getProperty("os.name").toLowerCase().contains("windows"); - } - - // void ambig(List msgs, int[] expectedAmbigAlts, String - // expectedAmbigInput) - // throws Exception - // { - // ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); - // } - - // void ambig(List msgs, int i, int[] expectedAmbigAlts, String - // expectedAmbigInput) - // throws Exception - // { - // List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); - // AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); - // if ( a==null ) assertNull(expectedAmbigAlts); - // else { - // assertEquals(a.conflictingAlts.toString(), - // Arrays.toString(expectedAmbigAlts)); - // } - // assertEquals(expectedAmbigInput, a.input); - // } - - // void unreachable(List msgs, int[] expectedUnreachableAlts) - // throws Exception - // { - // unreachable(msgs, 0, expectedUnreachableAlts); - // } - - // void unreachable(List msgs, int i, int[] - // expectedUnreachableAlts) - // throws Exception - // { - // List amsgs = getMessagesOfType(msgs, - // UnreachableAltsMessage.class); - // UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); - // if ( u==null ) assertNull(expectedUnreachableAlts); - // else { - // assertEquals(u.conflictingAlts.toString(), - // Arrays.toString(expectedUnreachableAlts)); - // } - // } - - List getMessagesOfType(List msgs, - Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if (m.getClass() == c) - filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - System.out - .println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = 
atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - // System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, - String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if (g.ast != null && !g.ast.hasErrors) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if (g.isLexer()) - factory = new LexerATNFactory((LexerGrammar) g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - // System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start + b.length(), end); - assertEquals(expected, snippet); - } - if (equeue.size() > 0) { - System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) throws Exception { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertNotNull("no error; " + expectedMessage.getErrorType() - + " expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), - 
Arrays.toString(foundMsg.getArgs())); - if (equeue.size() != 1) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) throws Exception { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertNotNull("no error; " + expectedMessage.getErrorType() - + " expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), - Arrays.toString(foundMsg.getArgs())); - if (equeue.size() != 1) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, ANTLRMessage expectedMessage) - throws Exception { - // System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertTrue("no error; " + expectedMessage.getErrorType() + " expected", - !equeue.errors.isEmpty()); - assertTrue("too many errors; " + equeue.errors, - equeue.errors.size() <= 1); - assertNotNull( - "couldn't find expected error: " - + expectedMessage.getErrorType(), foundMsg); - /* - * assertTrue("error is not a GrammarSemanticsMessage", foundMsg - * instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { - super(src); - } - - Set hide = new HashSet(); - - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if (hide.contains(t.getType())) { - ((WritableToken) 
t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - ST outputFileST = new ST( - "var antlr4 = require('antlr4');\n" - + "var = require('./');\n" - + "var = require('./');\n" - + "var = require('./').;\n" - + "var = require('./').;\n" - + "\n" - + "function TreeShapeListener() {\n" - + " antlr4.tree.ParseTreeListener.call(this);\n" - + " return this;\n" - + "}\n" - + "\n" - + "TreeShapeListener.prototype = Object.create(antlr4.tree.ParseTreeListener.prototype);\n" - + "TreeShapeListener.prototype.constructor = TreeShapeListener;\n" - + "\n" - + "TreeShapeListener.prototype.enterEveryRule = function(ctx) {\n" - + " for(var i=0;i\\.(input);\n" - + " var stream = new antlr4.CommonTokenStream(lexer);\n" - + "" - + " parser.buildParseTrees = true;\n" - + " printer = function() {\n" - + " this.println = function(s) { console.log(s); }\n" - + " this.print = function(s) { process.stdout.write(s); }\n" - + " return this;\n" - + " };\n" - + " parser.printer = new printer();\n" - + " var tree = parser.();\n" - + " antlr4.tree.ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" - + "}\n" + "\n" + "main(process.argv);\n" + "\n"); - ST createParserST = new ST( - " var parser = new .(stream);\n"); - if (debug) { - createParserST = new ST( - " var parser = new .(stream);\n" - + " parser.addErrorListener(new antlr4.error.DiagnosticErrorListener());\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - 
outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.js", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "var antlr4 = require('antlr4');\n" - + "var = require('./');\n" - + "\n" - + "function main(argv) {\n" - + " var input = new antlr4.FileStream(argv[2], true);\n" - + " var lexer = new .(input);\n" - + " var stream = new antlr4.CommonTokenStream(lexer);\n" - + " stream.fill();\n" - + " for(var i=0; i\\ 0) - doErase = Boolean.getBoolean(prop); - if (doErase) { - File tmpdirF = new File(tmpdir); - if (tmpdirF.exists()) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - } - - public String getFirstLineOfException() { - if (this.stderrDuringParse == null) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix = "Exception in thread \"main\" "; - return lines[0].substring(prefix.length(), lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable we cannot - * rely on the output order, as the hashing algorithm or other aspects of - * the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the - * Map, which is a bit of a hack, but guarantees that we get the same order - * on all systems. We assume that the keys are strings. - * - * @param m - * The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. 
- */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p = 0; - - public IntTokenStream(IntegerList types) { - this.types = types; - } - - @Override - public void consume() { - p++; - } - - @Override - public int LA(int i) { - return LT(i).getType(); - } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { - return p; - } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return null; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if (rawIndex >= types.size()) - t = new CommonToken(Token.EOF); - else - t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - @Override - public String getText() { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Interval interval) 
{ - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public , V> LinkedHashMap sort( - Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeLexers.java deleted file mode 100644 index 92b094574b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeParsers.java deleted file mode 100644 index 9662ab087b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestFullContextParsing.java deleted file mode 100644 index 8978ecddf3..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLeftRecursion.java deleted file mode 100644 index 27182e063c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerErrors.java deleted file mode 100644 index b4ee553ad2..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerExec.java deleted file mode 100644 index 0a9bd715e9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestListeners.java deleted file mode 100644 index d4316f56db..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParseTrees.java deleted file mode 100644 index 6884ac3b1b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserErrors.java deleted file mode 100644 index e3aa097893..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserExec.java deleted file mode 100644 index 3845a1f143..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestPerformance.java deleted file mode 100644 index d053e85f55..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalLexer.java deleted file mode 100644 index 3f591fd619..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalParser.java deleted file mode 100644 index 2dd3496142..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSets.java deleted file mode 100644 index 2d11a0a60b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/BaseSafariTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/BaseSafariTest.java deleted file mode 100644 index dcdc31bf75..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/BaseSafariTest.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/* see https://code.google.com/p/selenium/wiki/SafariDriver for instructions */ -public class BaseSafariTest extends BaseBrowserTest { - - @BeforeClass - public static void initWebDriver() { - driver = SharedWebDriver.init(); - } - - @AfterClass - public static void closeWebDriver() { - SharedWebDriver.close(); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/SharedWebDriver.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/SharedWebDriver.java deleted file mode 100644 index d0efb37dc6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/SharedWebDriver.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.openqa.selenium.WebDriver; -import org.openqa.selenium.safari.SafariDriver; - -import java.util.Timer; -import java.util.TimerTask; - -public class SharedWebDriver { - - static WebDriver driver; - static Timer timer; - - public static WebDriver init() { - if(driver==null) { - System.setProperty("webdriver.safari.noinstall", "true"); - driver = new SafariDriver(); - } else if(timer!=null) { - timer.cancel(); - timer = null; - } - - return driver; - } - - public static void close() { - if(driver!=null) { - if(timer!=null) { - timer.cancel(); - timer = null; - } - timer = new Timer(); - timer.schedule(new TimerTask() { - @Override public void run() { - driver.quit(); - driver = null; - } - }, 2000); // close with delay to allow next Test to start - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeLexers.java deleted file mode 100644 index b426850d2a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeParsers.java deleted file mode 100644 index 7bfe1310f0..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestFullContextParsing.java deleted file mode 100644 index ac0842702f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLeftRecursion.java deleted file mode 100644 index 526254436f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerErrors.java deleted file mode 100644 index 3e3f352fa1..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerExec.java deleted file mode 100644 index da29e2de78..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestListeners.java deleted file mode 100644 index 6d8c0e11b7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParseTrees.java deleted file mode 100644 index 3e75cf015c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserErrors.java deleted file mode 100644 index 52346a9396..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserExec.java deleted file mode 100644 index 75a06c1cbc..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestPerformance.java deleted file mode 100644 index 1a09a64c71..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalLexer.java deleted file mode 100644 index 24d1e7bfe9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalParser.java deleted file mode 100644 index 3387a6d496..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSets.java deleted file mode 100644 index b3cd0237f9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/BasePHPTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/BasePHPTest.java deleted file mode 100644 index 8f2cc5bcc9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/BasePHPTest.java +++ /dev/null @@ -1,599 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.antlr.v4.Tool; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.stringtemplate.v4.ST; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class BasePHPTest implements RuntimeTestSupport { - public static final String newline = System.getProperty("line.separator"); - - public String tmpdir = null; - - /** - * If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. 
- */ - protected String stderrDuringParse; - - /** - * Errors found while running antlr - */ - protected StringBuilder antlrToolErrors; - - private String getPropertyPrefix() { - return "antlr-php"; - } - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String propName = getPropertyPrefix() + "-test-dir"; - String prop = System.getProperty(propName); - - if (prop != null && prop.length() > 0) { - tmpdir = prop; - } else { - String classSimpleName = getClass().getSimpleName(); - String threadName = Thread.currentThread().getName(); - String childPath = String.format("%s-%s-%s", classSimpleName, threadName, System.currentTimeMillis()); - tmpdir = new File(System.getProperty("java.io.tmpdir"), childPath).getAbsolutePath(); - } - - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if (antlrToolErrors.length() == 0) { - return null; - } - - return antlrToolErrors.toString(); - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if (g.atn == null) { - semanticProcess(g); - - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - - if (g.isLexer()) { - f = new LexerATNFactory((LexerGrammar) g); - } else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if (g.ast != null && !g.ast.hasErrors) { - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - 
if (g.getImportedGrammars() != null) { - for (Grammar imp: g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - protected String execLexer( - String grammarFileName, - String grammarStr, - String lexerName, - String input - ) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer( - String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA - ) { - boolean success = rawGenerateAndBuildRecognizer( - grammarFileName, - grammarStr, - null, - lexerName, - "-no-listener" - ); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execModule("Test.php"); - - return output; - } - - public String execParser( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors - ) { - return execParser_( - grammarFileName, - grammarStr, - parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - input, - showDiagnosticErrors, - false - ); - } - - public String execParser_( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean debug, - boolean trace - ) { - boolean success = rawGenerateAndBuildRecognizer( - grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor" - ); - - assertTrue(success); - - writeFile(tmpdir, "input", input); - - rawBuildRecognizerTestFile( - parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - debug, - trace - ); - - return execRecognizer(); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateAndBuildRecognizer( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - 
String... extraOptions - ) { - return rawGenerateAndBuildRecognizer( - grammarFileName, - grammarStr, - parserName, - lexerName, - false, - extraOptions - ); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateAndBuildRecognizer( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... extraOptions - ) { - ErrorQueue equeue = antlrOnString(getTmpDir(), "PHP", grammarFileName, grammarStr, defaultListener, extraOptions); - - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - - if (lexerName != null) { - files.add(lexerName + ".php"); - } - - if (parserName != null) { - files.add(parserName + ".php"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.')) + "Listener.php"); - } - - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.')) + "Visitor.php"); - } - } - - return true; - } - - protected void rawBuildRecognizerTestFile( - String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean trace - ) { - this.stderrDuringParse = null; - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } else { - writeParserTestFile( - parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, - trace - ); - } - } - - public String execRecognizer() { - return execModule("Test.php"); - } - - public String execModule(String fileName) { - String phpPath = locatePhp(); - String runtimePath = locateRuntime(); - - File tmpdirFile = new File(tmpdir); - String modulePath = new File(tmpdirFile, fileName).getAbsolutePath(); - String inputPath = new File(tmpdirFile, "input").getAbsolutePath(); - Path outputPath = 
tmpdirFile.toPath().resolve("output").toAbsolutePath(); - - try { - ProcessBuilder builder = new ProcessBuilder(phpPath, modulePath, inputPath, outputPath.toString()); - builder.environment().put("RUNTIME", runtimePath); - builder.directory(tmpdirFile); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - - if (output.length() == 0) { - output = null; - } - - if (stderrVacuum.toString().length() > 0) { - this.stderrDuringParse = stderrVacuum.toString(); - } - - return output; - } catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String locateTool(String tool) { - final String phpPath = System.getProperty("PHP_PATH"); - - if (phpPath != null && new File(phpPath).exists()) { - return phpPath; - } - - String[] roots = {"/usr/local/bin/", "/opt/local/bin", "/usr/bin/"}; - - for (String root: roots) { - if (new File(root + tool).exists()) { - return root + tool; - } - } - - throw new RuntimeException("Could not locate " + tool); - } - - protected String locatePhp() { - String propName = getPropertyPrefix() + "-php"; - String prop = System.getProperty(propName); - - if (prop == null || prop.length() == 0) { - prop = locateTool("php"); - } - - File file = new File(prop); - - if (!file.exists()) { - throw new RuntimeException("Missing system property:" + propName); - } - - return file.getAbsolutePath(); - } - - protected String locateRuntime() { - String propName = "antlr-php-runtime"; - String prop = System.getProperty(propName); - - if (prop == null || prop.length() == 0) { - prop = "../runtime/PHP"; - } - - File file = new File(prop); - - if (!file.exists()) { - throw new RuntimeException("Missing 
system property:" + propName); - } - - try { - return file.getCanonicalPath(); - } catch (IOException e) { - return file.getAbsolutePath(); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "\\($input);\n" - + "$lexer->addErrorListener(new ConsoleErrorListener());" - + "$tokens = new CommonTokenStream($lexer);\n" - + "$tokens->fill();\n" - + "\n" - + "foreach ($tokens->getAllTokens() as $token) {\n" - + " echo $token . \\PHP_EOL;\n" - + "}" - + (showDFA - ? "echo $lexer->getInterpreter()->getDFA(Lexer::DEFAULT_MODE)->toLexerString();\n" - : "") - ); - - outputFileST.add("lexerName", lexerName); - - writeFile(tmpdir, "Test.php", outputFileST.render()); - } - - protected void writeParserTestFile( - String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace - ) { - if (!parserStartRuleName.endsWith(")")) { - parserStartRuleName += "()"; - } - ST outputFileST = new ST( - "\\getChildCount(); $i \\< $count; $i++) {\n" - + " $parent = $ctx->getChild($i)->getParent();\n" - + "\n" - + " if (!($parent instanceof RuleNode) || $parent->getRuleContext() !== $ctx) {\n" - + " throw new RuntimeException('Invalid parse tree shape detected.');\n" - + " }\n" - + " }\n" - + " }\n" - + "}" - + "\n" - + "$input = InputStream::fromPath($argv[1]);\n" - + "$lexer = new ($input);\n" - + "$lexer->addErrorListener(new ConsoleErrorListener());" - + "$tokens = new CommonTokenStream($lexer);\n" - + "" - + "$parser->addErrorListener(new ConsoleErrorListener());" - + "$parser->setBuildParseTree(true);\n" - + "$tree = $parser->;\n\n" - + "ParseTreeWalker::default()->walk(new TreeShapeListener(), $tree);\n" - ); - - String stSource = "$parser = new ($tokens);\n"; - - if (debug) { - stSource += "$parser->addErrorListener(new DiagnosticErrorListener());\n"; - } - - if (trace) { - 
stSource += "$parser->setTrace(true);\n"; - } - - ST createParserST = new ST(stSource); - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - - writeFile(tmpdir, "Test.php", outputFileST.render()); - } - - protected void eraseFiles(File dir) { - String[] files = dir.list(); - for (int i = 0; files != null && i < files.length; i++) { - new File(dir, files[i]).delete(); - } - } - - @Override - public void eraseTempDir() { - boolean doErase = true; - String propName = getPropertyPrefix() + "-erase-test-dir"; - String prop = System.getProperty(propName); - if (prop != null && prop.length() > 0) { - doErase = Boolean.getBoolean(prop); - } - if (doErase) { - File tmpdirF = new File(tmpdir); - if (tmpdirF.exists()) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - } - - /** - * Sort a list - */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** - * Return map sorted by key - */ - public , V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k: keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PHPRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PHPRunner.java new file mode 100644 index 0000000000..94655dc052 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PHPRunner.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.php; + +import java.util.HashMap; +import java.util.Map; + +import org.antlr.v4.test.runtime.*; + +public class PHPRunner extends RuntimeRunner { + private static final Map environment; + + static { + environment = new HashMap<>(); + environment.put("RUNTIME", getRuntimePath("PHP")); + } + + @Override + public String getLanguage() { + return "PHP"; + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PhpRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PhpRuntimeTests.java new file mode 100644 index 0000000000..9de0273bb9 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PhpRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.php; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class PhpRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new PHPRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeLexers.java deleted file mode 100644 index d1d353875d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeParsers.java deleted file mode 100644 index dd5b0015a7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestFullContextParsing.java deleted file mode 100644 index 60efc07184..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLeftRecursion.java deleted file mode 100644 index cb200ef38a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerErrors.java deleted file mode 100644 index cd7a5c5968..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerExec.java deleted file mode 100644 index 03595f564e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestListeners.java deleted file mode 100644 index 52260158d5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParseTrees.java deleted file mode 100644 index 656e14d71e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserErrors.java deleted file mode 100644 index ac3ab88ef6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserExec.java deleted file mode 100644 index 01e3f321ca..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestPerformance.java deleted file mode 100644 index 7459d77d82..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalLexer.java deleted file mode 100644 index ec7f14efc8..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalParser.java deleted file mode 100644 index 1441444c53..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSets.java deleted file mode 100644 index 9960453614..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python/BasePythonTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python/BasePythonTest.java deleted file mode 100644 index 06cbfa1afc..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python/BasePythonTest.java +++ /dev/null @@ -1,961 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.python; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.DecisionState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.test.runtime.TestOutputReading; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import java.io.File; -import 
java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.URL; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public abstract class BasePythonTest implements RuntimeTestSupport { - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - // private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName()); - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - public String tmpdir = null; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - @org.junit.Rule - public final TestRule testWatcher = new TestWatcher() { - - @Override - protected void succeeded(Description description) { - // remove tmpdir if no error. 
- eraseTempPyCache(); - eraseTempDir(); - } - - }; - - private String getPropertyPrefix() { - return "antlr-" + getLanguage().toLowerCase(); - } - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String propName = getPropertyPrefix() + "-test-dir"; - String prop = System.getProperty(propName); - if(prop!=null && prop.length()>0) { - tmpdir = prop; - } - else { - tmpdir = new File(System.getProperty("java.io.tmpdir"), getClass().getSimpleName()+ - "-"+Thread.currentThread().getName()+"-"+System.currentTimeMillis()).getAbsolutePath(); - } - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if ( g.atn==null ) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if ( g.isLexer() ) { - f = new LexerATNFactory((LexerGrammar)g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - 
SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - public DFA createDFA(Grammar g, DecisionState s) { -// PredictionDFAFactory conv = new PredictionDFAFactory(g, s); -// DFA dfa = conv.createDFA(); -// conv.issueAmbiguityWarnings(); -// System.out.print("DFA="+dfa); -// return dfa; - return null; - } - -// public void minimizeDFA(DFA dfa) { -// DFAMinimizer dmin = new DFAMinimizer(dfa); -// dfa.minimized = dmin.minimize(); -// } - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if ( expecting!=null && !expecting.trim().isEmpty() ) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while ( ttype!= Token.EOF ); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, - ATN atn, - CharStream input) - { - LexerATNSimulator interp = new LexerATNSimulator(atn,new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if ( hitEOF ) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if ( ttype == Token.EOF ) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if ( t== IntStream.EOF ) { - hitEOF = true; - } - } while ( ttype!=Token.EOF ); - return tokenTypes; - } - - List 
checkRuleDFA(String gtext, String ruleName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - ATNState s = atn.ruleToStartState[g.getRule(ruleName).index]; - if ( s==null ) { - System.err.println("no such rule: "+ruleName); - return null; - } - ATNState t = s.transition(0).target; - if ( !(t instanceof DecisionState) ) { - System.out.println(ruleName+" has no decision"); - return null; - } - DecisionState blk = (DecisionState)t; - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - List checkRuleDFA(String gtext, int decision, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - DecisionState blk = atn.decisionToState.get(decision); - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - void checkRuleDFA(Grammar g, DecisionState blk, String expecting) - throws Exception - { - DFA dfa = createDFA(g, blk); - String result = null; - if ( dfa!=null ) result = dfa.toString(); - assertEquals(expecting, result); - } - - List checkLexerDFA(String gtext, String expecting) - throws Exception - { - return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); - } - - List checkLexerDFA(String gtext, String modeName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - LexerGrammar g = new LexerGrammar(gtext, equeue); - g.atn = createATN(g, false); -// LexerATNToDFAConverter conv = new LexerATNToDFAConverter(g); -// DFA dfa = conv.createDFA(modeName); -// g.setLookaheadDFA(0, dfa); // only one decision to worry about -// -// String result = null; -// if ( dfa!=null ) result = dfa.toString(); -// assertEquals(expecting, result); -// -// return equeue.all; - return null; - } - - protected abstract String getLanguage(); - - protected String execLexer(String grammarFileName, - String grammarStr, - String 
lexerName, - String input) - { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName,"-no-listener"); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execModule("Test.py"); - return output; - } - - public ParseTree execStartRule(String startRuleName, Parser parser) - throws IllegalAccessException, InvocationTargetException, - NoSuchMethodException - { - Method startRule = null; - Object[] args = null; - try { - startRule = parser.getClass().getMethod(startRuleName); - } - catch (NoSuchMethodException nsme) { - // try with int _p arg for recursive func - startRule = parser.getClass().getMethod(startRuleName, int.class); - args = new Integer[] {0}; - } - ParseTree result = (ParseTree)startRule.invoke(parser, args); -// System.out.println("parse tree = "+result.toStringTree(parser)); - return result; - } - - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) { - return execParser_(grammarFileName, grammarStr, parserName, lexerName, - listenerName, visitorName, startRuleName, input, showDiagnosticErrors, false); - } - - public String execParser_(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean debug, - boolean trace) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(tmpdir, "input", input); - 
rawBuildRecognizerTestFile(parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - debug, - trace); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... extraOptions) - { - ErrorQueue equeue = - antlrOnString(getTmpDir(), getLanguage(), grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".py"); - } - if ( parserName!=null ) { - files.add(parserName+".py"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.py"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.py"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean trace) - { - this.stderrDuringParse = null; - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, trace); - } - } - - public String execRecognizer() { - return execModule("Test.py"); - } - - public String execModule(String fileName) { - String 
pythonPath = locatePython(); - String runtimePath = locateRuntime(); - File tmpdirFile = new File(tmpdir); - String modulePath = new File(tmpdirFile, fileName).getAbsolutePath(); - String inputPath = new File(tmpdirFile, "input").getAbsolutePath(); - Path outputPath = tmpdirFile.toPath().resolve("output").toAbsolutePath(); - try { - ProcessBuilder builder = new ProcessBuilder( pythonPath, modulePath, inputPath, outputPath.toString() ); - builder.environment().put("PYTHONPATH",runtimePath); - builder.environment().put("PYTHONIOENCODING", "utf-8"); - builder.directory(tmpdirFile); - Process process = builder.start(); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stderrVacuum.start(); - process.waitFor(); - stderrVacuum.join(); - String output = TestOutputReading.read(outputPath); - if ( stderrVacuum.toString().length()>0 ) { - this.stderrDuringParse = stderrVacuum.toString(); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String locateTool(String tool) { - String[] roots = { - "/opt/local/bin", "/usr/bin/", "/usr/local/bin/", - "/Users/"+System.getProperty("user.name")+"/anaconda3/bin/" - }; - for(String root : roots) { - if(new File(root + tool).exists()) { - return root+tool; - } - } - throw new RuntimeException("Could not locate " + tool); - } - - protected String locatePython() { - String propName = getPropertyPrefix() + "-python"; - String prop = System.getProperty(propName); - if(prop==null || prop.length()==0) - prop = locateTool(getPythonExecutable()); - File file = new File(prop); - if(!file.exists()) - throw new RuntimeException("Missing system property:" + propName); - return file.getAbsolutePath(); - } - - protected abstract String getPythonExecutable(); - - protected String locateRuntime() { return locateRuntime(getLanguage()); } - - protected String locateRuntime(String targetName) { - final ClassLoader 
loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource(targetName+"/src"); - if ( runtimeSrc==null ) { - throw new RuntimeException("Cannot find "+targetName+" runtime"); - } - if(isWindows()){ - return runtimeSrc.getPath().replaceFirst("/", ""); - } - return runtimeSrc.getPath(); - } - - private boolean isWindows() { - return System.getProperty("os.name").toLowerCase().contains("windows"); - } - -// void ambig(List msgs, int[] expectedAmbigAlts, String expectedAmbigInput) -// throws Exception -// { -// ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); -// } - -// void ambig(List msgs, int i, int[] expectedAmbigAlts, String expectedAmbigInput) -// throws Exception -// { -// List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); -// AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); -// if ( a==null ) assertNull(expectedAmbigAlts); -// else { -// assertEquals(a.conflictingAlts.toString(), Arrays.toString(expectedAmbigAlts)); -// } -// assertEquals(expectedAmbigInput, a.input); -// } - -// void unreachable(List msgs, int[] expectedUnreachableAlts) -// throws Exception -// { -// unreachable(msgs, 0, expectedUnreachableAlts); -// } - -// void unreachable(List msgs, int i, int[] expectedUnreachableAlts) -// throws Exception -// { -// List amsgs = getMessagesOfType(msgs, UnreachableAltsMessage.class); -// UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); -// if ( u==null ) assertNull(expectedUnreachableAlts); -// else { -// assertEquals(u.conflictingAlts.toString(), Arrays.toString(expectedUnreachableAlts)); -// } -// } - - List getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - 
System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { - System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a 
GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, - ANTLRMessage expectedMessage) - throws Exception - { - //System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertTrue("no error; "+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); - assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); - assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); - /* - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = 
get(i); - if ( hide.contains(t.getType()) ) { - ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected abstract void writeParserTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean setTrace); - - - - protected abstract void writeLexerTestFile(String lexerName, boolean showDFA); - - public void writeRecognizer(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if ( parserName==null ) { - writeLexerTestFile(lexerName, debug); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, - trace); - } - } - - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseFiles(File dir) { - String[] files = dir.list(); - for(int i = 0; files!=null && i < files.length; i++) { - new File(dir,files[i]).delete(); - } - } - - @Override - public void eraseTempDir() { - boolean doErase = true; - String propName = getPropertyPrefix() + "-erase-test-dir"; - String prop = System.getProperty(propName); - if(prop!=null && prop.length()>0) - doErase = Boolean.getBoolean(prop); - if(doErase) { - File tmpdirF = new File(tmpdir); - if ( tmpdirF.exists() ) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - } - - protected void eraseTempPyCache() { - File tmpdirF = new File(tmpdir+"/__pycache__"); - if ( tmpdirF.exists() ) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - - public 
String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return lines[0].substring(prefix.length(),lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable - * we cannot rely on the output order, as the hashing algorithm or other aspects - * of the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a - * bit of a hack, but guarantees that we get the same order on all systems. We assume that - * the keys are strings. - * - * @param m The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. - */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p=0; - public IntTokenStream(IntegerList types) { this.types = types; } - - @Override - public void consume() { p++; } - - @Override - public int LA(int i) { return LT(i).getType(); } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { return p; } - - @Override - 
public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return null; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); - else t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - @Override - public String getText() { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Interval interval) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python/PythonRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python/PythonRunner.java new file mode 100644 index 0000000000..90085af5a7 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python/PythonRunner.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +package org.antlr.v4.test.runtime.python; + +import org.antlr.v4.test.runtime.*; +import org.stringtemplate.v4.ST; + +public abstract class PythonRunner extends RuntimeRunner { + @Override + public String getExtension() { return "py"; } + + @Override + protected void addExtraRecognizerParameters(ST template) { + template.add("python3", getLanguage().equals("Python3")); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/BasePython2Test.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/BasePython2Test.java deleted file mode 100644 index aa0eea7e2e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/BasePython2Test.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.python.BasePythonTest; -import org.stringtemplate.v4.ST; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; - -public class BasePython2Test extends BasePythonTest { - - @Override - protected String getLanguage() { - return "Python2"; - } - - @Override - protected String getPythonExecutable() { - return "python2.7"; - } - - @Override - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "from __future__ import print_function\n" - + "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + " stream.fill()\n" - + " [ print(unicode(t), file=output) for t in stream.tokens ]\n" - + (showDFA ? 
" print(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString(), end='', file=output)\n" - : "") + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "Test.py", outputFileST.render()); - } - - @Override - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if(!parserStartRuleName.endsWith(")")) - parserStartRuleName += "()"; - ST outputFileST = new ST( - "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "from import \n" - + "from import \n" - + "from import \n" - + "\n" - + "class TreeShapeListener(ParseTreeListener):\n" - + "\n" - + " def visitTerminal(self, node):\n" - + " pass\n" - + "\n" - + " def visitErrorNode(self, node):\n" - + " pass\n" - + "\n" - + " def exitEveryRule(self, ctx):\n" - + " pass\n" - + "\n" - + " def enterEveryRule(self, ctx):\n" - + " for child in ctx.getChildren():\n" - + " parent = child.parentCtx\n" - + " if not isinstance(parent, RuleNode) or parent.getRuleContext() != ctx:\n" - + " raise IllegalStateException(\"Invalid parse tree shape detected.\")\n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + "" - + " parser.buildParseTrees = True\n" - + " tree = parser.\n" - + " ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree)\n" - + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - String stSource = " parser = (stream, output)\n"; - if(debug) - stSource += " parser.addErrorListener(DiagnosticErrorListener())\n"; - if(trace) - stSource += " parser.setTrace(True)\n"; - ST createParserST = new ST(stSource); - outputFileST.add("createParser", 
createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.py", outputFileST.render()); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeLexers.java deleted file mode 100644 index c68c0f579c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeParsers.java deleted file mode 100644 index 489249c23c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 
The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestFullContextParsing.java deleted file mode 100644 index 05eb6038f2..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLeftRecursion.java deleted file mode 100644 index 7685599d2e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerErrors.java deleted file mode 100644 index 3708c6ff8e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerExec.java deleted file mode 100644 index d99851ccea..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestListeners.java deleted file mode 100644 index 0cbcbcafaf..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParseTrees.java deleted file mode 100644 index 311f85cd79..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserErrors.java deleted file mode 100644 index d405643dde..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserExec.java deleted file mode 100644 index 95c41f4c93..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestPerformance.java deleted file mode 100644 index 7248041e8e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalLexer.java deleted file mode 100644 index 96fbe6c2b6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalParser.java deleted file mode 100644 index a0bc6d0348..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSets.java deleted file mode 100644 index 8f569648df..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java deleted file mode 100644 index 44b0c12941..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.python.BasePythonTest; -import org.stringtemplate.v4.ST; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; - -public class BasePython3Test extends BasePythonTest { - - @Override - protected String getLanguage() { - return "Python3"; - } - - @Override - protected String getPythonExecutable() { - return "python3.7"; - } // force 3.7 - - @Override - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + " stream.fill()\n" - + " [ print(t, file=output) for t in stream.tokens ]\n" - + (showDFA ? " print(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString(), end='', file=output)\n" - : "") + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "Test.py", outputFileST.render()); - } - - @Override - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if(!parserStartRuleName.endsWith(")")) - parserStartRuleName += "()"; - ST outputFileST = new ST( - "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "from import \n" - + "from import \n" - + "from import \n" - + "\n" - + "class TreeShapeListener(ParseTreeListener):\n" - + "\n" - + " def visitTerminal(self, node:TerminalNode):\n" - + " pass\n" - + "\n" - + " def visitErrorNode(self, node:ErrorNode):\n" - + " pass\n" - + "\n" - + " def exitEveryRule(self, ctx:ParserRuleContext):\n" - + " 
pass\n" - + "\n" - + " def enterEveryRule(self, ctx:ParserRuleContext):\n" - + " for child in ctx.getChildren():\n" - + " parent = child.parentCtx\n" - + " if not isinstance(parent, RuleNode) or parent.getRuleContext() != ctx:\n" - + " raise IllegalStateException(\"Invalid parse tree shape detected.\")\n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + "" - + " parser.buildParseTrees = True\n" - + " tree = parser.\n" - + " ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree)\n" - + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - String stSource = " parser = (stream, output)\n"; - if (debug) - stSource += " parser.addErrorListener(DiagnosticErrorListener())\n"; - if (trace) - stSource += " parser.setTrace(True)\n"; - ST createParserST = new ST(stSource); - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.py", outputFileST.render()); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3Runner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3Runner.java new file mode 100644 index 0000000000..6e11344a45 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3Runner.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.python3; + +import org.antlr.v4.test.runtime.python.PythonRunner; + +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; + +public class Python3Runner extends PythonRunner { + public final static Map environment; + + static { + environment = new HashMap<>(); + environment.put("PYTHONPATH", Paths.get(getRuntimePath("Python3"), "src").toString()); + environment.put("PYTHONIOENCODING", "utf-8"); + } + + @Override + public String getLanguage() { + return "Python3"; + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3RuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3RuntimeTests.java new file mode 100644 index 0000000000..881b37fbcb --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3RuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.python3; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class Python3RuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new Python3Runner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeLexers.java deleted file mode 100644 index e9511e7423..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeParsers.java deleted file mode 100644 index d7f51a5e18..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestFullContextParsing.java deleted file mode 100644 index 4fb28eea08..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLeftRecursion.java deleted file mode 100644 index 75c1c575f9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerErrors.java deleted file mode 100644 index 285174171d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerExec.java deleted file mode 100644 index c612164a20..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestListeners.java deleted file mode 100644 index 6264fa7dd7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParseTrees.java deleted file mode 100644 index 5e62ea93ba..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserErrors.java deleted file mode 100644 index 4920c9b8d6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserExec.java deleted file mode 100644 index f041443e40..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestPerformance.java deleted file mode 100644 index 961d98b06d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalLexer.java deleted file mode 100644 index 3f0c659535..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalParser.java deleted file mode 100644 index a931fd2b96..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSets.java deleted file mode 100644 index 1a3ac2641c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/BaseRustTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/BaseRustTest.java deleted file mode 120000 index b65b090406..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/BaseRustTest.java +++ /dev/null @@ -1 +0,0 @@ -../../../../../../../../runtime/Rust/templates/BaseRustTest.java \ No newline at end of file diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/RustRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/RustRunner.java new file mode 100644 index 0000000000..48fb1c26a3 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/RustRunner.java @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.rust; + +import org.antlr.v4.test.runtime.Processor; +import org.antlr.v4.test.runtime.ProcessorResult; +import org.antlr.v4.test.runtime.RunOptions; +import org.antlr.v4.test.runtime.RuntimeRunner; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; + +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.FileSeparator; + +public class RustRunner extends RuntimeRunner { + + public static final String CARGO_TOML = "Cargo.toml"; + + @Override + public String getLanguage() { + return "Rust"; + } + + @Override + public String getLexerSuffix() { + return "_lexer"; + } + + @Override + public String getParserSuffix() { + return "_parser"; + } + + @Override + public String getBaseListenerSuffix() { + return "_base_listener"; + } + + @Override + public String getListenerSuffix() { + return "_listener"; + } + + @Override + public String getBaseVisitorSuffix() { + return "_base_visitor"; + } + + @Override + public String getVisitorSuffix() { + return "_visitor"; + } + + @Override + protected String grammarNameToFileName(String grammarName) { + return grammarName.toLowerCase(); + } + + private final static Map environment; + + private static String cachedCargo; + + private static String libName; + + private static String cargoNativePath; + + static { + environment = new HashMap<>(); + } + + @Override + protected void initRuntime(RunOptions runOptions) throws Exception { + String cachePath = getCachePath(); + Path runtimeFilesPath = Paths.get(getRuntimePath("Rust")); + String runtimeToolPath = "cargo"; + String runtimePath = runtimeFilesPath.toString(); + + Processor.run(new 
String[]{runtimeToolPath, + "clean", "--target-dir", cachePath}, runtimePath); + ProcessorResult result = Processor.run(new String[]{runtimeToolPath, + "build", "--target-dir", cachePath, "-v"}, runtimePath); + libName = findLibName(cachePath); + String tomlPath = cachePath + File.separator + "toml"; + mkdir(tomlPath); + Processor.run(new String[]{runtimeToolPath, "init"}, tomlPath); + Processor.run(new String[]{runtimeToolPath, "add", "--path", runtimePath}, tomlPath); + cachedCargo = readFile(tomlPath + FileSeparator, CARGO_TOML); + + } + + public static String findLibName(String cachePath) { + List fileNames = new ArrayList<>(); + File dir = new File(cachePath + "/debug/deps"); + + FilenameFilter filter = new FilenameFilter() { + @Override + public boolean accept(File dir, String name) { + File file = new File(dir, name); + if (!file.isFile()) { + return false; + } + return name.startsWith("libantlr4rust-") && name.endsWith(".rlib"); + } + }; + + File[] files = dir.listFiles(filter); + if (files != null) { + for (File file : files) { + return file.getName(); + } + } + return null; + } + + @Override + protected List getTargetToolOptions(RunOptions ro) { + ArrayList options = new ArrayList<>(); + options.add("-o"); + options.add(tempTestDir.resolve("src").toString()); + return options; + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + writeFile(getTempDirPath(), CARGO_TOML, cachedCargo); + + String depsPath = getCachePath() + File.separator + "debug" + File.separator + "deps"; + Exception ex = null; + try { + String[] arguments = {"rustc", + "--crate-name", "Rust", "--edition=2024", "src" + File.separator + "main.rs", "--out-dir", + "target" + File.separator + "debug", + "-L", "dependency=" + depsPath, "--extern", "antlr4rust=" + depsPath + File.separator + libName}; + Processor.run(arguments, getTempDirPath(), environment); + } catch (InterruptedException | IOException e) { + ex = e; + } + + return new 
CompiledState(generatedState, ex); + } + + @Override + public Map getExecEnvironment() { + return environment; + } + + @Override + protected String getRuntimeToolName() { + return null; + } + + @Override + protected String getExtension() { + return "rs"; + } + + @Override + protected String getTestFileName() { + return "src/main"; + } + + @Override + protected String getExecFileName() { + return Paths.get(getTempDirPath(), "target/debug/Rust").toString(); + } + +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/RustRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/RustRuntimeTests.java new file mode 100644 index 0000000000..cc87cc6d74 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/RustRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.rust; + +import org.antlr.v4.test.runtime.RuntimeRunner; +import org.antlr.v4.test.runtime.RuntimeTests; + +public class RustRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new RustRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestCompositeLexers.java deleted file mode 100644 index b4afcae6d1..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestCompositeParsers.java deleted file mode 100644 index bdf55028f4..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestFullContextParsing.java deleted file mode 100644 index a6ff193914..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLeftRecursion.java deleted file mode 100644 index 230d10d45a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLexerErrors.java deleted file mode 100644 index 3d46210464..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLexerExec.java deleted file mode 100644 index 6ab4ab1acd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestListeners.java deleted file mode 100644 index 1318d7799c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParseTrees.java deleted file mode 100644 index b36cef9186..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParserErrors.java deleted file mode 100644 index 5ab7d8d7c5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParserExec.java deleted file mode 100644 index 7adf9ecb4e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestPerformance.java deleted file mode 100644 index 475910665d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestPerformance.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - ((BaseRustTest) this.delegate).cargo_options = "--release"; - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSemPredEvalLexer.java deleted file mode 100644 index 8b11dc1edd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSemPredEvalParser.java deleted file mode 100644 index 16619de1f5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSets.java deleted file mode 100644 index 774c3ad12f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/rust/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.rust; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseRustTest()); - } - - @Parameterized.Parameters(name = "{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Rust"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/CompiledState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/CompiledState.java new file mode 100644 index 0000000000..bea78b36c1 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/CompiledState.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.Stage; + +public class CompiledState extends State { + @Override + public Stage getStage() { + return Stage.Compile; + } + + public CompiledState(GeneratedState previousState, Exception exception) { + super(previousState, exception); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/ExecutedState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/ExecutedState.java new file mode 100644 index 0000000000..af693fed92 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/ExecutedState.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.Stage; + +public class ExecutedState extends State { + @Override + public Stage getStage() { + return Stage.Execute; + } + + public final String output; + + public final String errors; + + public ExecutedState(CompiledState previousState, String output, String errors, Exception exception) { + super(previousState, exception); + this.output = output != null ? output : ""; + this.errors = errors != null ? errors : ""; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/GeneratedState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/GeneratedState.java new file mode 100644 index 0000000000..faecec5c62 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/GeneratedState.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.ErrorQueue; +import org.antlr.v4.test.runtime.GeneratedFile; +import org.antlr.v4.test.runtime.Stage; + +import java.util.List; + +import static org.antlr.v4.test.runtime.RuntimeTestUtils.joinLines; + +public class GeneratedState extends State { + @Override + public Stage getStage() { + return Stage.Generate; + } + + public final ErrorQueue errorQueue; + public final List generatedFiles; + + @Override + public boolean containsErrors() { + return errorQueue.errors.size() > 0 || super.containsErrors(); + } + + public String getErrorMessage() { + String result = super.getErrorMessage(); + + if (errorQueue.errors.size() > 0) { + result = joinLines(result, errorQueue.toString(true)); + } + + return result; + } + + public GeneratedState(ErrorQueue errorQueue, List generatedFiles, Exception exception) { + super(null, exception); + this.errorQueue = errorQueue; + this.generatedFiles = generatedFiles; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaCompiledState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaCompiledState.java new file mode 100644 index 0000000000..a6f68dcc8f --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaCompiledState.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.test.runtime.RuntimeRunner; + +import java.lang.reflect.InvocationTargetException; + +public class JavaCompiledState extends CompiledState { + public final ClassLoader loader; + public final Class lexer; + public final Class parser; + + public JavaCompiledState(GeneratedState previousState, + ClassLoader loader, + Class lexer, + Class parser, + Exception exception + ) { + super(previousState, exception); + this.loader = loader; + this.lexer = lexer; + this.parser = parser; + } + + public Pair initializeDummyLexerAndParser() + throws InvocationTargetException, NoSuchMethodException, InstantiationException, IllegalAccessException { + return initializeLexerAndParser(""); + } + + public Pair initializeLexerAndParser(String input) + throws NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException { + Lexer lexer = initializeLexer(input); + Parser parser = initializeParser(new CommonTokenStream(lexer)); + return new Pair<>(lexer, parser); + } + + public Lexer initializeLexer(String input) + throws NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException { + CharStream inputString = CharStreams.fromString(input, RuntimeRunner.InputFileName); + return lexer.getConstructor(CharStream.class).newInstance(inputString); + } + + public Parser initializeParser(CommonTokenStream tokenStream) + throws NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException { + return parser.getConstructor(TokenStream.class).newInstance(tokenStream); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaExecutedState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaExecutedState.java new file mode 100644 index 0000000000..57431ccf60 --- /dev/null +++ 
b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaExecutedState.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.runtime.tree.ParseTree; + +public class JavaExecutedState extends ExecutedState { + public final ParseTree parseTree; + + public JavaExecutedState(JavaCompiledState previousState, String output, String errors, ParseTree parseTree, + Exception exception) { + super(previousState, output, errors, exception); + this.parseTree = parseTree; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/State.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/State.java new file mode 100644 index 0000000000..eda832a690 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/State.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.Stage; + +public abstract class State { + public final State previousState; + + public final Exception exception; + + public abstract Stage getStage(); + + public boolean containsErrors() { + return exception != null; + } + + public String getErrorMessage() { + String result = "State: " + getStage() + "; "; + if (exception != null) { + result += exception.toString(); + if ( exception.getCause()!=null ) { + result += "\nCause:\n"; + result += exception.getCause().toString(); + } + } + return result; + } + + public State(State previousState, Exception exception) { + this.previousState = previousState; + this.exception = exception; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java deleted file mode 100644 index 2e71deb9b4..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.stringtemplate.v4.ST; - -import java.io.File; -import java.io.IOException; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.mkdir; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertTrue; - -public class BaseSwiftTest implements RuntimeTestSupport { - - /** - * Path of the ANTLR runtime. - */ - private static String ANTLR_RUNTIME_PATH; - - /** - * Absolute path to swift command. - */ - private static String SWIFT_CMD; - - /** - * Environment variable name for swift home. - */ - private static final String SWIFT_HOME_ENV_KEY = "SWIFT_HOME"; - - static { - Map env = System.getenv(); - String swiftHome = env.containsKey(SWIFT_HOME_ENV_KEY) ? 
env.get(SWIFT_HOME_ENV_KEY) : ""; - SWIFT_CMD = swiftHome + "swift"; - - ClassLoader loader = Thread.currentThread().getContextClassLoader(); - // build swift runtime - URL swiftRuntime = loader.getResource("Swift"); - if (swiftRuntime == null) { - throw new RuntimeException("Swift runtime file not found at:" + swiftRuntime.getPath()); - } - ANTLR_RUNTIME_PATH = swiftRuntime.getPath(); - try { - fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "build"); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - throw new RuntimeException(e); - } - - // shutdown logic - Runtime.getRuntime().addShutdownHook(new Thread() { - public void run() { - try { - fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "package", "clean"); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - } - } - }); - } - - public String tmpdir = null; - - /** - * If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - private String stderrDuringParse; - - /** - * Errors found while running antlr - */ - private StringBuilder antlrToolErrors; - - /** - * Source files used in each small swift project. 
- */ - private Set sourceFiles = new HashSet<>(); - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String propName = "antlr-swift-test-dir"; - String prop = System.getProperty(propName); - if (prop != null && prop.length() > 0) { - tmpdir = prop; - } - else { - String classSimpleName = getClass().getSimpleName(); - String threadName = Thread.currentThread().getName(); - String childPath = String.format("%s-%s-%s", classSimpleName, threadName, System.currentTimeMillis()); - tmpdir = new File(System.getProperty("java.io.tmpdir"), childPath).getAbsolutePath(); - } - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public void eraseTempDir() { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if (antlrToolErrors.length() == 0) { - return null; - } - return antlrToolErrors.toString(); - } - - @Override - public String execLexer(String grammarFileName, String grammarStr, String lexerName, String input, boolean showDFA) { - generateParser(grammarFileName, - grammarStr, - null, - lexerName); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - addSourceFiles("main.swift"); - - String projectName = "testcase-" + System.currentTimeMillis(); - String projectDir = getTmpDir() + "/" + projectName; - try { - buildProject(projectDir, projectName); - return execTest(projectDir, projectName); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - return null; - } - } - - @Override - public String execParser(String grammarFileName, String grammarStr, String parserName, String lexerName, String listenerName, String visitorName, String startRuleName, String input, boolean showDiagnosticErrors) { - 
generateParser(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - writeFile(getTmpDir(), "input", input); - return execParser(parserName, - lexerName, - startRuleName, - showDiagnosticErrors,false); - } - - private String execTest(String projectDir, String projectName) { - try { - Pair output = runProcess(projectDir, "./.build/debug/" + projectName, "input"); - if (output.b.length() > 0) { - stderrDuringParse = output.b; - } - String stdout = output.a; - return stdout.length() > 0 ? stdout : null; - } - catch (Exception e) { - System.err.println("Execution of testcase failed."); - e.printStackTrace(System.err); - } - return null; - } - - private void addSourceFiles(String... files) { - Collections.addAll(this.sourceFiles, files); - } - - private void buildProject(String projectDir, String projectName) throws IOException, InterruptedException { - mkdir(projectDir); - fastFailRunProcess(projectDir, SWIFT_CMD, "package", "init", "--type", "executable"); - for (String sourceFile: sourceFiles) { - String absPath = getTmpDir() + "/" + sourceFile; - fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/" + projectName); - } - fastFailRunProcess(getTmpDir(), "mv", "-f", "input", projectDir); - String dylibPath = ANTLR_RUNTIME_PATH + "/.build/debug/"; -// System.err.println(dylibPath); - Pair buildResult = runProcess(projectDir, SWIFT_CMD, "build", - "-Xswiftc", "-I"+dylibPath, - "-Xlinker", "-L"+dylibPath, - "-Xlinker", "-lAntlr4", - "-Xlinker", "-rpath", - "-Xlinker", dylibPath); - if (buildResult.b.length() > 0) { - throw new IOException("unit test build failed: " + buildResult.a + "\n" + buildResult.b); - } - } - - private static Pair runProcess(String execPath, String... 
args) throws IOException, InterruptedException { - Process process = Runtime.getRuntime().exec(args, null, new File(execPath)); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - int status = process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - if (status != 0) { - throw new IOException("Process exited with status " + status + ":\n" + stdoutVacuum.toString() + "\n" + stderrVacuum.toString()); - } - return new Pair<>(stdoutVacuum.toString(), stderrVacuum.toString()); - } - - private static void fastFailRunProcess(String workingDir, String... command) throws IOException, InterruptedException { - ProcessBuilder builder = new ProcessBuilder(command); - builder.directory(new File(workingDir)); - Process p = builder.start(); - int status = p.waitFor(); - if (status != 0) { - throw new IOException("Process exited with status " + status); - } - } - - private String execParser(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - addSourceFiles("main.swift"); - String projectName = "testcase-" + System.currentTimeMillis(); - String projectDir = getTmpDir() + "/" + projectName; - try { - buildProject(projectDir, projectName); - return execTest(projectDir, projectName); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - return null; - } - } - - private void writeParserTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) { - - ST outputFileST = new ST( - "import Antlr4\n" + - "import Foundation\n" + - "setbuf(stdout, nil)\n" + - "class TreeShapeListener: ParseTreeListener{\n" + - " func visitTerminal(_ 
node: TerminalNode){ }\n" + - " func visitErrorNode(_ node: ErrorNode){ }\n" + - " func enterEveryRule(_ ctx: ParserRuleContext) throws { }\n" + - " func exitEveryRule(_ ctx: ParserRuleContext) throws {\n" + - " for i in 0..\\(input)\n" + - "let tokens = CommonTokenStream(lex)\n" + - "\n" + - "parser.setBuildParseTree(true)\n" + - "\n" + - "let tree = try parser.()\n" + - "print(profiler.getDecisionInfo().description)\n" + - "try ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree)\n" - ); - ST createParserST = new ST(" let parser = try (tokens)\n"); - if (debug) { - createParserST = - new ST( - " let parser = try (tokens)\n" + - " parser.addErrorListener(DiagnosticErrorListener())\n"); - } - if (profile) { - outputFileST.add("profile", - "let profiler = ProfilingATNSimulator(parser)\n" + - "parser.setInterpreter(profiler)"); - } - else { - outputFileST.add("profile", new ArrayList()); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "main.swift", outputFileST.render()); - } - - private void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import Antlr4\n" + - "import Foundation\n" + - - "setbuf(stdout, nil)\n" + - "let args = CommandLine.arguments\n" + - "let input = try ANTLRFileStream(args[1])\n" + - "let lex = (input)\n" + - "let tokens = CommonTokenStream(lex)\n" + - - "try tokens.fill()\n" + - - "for t in tokens.getTokens() {\n" + - " print(t)\n" + - "}\n" + - (showDFA ? "print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString(), terminator: \"\" )\n" : "")); - - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "main.swift", outputFileST.render()); - } - - /** - * Generates the parser for one test case. 
- */ - private void generateParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) { - ErrorQueue equeue = antlrOnString(getTmpDir(), "Swift", grammarFileName, grammarStr, false, extraOptions); - assertTrue(equeue.errors.isEmpty()); -// System.out.println(getTmpDir()); - - List files = new ArrayList<>(); - if (lexerName != null) { - files.add(lexerName + ".swift"); - files.add(lexerName + "ATN.swift"); - } - - if (parserName != null) { - files.add(parserName + ".swift"); - files.add(parserName + "ATN.swift"); - Set optionsSet = new HashSet<>(Arrays.asList(extraOptions)); - String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarName + "Listener.swift"); - files.add(grammarName + "BaseListener.swift"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarName + "Visitor.swift"); - files.add(grammarName + "BaseVisitor.swift"); - } - } - addSourceFiles(files.toArray(new String[files.size()])); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRunner.java new file mode 100644 index 0000000000..5b3cd8f6f9 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRunner.java @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.swift; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.io.File; +import java.io.FilenameFilter; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.getTextFromResource; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.isWindows; + +public class SwiftRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Swift"; + } + + @Override + public String getTestFileName() { + return "main"; + } + + private static final String swiftRuntimePath; + private static final String buildSuffix; + private static final Map environment; + + private static final String includePath; + private static final String libraryPath; + + static { + swiftRuntimePath = getRuntimePath("Swift"); + buildSuffix = isWindows() ? "x86_64-unknown-windows-msvc" : ""; + includePath = Paths.get(swiftRuntimePath, ".build", buildSuffix, "release").toString(); + environment = new HashMap<>(); + if (isWindows()) { + libraryPath = Paths.get(includePath, "Antlr4.lib").toString(); + String path = System.getenv("PATH"); + environment.put("PATH", path == null ? 
includePath : path + ";" + includePath); + } + else { + libraryPath = includePath; + } + } + + @Override + protected String getCompilerName() { + return "swift"; + } + + @Override + protected void initRuntime(RunOptions runOptions) throws Exception { + runCommand(new String[] {getCompilerPath(), "build", "-c", "release"}, swiftRuntimePath, "build Swift runtime"); + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + Exception exception = null; + try { + String tempDirPath = getTempDirPath(); + File tempDirFile = new File(tempDirPath); + + File[] ignoredFiles = tempDirFile.listFiles(NoSwiftFileFilter.Instance); + assert ignoredFiles != null; + List excludedFiles = Arrays.stream(ignoredFiles).map(File::getName).collect(Collectors.toList()); + + String text = getTextFromResource("org/antlr/v4/test/runtime/helpers/Package.swift.stg"); + ST outputFileST = new ST(text); + outputFileST.add("excludedFiles", excludedFiles); + writeFile(tempDirPath, "Package.swift", outputFileST.render()); + + String[] buildProjectArgs = new String[]{ + getCompilerPath(), + "build", + "-c", + "release", + "-Xswiftc", + "-I" + includePath, + "-Xlinker", + "-L" + includePath, + "-Xlinker", + "-lAntlr4", + "-Xlinker", + "-rpath", + "-Xlinker", + libraryPath + }; + runCommand(buildProjectArgs, tempDirPath); + } catch (Exception e) { + exception = e; + } + + return new CompiledState(generatedState, exception); + } + + static class NoSwiftFileFilter implements FilenameFilter { + public final static NoSwiftFileFilter Instance = new NoSwiftFileFilter(); + + public boolean accept(File dir, String name) { + return !name.endsWith(".swift"); + } + } + + @Override + public String getRuntimeToolName() { + return null; + } + + @Override + public String getExecFileName() { + return Paths.get(getTempDirPath(), + ".build", + buildSuffix, + "release", + "Test" + (isWindows() ? 
".exe" : "")).toString(); + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRuntimeTests.java new file mode 100644 index 0000000000..11efa5c895 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.swift; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class SwiftRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new SwiftRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeLexers.java deleted file mode 100644 index 5dad5c8395..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeLexers.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeParsers.java deleted file mode 100644 index faaa7792eb..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeParsers.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestFullContextParsing.java deleted file mode 100644 index ad3844211f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestFullContextParsing.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLeftRecursion.java deleted file mode 100644 index cfea28411c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLeftRecursion.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LeftRecursionTests; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LeftRecursionTests.class) -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerErrors.java deleted file mode 100644 index 9e094dc486..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerErrors.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerExec.java deleted file mode 100644 index 9d5a64716c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerExec.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestListeners.java deleted file mode 100644 index 330680de94..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestListeners.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParseTrees.java deleted file mode 100644 index a03e8fa576..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParseTrees.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserErrors.java deleted file mode 100644 index 98d310e992..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserErrors.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserExec.java deleted file mode 100644 index 23b9823dfd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserExec.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestPerformance.java deleted file mode 100644 index 469877d3d8..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestPerformance.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalLexer.java deleted file mode 100644 index fe0e380a53..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalLexer.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. 
*/ -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalParser.java deleted file mode 100644 index ba231c9674..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalParser.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.ParserTests; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(ParserTests.class) -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSets.java deleted file mode 100644 index e46e9514db..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSets.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.category.LexerTests; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category(LexerTests.class) -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/typescript/TsNodeRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/typescript/TsNodeRunner.java new file mode 100644 index 0000000000..0a0622e67c --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/typescript/TsNodeRunner.java @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.typescript; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; + +import java.io.File; +import java.nio.file.Files; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.isWindows; + +public class TsNodeRunner extends RuntimeRunner { + + /* TypeScript runtime is the same as JavaScript runtime */ + private final static String NORMALIZED_JAVASCRIPT_RUNTIME_PATH = getRuntimePath("JavaScript").replace('\\', '/'); + private final static String NPM_EXEC = "npm" + (isWindows() ? ".cmd" : ""); + private final static String WEBPACK_EXEC = "webpack" + (isWindows() ? ".cmd" : ""); + + @Override + public String getLanguage() { + return "TypeScript"; + } + + + @Override + protected void initRuntime(RunOptions runOptions) throws Exception { + npmInstallTsNodeAndWebpack(); + npmLinkRuntime(); + } + + private void npmInstallTsNodeAndWebpack() throws Exception { + Processor.run(new String[] {NPM_EXEC, "--silent", "install", "-g", "typescript", "ts-node", "webpack", "webpack-cli"}, null); + } + + private void npmLinkRuntime() throws Exception { + Processor.run(new String[] {NPM_EXEC, "--silent", "install"}, NORMALIZED_JAVASCRIPT_RUNTIME_PATH); + Processor.run(new String[] {WEBPACK_EXEC, "--no-stats"}, NORMALIZED_JAVASCRIPT_RUNTIME_PATH); + Processor.run(new String[] {NPM_EXEC, "--silent", "link"}, NORMALIZED_JAVASCRIPT_RUNTIME_PATH); + } + + @Override + public String getExtension() { return "ts"; } + + @Override + protected String getExecFileName() { return getTestFileName() + ".ts"; } + + @Override + public String getBaseListenerSuffix() { return null; } + + @Override + public String getBaseVisitorSuffix() { return null; } + + @Override + public String getRuntimeToolName() { return "ts-node" + (isWindows() ? 
".cmd" : ""); } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + + try { + writeFile(getTempDirPath(), "package.json", + RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/package_ts.json")); + + writeFile(getTempDirPath(), "tsconfig.json", + RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/tsconfig.json")); + + npmInstall(); + + npmLinkAntlr4(); + + return new CompiledState(generatedState, null); + + } catch (Exception e) { + return new CompiledState(generatedState, e); + } + + } + + private void npmInstall() throws Exception { + Processor.run(new String[] {NPM_EXEC, "--silent", "install"}, getTempDirPath()); + } + + + private void npmLinkAntlr4() throws Exception { + Processor.run(new String[] {NPM_EXEC, "--silent", "link", "antlr4"}, getTempDirPath()); + } + + +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/typescript/TypeScriptRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/typescript/TypeScriptRuntimeTests.java new file mode 100644 index 0000000000..548b5c4ccb --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/typescript/TypeScriptRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.typescript; + +import org.antlr.v4.test.runtime.RuntimeRunner; +import org.antlr.v4.test.runtime.RuntimeTests; + +public class TypeScriptRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new TsNodeRunner(); + } +} diff --git a/runtime/CSharp/README.md b/runtime/CSharp/README.md deleted file mode 100644 index 94ea0440ab..0000000000 --- a/runtime/CSharp/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# C# target for ANTLR 4 - -### Note to historical users - -Versions of ANTLR 4.4.x and before managed the C# -target as part of a [separate tool provided by Sam Harwell](https://github.com/tunnelvisionlabs/antlr4cs/releases/tag/v4.3.0). -As of 4.5, we our releasing a (mono-compatible) C# target together -with the main tool. - -Mono compatibility makes it possible to use ANTLR 4 in any C# development -environment, including of course Microsoft Visual Studio, but also Xamarin Studio, which runs on MacOS X. -Given Microsoft's recent commitment to *cross-platform developer experiences*, -we believe this is a great opportunity for C# developers. - -Releasing the runtime with the tool ensures that you can get the exact same behavior across many languages: Java, C#, Python, JavaScript, Go, Swift and C++. - -## Getting Started - -### Step 1: Install Java - -The C# target for ANTLR 4 requires Java for *generating* C# code (but the applications compiled from this C# code will not require Java to be installed). -You can install *any* of the following versions of Java to use this target. - -If you already have one of the following installed, you should check to make sure the installation is up-to-date. 
- -* Java 8 runtime environment (x86 or x64) -* Java 8 development kit (x86 or x64, provided that the JRE option is also installed during the development kit installation) -* Java 7 runtime environment (x86 or x64) -* Java 7 development kit (x86 or x64, provided that the JRE option is also installed during the development kit installation) - -### Step 2: Download the tool - -You need to download the ANTLR tool from the ANTLR web site. -This is a Java archive (*.jar) used to generate the C# code from an ANTLR grammar. - - -### Step 3: Add or create a grammar file (*.g4) in your project - -To avoid confusing your IDE, we suggest setting the build action to None for this file. -See the docs and the book to learn about writing lexer and parser grammars. - - -### Step 4: Generate the C# code - -This can be done either from the cmd line, or by adding a custom pre-build command in your project. -At minimal, the cmd line should look as follows: ``java -jar antlr4-4.8.jar -Dlanguage=CSharp grammar.g4`` -This will generate the files, which you can then integrate in your project. -This is just a quick start. The tool has many useful options to control generation, please refer to its documentation. - -### Step 5: Add a reference to the ANTLR runtime in your project - -The Antlr 4 standard runtime for C# is now available from NuGet. -We trust that you know how to do add NuGet references to your project :-). -The package id is [Antlr4.Runtime.Standard](https://www.nuget.org/packages/Antlr4.Runtime.Standard/). We do not support other packages. - -Use the GUI or the following command in the Package Manager Console: - -``` -Install-Package Antlr4.Runtime.Standard -``` - - -### Step 6: You're done! - -Of course, the generated code is not going to meet your requirement by magic. -There are 3 ways to use the generated code: - - by generating a parse tree, and traversing it using a listener. This is the most common method. 
- - by generating a parse tree, and traversing it using a visitor. This requires the -visitor option, and is a bit more work. - - by providing code within your grammar, which gets executed when your input files are parsed. -While the latter works, it is no longer the recommended approach, because it is not portable, and harder to maintain. More importantly, it breaks the parsing when your code breaks. - -See the web site for examples of using the generated code. - -To learn more about ANTLR 4, read [the book](http://a.co/2n4rJlb). - -### Visual Studio integration - -If you require tighter Visual Studio integration, you can use the tools from [Tunnel Vision Labs](http://tunnelvisionlabs.com/). -(please note however that they use a different tool and runtime) - diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj deleted file mode 100644 index 50814fc50c..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj +++ /dev/null @@ -1,53 +0,0 @@ - - - The ANTLR Organization - 4.8 - en-US - netstandard1.3;net35 - $(NoWarn);CS1591;CS1574;CS1580 - true - Antlr4.Runtime.Standard - ../../Antlr4.snk - true - true - Antlr4.Runtime.Standard - ANTLR 4 .NET Standard Runtime - Eric Vergnaud, Terence Parr, Sam Harwell - The .NET Core C# ANTLR 4 runtime from the ANTLR Organization - The runtime library for parsers generated by the C# target of the standard ANTLR 4 tool. - Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- true - https://github.com/antlr/antlr4/blob/master/LICENSE.txt - https://github.com/antlr/antlr4 - https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png - https://github.com/antlr/antlr4/releases - antlr parsing grammar - 1.6.1 - false - false - false - false - false - false - false - false - false - Antlr4.Runtime - - - true - full - false - lib\Debug - - - true - lib\Release - - - DOTNETCORE;NET35PLUS;NET40PLUS;NET45PLUS - - - NET35PLUS - - diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.mono.csproj b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.mono.csproj deleted file mode 100644 index fc42da2547..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.mono.csproj +++ /dev/null @@ -1,241 +0,0 @@ - - - - - Debug - AnyCPU - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF} - Library - Properties - Antlr4.Runtime - Antlr4.Runtime.Standard - v3.5 - 512 - obj\net20\ - ..\ - true - true - ..\..\Antlr4.snk - - - true - full - false - lib\Debug - DEBUG;TRACE;NET35PLUS - prompt - 4 - 1591 0659 - - - true - lib\Release - TRACE;NET35PLUS - prompt - 4 - 1591 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - MSBuild:Compile - Antlr4.Runtime.Tree.Xpath - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.vs2013.csproj b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.vs2013.csproj deleted file mode 100644 index b9117f3673..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.vs2013.csproj +++ /dev/null @@ -1,239 +0,0 @@ - - - - - Debug - AnyCPU - 
{E1A46D9D-66CB-46E8-93B0-7FC87299ABEF} - Library - Properties - Antlr4.Runtime - Antlr4.Runtime.Standard - v3.5 - 512 - obj\net35\ - ..\ - true - - - - true - full - false - bin\net35\Debug\ - $(OutputPath)$(AssemblyName).xml - DEBUG;TRACE;NET35PLUS - prompt - 4 - 1591 - - - pdbonly - true - bin\net35\Release\ - $(OutputPath)$(AssemblyName).xml - TRACE;NET35PLUS - prompt - 4 - 1591 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrFileStream.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrFileStream.cs deleted file mode 100644 index ec59b87e6d..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrFileStream.cs +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#if !PORTABLE - -using Antlr4.Runtime.Sharpen; -using Encoding = System.Text.Encoding; -using File = System.IO.File; - -namespace Antlr4.Runtime -{ -#if COMPACT - using StreamReader = System.IO.StreamReader; -#endif - - /// - /// This is an - /// - /// that is loaded from a file all at once - /// when you construct the object. 
- /// - public class AntlrFileStream : AntlrInputStream - { - protected internal string fileName; - - /// - public AntlrFileStream(string fileName) - : this(fileName, null) - { - } - - /// - public AntlrFileStream(string fileName, Encoding encoding) - { - this.fileName = fileName; - Load(fileName, encoding); - } - - /// - public virtual void Load(string fileName, Encoding encoding) - { - if (fileName == null) - { - return; - } - - string text; -#if !COMPACT - if (encoding != null) - text = File.ReadAllText(fileName, encoding); - else - text = File.ReadAllText(fileName); -#else - if (encoding != null) - text = ReadAllText(fileName, encoding); - else - text = ReadAllText(fileName); -#endif - - data = text.ToCharArray(); - n = data.Length; - } - - public override string SourceName - { - get - { - return fileName; - } - } - -#if COMPACT - private static string ReadAllText(string path) - { - using (var reader = new StreamReader(path)) - { - return reader.ReadToEnd(); - } - } - - private static string ReadAllText(string path, Encoding encoding) - { - using (var reader = new StreamReader(path, encoding ?? Encoding.Default)) - { - return reader.ReadToEnd(); - } - } -#endif - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ErrorInfo.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ErrorInfo.cs deleted file mode 100644 index 91466fa555..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ErrorInfo.cs +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -using Antlr4.Runtime; -using Antlr4.Runtime.Atn; -using Antlr4.Runtime.Sharpen; - -namespace Antlr4.Runtime.Atn -{ - /// - /// This class represents profiling event information for a syntax error - /// identified during prediction. 
- /// - /// - /// This class represents profiling event information for a syntax error - /// identified during prediction. Syntax errors occur when the prediction - /// algorithm is unable to identify an alternative which would lead to a - /// successful parse. - /// - /// - /// - /// 4.3 - public class ErrorInfo : DecisionEventInfo - { - /// - /// Constructs a new instance of the - /// - /// class with the - /// specified detailed syntax error information. - /// - /// The decision number - /// - /// The final simulator state reached during prediction - /// prior to reaching the - /// - /// state - /// - /// The input token stream - /// The start index for the current prediction - /// The index at which the syntax error was identified - public ErrorInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex) - : base(decision, state, input, startIndex, stopIndex, state.useContext) - { - } - } -} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LL1Analyzer.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LL1Analyzer.cs deleted file mode 100644 index 06bcebb339..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LL1Analyzer.cs +++ /dev/null @@ -1,375 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -using System.Collections.Generic; -using Antlr4.Runtime.Atn; -using Antlr4.Runtime.Misc; -using Antlr4.Runtime.Sharpen; - -namespace Antlr4.Runtime.Atn -{ - public class LL1Analyzer - { - /// - /// Special value added to the lookahead sets to indicate that we hit - /// a predicate during analysis if - /// seeThruPreds==false - /// . 
- /// - public const int HitPred = TokenConstants.InvalidType; - - [NotNull] - public readonly ATN atn; - - public LL1Analyzer(ATN atn) - { - this.atn = atn; - } - - /// - /// Calculates the SLL(1) expected lookahead set for each outgoing transition - /// of an - /// - /// . The returned array has one element for each - /// outgoing transition in - /// - /// . If the closure from transition - /// i leads to a semantic predicate before matching a symbol, the - /// element at index i of the result will be - /// - /// . - /// - /// the ATN state - /// - /// the expected symbols for each outgoing transition of - /// - /// . - /// - [return: Nullable] - public virtual IntervalSet[] GetDecisionLookahead(ATNState s) - { - // System.out.println("LOOK("+s.stateNumber+")"); - if (s == null) - { - return null; - } - IntervalSet[] look = new IntervalSet[s.NumberOfTransitions]; - for (int alt = 0; alt < s.NumberOfTransitions; alt++) - { - look[alt] = new IntervalSet(); - HashSet lookBusy = new HashSet(); - bool seeThruPreds = false; - // fail to get lookahead upon pred - Look(s.Transition(alt).target, null, PredictionContext.EMPTY, look[alt], lookBusy, new BitSet(), seeThruPreds, false); - // Wipe out lookahead for this alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if (look[alt].Count == 0 || look[alt].Contains(HitPred)) - { - look[alt] = null; - } - } - return look; - } - - /// - /// Compute set of tokens that can follow - /// - /// in the ATN in the - /// specified - /// - /// . - ///

    If - /// - /// is - /// - /// and the end of the rule containing - /// - /// is reached, - /// - /// is added to the result set. - /// If - /// - /// is not - /// - /// and the end of the outermost rule is - /// reached, - /// - /// is added to the result set.

    - ///
    - /// the ATN state - /// - /// the complete parser context, or - /// - /// if the context - /// should be ignored - /// - /// - /// The set of tokens that can follow - /// - /// in the ATN in the - /// specified - /// - /// . - /// - [return: NotNull] - public virtual IntervalSet Look(ATNState s, RuleContext ctx) - { - return Look(s, null, ctx); - } - - /// - /// Compute set of tokens that can follow - /// - /// in the ATN in the - /// specified - /// - /// . - ///

    If - /// - /// is - /// - /// and the end of the rule containing - /// - /// is reached, - /// - /// is added to the result set. - /// If - /// - /// is not - /// PredictionContext#EMPTY_LOCAL - /// and the end of the outermost rule is - /// reached, - /// - /// is added to the result set.

    - ///
    - /// the ATN state - /// - /// the ATN state to stop at. This can be a - /// - /// to detect epsilon paths through a closure. - /// - /// - /// the complete parser context, or - /// - /// if the context - /// should be ignored - /// - /// - /// The set of tokens that can follow - /// - /// in the ATN in the - /// specified - /// - /// . - /// - [return: NotNull] - public virtual IntervalSet Look(ATNState s, ATNState stopState, RuleContext ctx) - { - IntervalSet r = new IntervalSet(); - bool seeThruPreds = true; - PredictionContext lookContext = ctx != null ? PredictionContext.FromRuleContext(s.atn, ctx) : null; - Look(s, stopState, lookContext, r, new HashSet(), new BitSet(), seeThruPreds, true); - return r; - } - - /// - /// Compute set of tokens that can follow - /// - /// in the ATN in the - /// specified - /// - /// . - ///

    - /// If - /// - /// is - /// - /// and - /// - /// or the end of the rule containing - /// - /// is reached, - /// - /// is added to the result set. If - /// - /// is not - /// - /// and - /// - /// is - /// - /// and - /// - /// or the end of the outermost rule is reached, - /// - /// is added to the result set. - ///

    - /// the ATN state. - /// - /// the ATN state to stop at. This can be a - /// - /// to detect epsilon paths through a closure. - /// - /// - /// The outer context, or - /// - /// if - /// the outer context should not be used. - /// - /// The result lookahead set. - /// - /// A set used for preventing epsilon closures in the ATN - /// from causing a stack overflow. Outside code should pass - /// new HashSet<ATNConfig> - /// for this argument. - /// - /// - /// A set used for preventing left recursion in the - /// ATN from causing a stack overflow. Outside code should pass - /// new BitSet() - /// for this argument. - /// - /// - /// - /// - /// to true semantic predicates as - /// implicitly - /// - /// and "see through them", otherwise - /// - /// to treat semantic predicates as opaque and add - /// - /// to the - /// result if one is encountered. - /// - /// - /// Add - /// - /// to the result if the end of the - /// outermost context is reached. This parameter has no effect if - /// - /// is - /// - /// . 
- /// - protected internal virtual void Look(ATNState s, ATNState stopState, PredictionContext ctx, IntervalSet look, HashSet lookBusy, BitSet calledRuleStack, bool seeThruPreds, bool addEOF) - { - // System.out.println("_LOOK("+s.stateNumber+", ctx="+ctx); - ATNConfig c = new ATNConfig(s, 0, ctx); - if (!lookBusy.Add(c)) - { - return; - } - if (s == stopState) - { - if (ctx == null) - { - look.Add(TokenConstants.EPSILON); - return; - } - else if (ctx.IsEmpty && addEOF) { - look.Add(TokenConstants.EOF); - return; - } - } - if (s is RuleStopState) - { - if (ctx == null) - { - look.Add(TokenConstants.EPSILON); - return; - } - else if (ctx.IsEmpty && addEOF) - { - look.Add(TokenConstants.EOF); - return; - } - if (ctx != PredictionContext.EMPTY) - { - for (int i = 0; i < ctx.Size; i++) - { - ATNState returnState = atn.states[ctx.GetReturnState(i)]; - bool removed = calledRuleStack.Get(returnState.ruleIndex); - try - { - calledRuleStack.Clear(returnState.ruleIndex); - Look(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - } - finally - { - if (removed) - { - calledRuleStack.Set(returnState.ruleIndex); - } - } - } - return; - } - } - int n = s.NumberOfTransitions; - for (int i_1 = 0; i_1 < n; i_1++) - { - Transition t = s.Transition(i_1); - if (t is RuleTransition) - { - RuleTransition ruleTransition = (RuleTransition)t; - if (calledRuleStack.Get(ruleTransition.ruleIndex)) - { - continue; - } - PredictionContext newContext = SingletonPredictionContext.Create(ctx, ruleTransition.followState.stateNumber); - try - { - calledRuleStack.Set(ruleTransition.target.ruleIndex); - Look(t.target, stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - } - finally - { - calledRuleStack.Clear(ruleTransition.target.ruleIndex); - } - } - else - { - if (t is AbstractPredicateTransition) - { - if (seeThruPreds) - { - Look(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - } - else - 
{ - look.Add(HitPred); - } - } - else - { - if (t.IsEpsilon) - { - Look(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - } - else - { - if (t is WildcardTransition) - { - look.AddAll(IntervalSet.Of(TokenConstants.MinUserTokenType, atn.maxTokenType)); - } - else - { - IntervalSet set = t.Label; - if (set != null) - { - if (t is NotSetTransition) - { - set = set.Complement(IntervalSet.Of(TokenConstants.MinUserTokenType, atn.maxTokenType)); - } - look.AddAll(set); - } - } - } - } - } - } - } - } -} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LookaheadEventInfo.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LookaheadEventInfo.cs deleted file mode 100644 index 2b5e3f30ab..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LookaheadEventInfo.cs +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -using Antlr4.Runtime; -using Antlr4.Runtime.Atn; -using Antlr4.Runtime.Sharpen; - -namespace Antlr4.Runtime.Atn -{ - /// - /// This class represents profiling event information for tracking the lookahead - /// depth required in order to make a prediction. - /// - /// - /// This class represents profiling event information for tracking the lookahead - /// depth required in order to make a prediction. - /// - /// 4.3 - public class LookaheadEventInfo : DecisionEventInfo - { - /// - /// Constructs a new instance of the - /// - /// class with - /// the specified detailed lookahead information. 
- /// - /// The decision number - /// - /// The final simulator state containing the necessary - /// information to determine the result of a prediction, or - /// - /// if - /// the final state is not available - /// - /// The input token stream - /// The start index for the current prediction - /// The index at which the prediction was finally made - /// - /// - /// - /// if the current lookahead is part of an LL - /// prediction; otherwise, - /// - /// if the current lookahead is part of - /// an SLL prediction - /// - public LookaheadEventInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex, bool fullCtx) - : base(decision, state, input, startIndex, stopIndex, fullCtx) - { - } - } -} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/RuleDependencyChecker.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/RuleDependencyChecker.cs deleted file mode 100644 index 0d77cbb4f5..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/RuleDependencyChecker.cs +++ /dev/null @@ -1,1160 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#if NET45PLUS - -using System; -using System.Collections.Generic; -using System.Linq; -using System.Reflection; -using System.Security; -using System.Text; -using Antlr4.Runtime; -using Antlr4.Runtime.Atn; -using Antlr4.Runtime.Misc; -using Antlr4.Runtime.Sharpen; - -namespace Antlr4.Runtime.Misc -{ - /// Sam Harwell - public class RuleDependencyChecker - { - private static readonly HashSet checkedAssemblies = new HashSet(); - - public static void CheckDependencies(Assembly assembly) - { - if (IsChecked(assembly)) - { - return; - } - - IEnumerable typesToCheck = GetTypesToCheck(assembly); - List> dependencies = new List>(); - foreach (TypeInfo clazz in typesToCheck) - { - dependencies.AddRange(GetDependencies(clazz)); - } - - if (dependencies.Count > 0) - { - IDictionary>> recognizerDependencies = new Dictionary>>(); - foreach (Tuple dependency in dependencies) - { - TypeInfo recognizerType = dependency.Item1.Recognizer.GetTypeInfo(); - IList> list; - if (!recognizerDependencies.TryGetValue(recognizerType, out list)) - { - list = new List>(); - recognizerDependencies[recognizerType] = list; - } - list.Add(dependency); - } - - foreach (KeyValuePair>> entry in recognizerDependencies) - { - //processingEnv.getMessager().printMessage(Diagnostic.Kind.NOTE, String.format("ANTLR 4: Validating {0} dependencies on rules in {1}.", entry.getValue().size(), entry.getKey().toString())); - CheckDependencies(entry.Value, entry.Key); - } - } - - MarkChecked(assembly); - } - - private static IEnumerable GetTypesToCheck(Assembly assembly) - { - return assembly.DefinedTypes; - } - - private static bool IsChecked(Assembly assembly) - { - lock (checkedAssemblies) - { - return checkedAssemblies.Contains(assembly.FullName); - } - } - - private static void MarkChecked(Assembly assembly) - { - lock (checkedAssemblies) - { - checkedAssemblies.Add(assembly.FullName); - } - } - - private static void CheckDependencies(IList> dependencies, TypeInfo recognizerType) - { - string[] 
ruleNames = GetRuleNames(recognizerType); - int[] ruleVersions = GetRuleVersions(recognizerType, ruleNames); - RuleDependencyChecker.RuleRelations relations = ExtractRuleRelations(recognizerType); - StringBuilder errors = new StringBuilder(); - foreach (Tuple dependency in dependencies) - { - if (!dependency.Item1.Recognizer.GetTypeInfo().IsAssignableFrom(recognizerType)) - { - continue; - } - // this is the rule in the dependency set with the highest version number - int effectiveRule = dependency.Item1.Rule; - if (effectiveRule < 0 || effectiveRule >= ruleVersions.Length) - { - string message = string.Format("Rule dependency on unknown rule {0}@{1} in {2}", dependency.Item1.Rule, dependency.Item1.Version, dependency.Item1.Recognizer.ToString()); - errors.AppendLine(dependency.Item2.ToString()); - errors.AppendLine(message); - continue; - } - Dependents dependents = Dependents.Self | dependency.Item1.Dependents; - ReportUnimplementedDependents(errors, dependency, dependents); - BitSet @checked = new BitSet(); - int highestRequiredDependency = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, effectiveRule, null); - if ((dependents & Dependents.Parents) != 0) - { - BitSet parents = relations.parents[dependency.Item1.Rule]; - for (int parent = parents.NextSetBit(0); parent >= 0; parent = parents.NextSetBit(parent + 1)) - { - if (parent < 0 || parent >= ruleVersions.Length || @checked.Get(parent)) - { - continue; - } - @checked.Set(parent); - int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, parent, "parent"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - if ((dependents & Dependents.Children) != 0) - { - BitSet children = relations.children[dependency.Item1.Rule]; - for (int child = children.NextSetBit(0); child >= 0; child = children.NextSetBit(child + 1)) - { - if (child < 0 || child >= ruleVersions.Length || @checked.Get(child)) - { - continue; - } - @checked.Set(child); - 
int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, child, "child"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - if ((dependents & Dependents.Ancestors) != 0) - { - BitSet ancestors = relations.GetAncestors(dependency.Item1.Rule); - for (int ancestor = ancestors.NextSetBit(0); ancestor >= 0; ancestor = ancestors.NextSetBit(ancestor + 1)) - { - if (ancestor < 0 || ancestor >= ruleVersions.Length || @checked.Get(ancestor)) - { - continue; - } - @checked.Set(ancestor); - int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, ancestor, "ancestor"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - if ((dependents & Dependents.Descendants) != 0) - { - BitSet descendants = relations.GetDescendants(dependency.Item1.Rule); - for (int descendant = descendants.NextSetBit(0); descendant >= 0; descendant = descendants.NextSetBit(descendant + 1)) - { - if (descendant < 0 || descendant >= ruleVersions.Length || @checked.Get(descendant)) - { - continue; - } - @checked.Set(descendant); - int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, descendant, "descendant"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - int declaredVersion = dependency.Item1.Version; - if (declaredVersion > highestRequiredDependency) - { - string message = string.Format("Rule dependency version mismatch: {0} has maximum dependency version {1} (expected {2}) in {3}", ruleNames[dependency.Item1.Rule], highestRequiredDependency, declaredVersion, dependency.Item1.Recognizer.ToString()); - errors.AppendLine(dependency.Item2.ToString()); - errors.AppendLine(message); - } - } - if (errors.Length > 0) - { - throw new InvalidOperationException(errors.ToString()); - } - } - - private static readonly Dependents ImplementedDependents = Dependents.Self | Dependents.Parents | Dependents.Children | 
Dependents.Ancestors | Dependents.Descendants; - - private static void ReportUnimplementedDependents(StringBuilder errors, Tuple dependency, Dependents dependents) - { - Dependents unimplemented = dependents; - unimplemented &= ~ImplementedDependents; - if (unimplemented != Dependents.None) - { - string message = string.Format("Cannot validate the following dependents of rule {0}: {1}", dependency.Item1.Rule, unimplemented); - errors.AppendLine(message); - } - } - - private static int CheckDependencyVersion(StringBuilder errors, Tuple dependency, string[] ruleNames, int[] ruleVersions, int relatedRule, string relation) - { - string ruleName = ruleNames[dependency.Item1.Rule]; - string path; - if (relation == null) - { - path = ruleName; - } - else - { - string mismatchedRuleName = ruleNames[relatedRule]; - path = string.Format("rule {0} ({1} of {2})", mismatchedRuleName, relation, ruleName); - } - int declaredVersion = dependency.Item1.Version; - int actualVersion = ruleVersions[relatedRule]; - if (actualVersion > declaredVersion) - { - string message = string.Format("Rule dependency version mismatch: {0} has version {1} (expected <= {2}) in {3}", path, actualVersion, declaredVersion, dependency.Item1.Recognizer.ToString()); - errors.AppendLine(dependency.Item2.ToString()); - errors.AppendLine(message); - } - return actualVersion; - } - - private static int[] GetRuleVersions(TypeInfo recognizerClass, string[] ruleNames) - { - int[] versions = new int[ruleNames.Length]; - IEnumerable fields = recognizerClass.DeclaredFields; - foreach (FieldInfo field in fields) - { - bool isStatic = field.IsStatic; - bool isInteger = field.FieldType == typeof(int); - if (isStatic && isInteger && field.Name.StartsWith("RULE_")) - { - try - { - string name = field.Name.Substring("RULE_".Length); - if (name.Length == 0 || !System.Char.IsLower(name[0])) - { - continue; - } - int index = (int)field.GetValue(null); - if (index < 0 || index >= versions.Length) - { - object[] @params = new 
object[] { index, field.Name, recognizerClass.Name }; -#if false - Logger.Log(Level.Warning, "Rule index {0} for rule ''{1}'' out of bounds for recognizer {2}.", @params); -#endif - continue; - } - MethodInfo ruleMethod = GetRuleMethod(recognizerClass, name); - if (ruleMethod == null) - { - object[] @params = new object[] { name, recognizerClass.Name }; -#if false - Logger.Log(Level.Warning, "Could not find rule method for rule ''{0}'' in recognizer {1}.", @params); -#endif - continue; - } - RuleVersionAttribute ruleVersion = ruleMethod.GetCustomAttribute(); - int version = ruleVersion != null ? ruleVersion.Version : 0; - versions[index] = version; - } - catch (ArgumentException) - { -#if false - Logger.Log(Level.Warning, null, ex); -#else - throw; -#endif - } - catch (MemberAccessException) - { -#if false - Logger.Log(Level.Warning, null, ex); -#else - throw; -#endif - } - } - } - return versions; - } - - private static MethodInfo GetRuleMethod(TypeInfo recognizerClass, string name) - { - IEnumerable declaredMethods = recognizerClass.DeclaredMethods; - foreach (MethodInfo method in declaredMethods) - { - if (method.Name.Equals(name) && method.GetCustomAttribute() != null) - { - return method; - } - } - return null; - } - - private static string[] GetRuleNames(TypeInfo recognizerClass) - { - FieldInfo ruleNames = recognizerClass.DeclaredFields.First(i => i.Name == "ruleNames"); - return (string[])ruleNames.GetValue(null); - } - - public static IList> GetDependencies(TypeInfo clazz) - { - IList> result = new List>(); - - GetElementDependencies(AsCustomAttributeProvider(clazz), result); - foreach (ConstructorInfo ctor in clazz.DeclaredConstructors) - { - GetElementDependencies(AsCustomAttributeProvider(ctor), result); - foreach (ParameterInfo parameter in ctor.GetParameters()) - GetElementDependencies(AsCustomAttributeProvider(parameter), result); - } - - foreach (FieldInfo field in clazz.DeclaredFields) - { - GetElementDependencies(AsCustomAttributeProvider(field), 
result); - } - - foreach (MethodInfo method in clazz.DeclaredMethods) - { - GetElementDependencies(AsCustomAttributeProvider(method), result); -#if COMPACT - if (method.ReturnTypeCustomAttributes != null) - GetElementDependencies(AsCustomAttributeProvider(method.ReturnTypeCustomAttributes), result); -#else - if (method.ReturnParameter != null) - GetElementDependencies(AsCustomAttributeProvider(method.ReturnParameter), result); -#endif - - foreach (ParameterInfo parameter in method.GetParameters()) - GetElementDependencies(AsCustomAttributeProvider(parameter), result); - } - - return result; - } - - private static void GetElementDependencies(ICustomAttributeProvider annotatedElement, IList> result) - { - foreach (RuleDependencyAttribute dependency in annotatedElement.GetCustomAttributes(typeof(RuleDependencyAttribute), true)) - { - result.Add(Tuple.Create(dependency, annotatedElement)); - } - } - - private static RuleDependencyChecker.RuleRelations ExtractRuleRelations(TypeInfo recognizer) - { - string serializedATN = GetSerializedATN(recognizer); - if (serializedATN == null) - { - return null; - } - ATN atn = new ATNDeserializer().Deserialize(serializedATN.ToCharArray()); - RuleDependencyChecker.RuleRelations relations = new RuleDependencyChecker.RuleRelations(atn.ruleToStartState.Length); - foreach (ATNState state in atn.states) - { - if (!state.epsilonOnlyTransitions) - { - continue; - } - foreach (Transition transition in state.transitions) - { - if (transition.TransitionType != TransitionType.RULE) - { - continue; - } - RuleTransition ruleTransition = (RuleTransition)transition; - relations.AddRuleInvocation(state.ruleIndex, ruleTransition.target.ruleIndex); - } - } - return relations; - } - - private static string GetSerializedATN(TypeInfo recognizerClass) - { - FieldInfo serializedAtnField = recognizerClass.DeclaredFields.First(i => i.Name == "_serializedATN"); - if (serializedAtnField != null) - return (string)serializedAtnField.GetValue(null); - - if 
(recognizerClass.BaseType != null) - return GetSerializedATN(recognizerClass.BaseType.GetTypeInfo()); - - return null; - } - - private sealed class RuleRelations - { - public readonly BitSet[] parents; - - public readonly BitSet[] children; - - public RuleRelations(int ruleCount) - { - parents = new BitSet[ruleCount]; - for (int i = 0; i < ruleCount; i++) - { - parents[i] = new BitSet(); - } - children = new BitSet[ruleCount]; - for (int i_1 = 0; i_1 < ruleCount; i_1++) - { - children[i_1] = new BitSet(); - } - } - - public bool AddRuleInvocation(int caller, int callee) - { - if (caller < 0) - { - // tokens rule - return false; - } - if (children[caller].Get(callee)) - { - // already added - return false; - } - children[caller].Set(callee); - parents[callee].Set(caller); - return true; - } - - public BitSet GetAncestors(int rule) - { - BitSet ancestors = new BitSet(); - ancestors.Or(parents[rule]); - while (true) - { - int cardinality = ancestors.Cardinality(); - for (int i = ancestors.NextSetBit(0); i >= 0; i = ancestors.NextSetBit(i + 1)) - { - ancestors.Or(parents[i]); - } - if (ancestors.Cardinality() == cardinality) - { - // nothing changed - break; - } - } - return ancestors; - } - - public BitSet GetDescendants(int rule) - { - BitSet descendants = new BitSet(); - descendants.Or(children[rule]); - while (true) - { - int cardinality = descendants.Cardinality(); - for (int i = descendants.NextSetBit(0); i >= 0; i = descendants.NextSetBit(i + 1)) - { - descendants.Or(children[i]); - } - if (descendants.Cardinality() == cardinality) - { - // nothing changed - break; - } - } - return descendants; - } - } - - private RuleDependencyChecker() - { - } - -#if PORTABLE || DOTNETCORE - public interface ICustomAttributeProvider - { - object[] GetCustomAttributes(Type attributeType, bool inherit); - } - - protected static ICustomAttributeProvider AsCustomAttributeProvider(TypeInfo type) - { - return new TypeCustomAttributeProvider(type); - } - - protected static 
ICustomAttributeProvider AsCustomAttributeProvider(MethodBase method) - { - return new MethodBaseCustomAttributeProvider(method); - } - - protected static ICustomAttributeProvider AsCustomAttributeProvider(ParameterInfo parameter) - { - return new ParameterInfoCustomAttributeProvider(parameter); - } - - protected static ICustomAttributeProvider AsCustomAttributeProvider(FieldInfo field) - { - return new FieldInfoCustomAttributeProvider(field); - } - - protected sealed class TypeCustomAttributeProvider : ICustomAttributeProvider - { - private readonly TypeInfo _provider; - - public TypeCustomAttributeProvider(TypeInfo provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type attributeType, bool inherit) - { - return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); - } - } - - protected sealed class MethodBaseCustomAttributeProvider : ICustomAttributeProvider - { - private readonly MethodBase _provider; - - public MethodBaseCustomAttributeProvider(MethodBase provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type attributeType, bool inherit) - { - return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); - } - } - - protected sealed class ParameterInfoCustomAttributeProvider : ICustomAttributeProvider - { - private readonly ParameterInfo _provider; - - public ParameterInfoCustomAttributeProvider(ParameterInfo provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type attributeType, bool inherit) - { - return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); - } - } - - protected sealed class FieldInfoCustomAttributeProvider : ICustomAttributeProvider - { - private readonly FieldInfo _provider; - - public FieldInfoCustomAttributeProvider(FieldInfo provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type attributeType, bool inherit) - { - return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); 
- } - } -#else - protected static ICustomAttributeProvider AsCustomAttributeProvider(ICustomAttributeProvider obj) - { - return obj; - } -#endif - } -} - -#else - -using System; -using System.Collections.Generic; -using System.Reflection; -using System.Security; -using System.Text; -using Antlr4.Runtime; -using Antlr4.Runtime.Atn; -using Antlr4.Runtime.Misc; -using Antlr4.Runtime.Sharpen; - -namespace Antlr4.Runtime.Misc -{ - /// Sam Harwell - public class RuleDependencyChecker - { -#if false - private static readonly Logger Logger = Logger.GetLogger(typeof(Antlr4.Runtime.Misc.RuleDependencyChecker).FullName); -#endif - - private const BindingFlags AllDeclaredStaticMembers = BindingFlags.DeclaredOnly | BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Static; - private const BindingFlags AllDeclaredMembers = AllDeclaredStaticMembers | BindingFlags.Instance; - private static readonly HashSet checkedAssemblies = new HashSet(); - - public static void CheckDependencies(Assembly assembly) - { - if (IsChecked(assembly)) - { - return; - } - - IList typesToCheck = GetTypesToCheck(assembly); - ArrayList> dependencies = new ArrayList>(); - foreach (Type clazz in typesToCheck) - { - dependencies.AddRange(GetDependencies(clazz)); - } - - if (dependencies.Count > 0) - { - IDictionary>> recognizerDependencies = new Dictionary>>(); - foreach (Tuple dependency in dependencies) - { - Type recognizerType = dependency.Item1.Recognizer; - IList> list; - if (!recognizerDependencies.TryGetValue(recognizerType, out list)) - { - list = new ArrayList>(); - recognizerDependencies[recognizerType] = list; - } - list.Add(dependency); - } - - foreach (KeyValuePair>> entry in recognizerDependencies) - { - //processingEnv.getMessager().printMessage(Diagnostic.Kind.NOTE, String.format("ANTLR 4: Validating {0} dependencies on rules in {1}.", entry.getValue().size(), entry.getKey().toString())); - CheckDependencies(entry.Value, entry.Key); - } - } - - MarkChecked(assembly); - } - - private 
static IList GetTypesToCheck(Assembly assembly) - { - return assembly.GetTypes(); - } - - private static bool IsChecked(Assembly assembly) - { - lock (checkedAssemblies) - { - return checkedAssemblies.Contains(assembly.FullName); - } - } - - private static void MarkChecked(Assembly assembly) - { - lock (checkedAssemblies) - { - checkedAssemblies.Add(assembly.FullName); - } - } - - private static void CheckDependencies(IList> dependencies, Type recognizerType) - { - string[] ruleNames = GetRuleNames(recognizerType); - int[] ruleVersions = GetRuleVersions(recognizerType, ruleNames); - RuleDependencyChecker.RuleRelations relations = ExtractRuleRelations(recognizerType); - StringBuilder errors = new StringBuilder(); - foreach (Tuple dependency in dependencies) - { -#if DOTNETCORE - if (!dependency.Item1.Recognizer.GetTypeInfo().IsAssignableFrom(recognizerType)) -#else - if (!dependency.Item1.Recognizer.IsAssignableFrom(recognizerType)) -#endif - { - continue; - } - // this is the rule in the dependency set with the highest version number - int effectiveRule = dependency.Item1.Rule; - if (effectiveRule < 0 || effectiveRule >= ruleVersions.Length) - { - string message = string.Format("Rule dependency on unknown rule {0}@{1} in {2}", dependency.Item1.Rule, dependency.Item1.Version, dependency.Item1.Recognizer.ToString()); - errors.AppendLine(dependency.Item2.ToString()); - errors.AppendLine(message); - continue; - } - Dependents dependents = Dependents.Self | dependency.Item1.Dependents; - ReportUnimplementedDependents(errors, dependency, dependents); - BitSet @checked = new BitSet(); - int highestRequiredDependency = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, effectiveRule, null); - if ((dependents & Dependents.Parents) != 0) - { - BitSet parents = relations.parents[dependency.Item1.Rule]; - for (int parent = parents.NextSetBit(0); parent >= 0; parent = parents.NextSetBit(parent + 1)) - { - if (parent < 0 || parent >= ruleVersions.Length || 
@checked.Get(parent)) - { - continue; - } - @checked.Set(parent); - int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, parent, "parent"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - if ((dependents & Dependents.Children) != 0) - { - BitSet children = relations.children[dependency.Item1.Rule]; - for (int child = children.NextSetBit(0); child >= 0; child = children.NextSetBit(child + 1)) - { - if (child < 0 || child >= ruleVersions.Length || @checked.Get(child)) - { - continue; - } - @checked.Set(child); - int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, child, "child"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - if ((dependents & Dependents.Ancestors) != 0) - { - BitSet ancestors = relations.GetAncestors(dependency.Item1.Rule); - for (int ancestor = ancestors.NextSetBit(0); ancestor >= 0; ancestor = ancestors.NextSetBit(ancestor + 1)) - { - if (ancestor < 0 || ancestor >= ruleVersions.Length || @checked.Get(ancestor)) - { - continue; - } - @checked.Set(ancestor); - int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, ancestor, "ancestor"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - if ((dependents & Dependents.Descendants) != 0) - { - BitSet descendants = relations.GetDescendants(dependency.Item1.Rule); - for (int descendant = descendants.NextSetBit(0); descendant >= 0; descendant = descendants.NextSetBit(descendant + 1)) - { - if (descendant < 0 || descendant >= ruleVersions.Length || @checked.Get(descendant)) - { - continue; - } - @checked.Set(descendant); - int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, descendant, "descendant"); - highestRequiredDependency = Math.Max(highestRequiredDependency, required); - } - } - int declaredVersion = dependency.Item1.Version; - if (declaredVersion > 
highestRequiredDependency) - { - string message = string.Format("Rule dependency version mismatch: {0} has maximum dependency version {1} (expected {2}) in {3}", ruleNames[dependency.Item1.Rule], highestRequiredDependency, declaredVersion, dependency.Item1.Recognizer.ToString()); - errors.AppendLine(dependency.Item2.ToString()); - errors.AppendLine(message); - } - } - if (errors.Length > 0) - { - throw new InvalidOperationException(errors.ToString()); - } - } - - private static readonly Dependents ImplementedDependents = Dependents.Self | Dependents.Parents | Dependents.Children | Dependents.Ancestors | Dependents.Descendants; - - private static void ReportUnimplementedDependents(StringBuilder errors, Tuple dependency, Dependents dependents) - { - Dependents unimplemented = dependents; - unimplemented &= ~ImplementedDependents; - if (unimplemented != Dependents.None) - { - string message = string.Format("Cannot validate the following dependents of rule {0}: {1}", dependency.Item1.Rule, unimplemented); - errors.AppendLine(message); - } - } - - private static int CheckDependencyVersion(StringBuilder errors, Tuple dependency, string[] ruleNames, int[] ruleVersions, int relatedRule, string relation) - { - string ruleName = ruleNames[dependency.Item1.Rule]; - string path; - if (relation == null) - { - path = ruleName; - } - else - { - string mismatchedRuleName = ruleNames[relatedRule]; - path = string.Format("rule {0} ({1} of {2})", mismatchedRuleName, relation, ruleName); - } - int declaredVersion = dependency.Item1.Version; - int actualVersion = ruleVersions[relatedRule]; - if (actualVersion > declaredVersion) - { - string message = string.Format("Rule dependency version mismatch: {0} has version {1} (expected <= {2}) in {3}", path, actualVersion, declaredVersion, dependency.Item1.Recognizer.ToString()); - errors.AppendLine(dependency.Item2.ToString()); - errors.AppendLine(message); - } - return actualVersion; - } - - private static int[] GetRuleVersions(Type 
recognizerClass, string[] ruleNames) - { - int[] versions = new int[ruleNames.Length]; -#if DOTNETCORE - FieldInfo[] fields = recognizerClass.GetTypeInfo().GetFields(); -#else - FieldInfo[] fields = recognizerClass.GetFields(); -#endif - foreach (FieldInfo field in fields) - { - bool isStatic = field.IsStatic; - bool isInteger = field.FieldType == typeof(int); - if (isStatic && isInteger && field.Name.StartsWith("RULE_")) - { - try - { - string name = field.Name.Substring("RULE_".Length); - if (name.Length == 0 || !System.Char.IsLower(name[0])) - { - continue; - } - int index = (int)field.GetValue(null); - if (index < 0 || index >= versions.Length) - { -#if false - object[] @params = new object[] { index, field.Name, recognizerClass.Name }; - Logger.Log(Level.Warning, "Rule index {0} for rule ''{1}'' out of bounds for recognizer {2}.", @params); -#endif - continue; - } - MethodInfo ruleMethod = GetRuleMethod(recognizerClass, name); - if (ruleMethod == null) - { -#if false - object[] @params = new object[] { name, recognizerClass.Name }; - Logger.Log(Level.Warning, "Could not find rule method for rule ''{0}'' in recognizer {1}.", @params); -#endif - continue; - } -#if DOTNETCORE - RuleVersionAttribute ruleVersion = ruleMethod.GetCustomAttribute(); -#else - RuleVersionAttribute ruleVersion = (RuleVersionAttribute)Attribute.GetCustomAttribute(ruleMethod, typeof(RuleVersionAttribute)); -#endif - int version = ruleVersion != null ? 
ruleVersion.Version : 0; - versions[index] = version; - } - catch (ArgumentException) - { -#if false - Logger.Log(Level.Warning, null, ex); -#else - throw; -#endif - } - catch (MemberAccessException) - { -#if false - Logger.Log(Level.Warning, null, ex); -#else - throw; -#endif - } - } - } - return versions; - } - - private static MethodInfo GetRuleMethod(Type recognizerClass, string name) - { -#if DOTNETCORE - MethodInfo[] declaredMethods = recognizerClass.GetTypeInfo().GetMethods(); -#else - MethodInfo[] declaredMethods = recognizerClass.GetMethods(); -#endif - foreach (MethodInfo method in declaredMethods) - { -#if DOTNETCORE - if (method.Name.Equals(name) && method.IsDefined(typeof(RuleVersionAttribute))) -#else - if (method.Name.Equals(name) && Attribute.IsDefined(method, typeof(RuleVersionAttribute))) -#endif - { - return method; - } - } - return null; - } - - private static string[] GetRuleNames(Type recognizerClass) - { -#if DOTNETCORE - FieldInfo ruleNames = recognizerClass.GetTypeInfo().GetField("ruleNames"); -#else - FieldInfo ruleNames = recognizerClass.GetField("ruleNames"); -#endif - return (string[])ruleNames.GetValue(null); - } - - public static IList> GetDependencies(Type clazz) - { - IList> result = new ArrayList>(); - -#if DOTNETCORE - GetElementDependencies(AsCustomAttributeProvider(clazz.GetTypeInfo()), result); -#else - GetElementDependencies(AsCustomAttributeProvider(clazz), result); -#endif -#if DOTNETCORE - foreach (ConstructorInfo ctor in clazz.GetTypeInfo().GetConstructors(AllDeclaredMembers)) -#else - foreach (ConstructorInfo ctor in clazz.GetConstructors(AllDeclaredMembers)) -#endif - { - GetElementDependencies(AsCustomAttributeProvider(ctor), result); - foreach (ParameterInfo parameter in ctor.GetParameters()) - GetElementDependencies(AsCustomAttributeProvider(parameter), result); - } - -#if DOTNETCORE - foreach (FieldInfo field in clazz.GetTypeInfo().GetFields(AllDeclaredMembers)) -#else - foreach (FieldInfo field in 
clazz.GetFields(AllDeclaredMembers)) -#endif - { - GetElementDependencies(AsCustomAttributeProvider(field), result); - } - -#if DOTNETCORE - foreach (MethodInfo method in clazz.GetTypeInfo().GetMethods(AllDeclaredMembers)) -#else - foreach (MethodInfo method in clazz.GetMethods(AllDeclaredMembers)) -#endif - { - GetElementDependencies(AsCustomAttributeProvider(method), result); -#if COMPACT - if (method.ReturnTypeCustomAttributes != null) - GetElementDependencies(AsCustomAttributeProvider(method.ReturnTypeCustomAttributes), result); -#else - if (method.ReturnParameter != null) - GetElementDependencies(AsCustomAttributeProvider(method.ReturnParameter), result); -#endif - - foreach (ParameterInfo parameter in method.GetParameters()) - GetElementDependencies(AsCustomAttributeProvider(parameter), result); - } - - return result; - } - - private static void GetElementDependencies(ICustomAttributeProvider annotatedElement, IList> result) - { - foreach (RuleDependencyAttribute dependency in annotatedElement.GetCustomAttributes(typeof(RuleDependencyAttribute), true)) - { - result.Add(Tuple.Create(dependency, annotatedElement)); - } - } - - private static RuleDependencyChecker.RuleRelations ExtractRuleRelations(Type recognizer) - { - string serializedATN = GetSerializedATN(recognizer); - if (serializedATN == null) - { - return null; - } - ATN atn = new ATNDeserializer().Deserialize(serializedATN.ToCharArray()); - RuleDependencyChecker.RuleRelations relations = new RuleDependencyChecker.RuleRelations(atn.ruleToStartState.Length); - foreach (ATNState state in atn.states) - { - if (!state.epsilonOnlyTransitions) - { - continue; - } - foreach (Transition transition in state.transitions) - { - if (transition.TransitionType != TransitionType.RULE) - { - continue; - } - RuleTransition ruleTransition = (RuleTransition)transition; - relations.AddRuleInvocation(state.ruleIndex, ruleTransition.target.ruleIndex); - } - } - return relations; - } - - private static string 
GetSerializedATN(Type recognizerClass) - { -#if DOTNETCORE - FieldInfo serializedAtnField = recognizerClass.GetTypeInfo().GetField("_serializedATN", AllDeclaredStaticMembers); -#else - FieldInfo serializedAtnField = recognizerClass.GetField("_serializedATN", AllDeclaredStaticMembers); -#endif - if (serializedAtnField != null) - return (string)serializedAtnField.GetValue(null); - -#if DOTNETCORE - if (recognizerClass.GetTypeInfo().BaseType != null) - return GetSerializedATN(recognizerClass.GetTypeInfo().BaseType); -#else - if (recognizerClass.BaseType != null) - return GetSerializedATN(recognizerClass.BaseType); -#endif - - return null; - } - - private sealed class RuleRelations - { - public readonly BitSet[] parents; - - public readonly BitSet[] children; - - public RuleRelations(int ruleCount) - { - parents = new BitSet[ruleCount]; - for (int i = 0; i < ruleCount; i++) - { - parents[i] = new BitSet(); - } - children = new BitSet[ruleCount]; - for (int i_1 = 0; i_1 < ruleCount; i_1++) - { - children[i_1] = new BitSet(); - } - } - - public bool AddRuleInvocation(int caller, int callee) - { - if (caller < 0) - { - // tokens rule - return false; - } - if (children[caller].Get(callee)) - { - // already added - return false; - } - children[caller].Set(callee); - parents[callee].Set(caller); - return true; - } - - public BitSet GetAncestors(int rule) - { - BitSet ancestors = new BitSet(); - ancestors.Or(parents[rule]); - while (true) - { - int cardinality = ancestors.Cardinality(); - for (int i = ancestors.NextSetBit(0); i >= 0; i = ancestors.NextSetBit(i + 1)) - { - ancestors.Or(parents[i]); - } - if (ancestors.Cardinality() == cardinality) - { - // nothing changed - break; - } - } - return ancestors; - } - - public BitSet GetDescendants(int rule) - { - BitSet descendants = new BitSet(); - descendants.Or(children[rule]); - while (true) - { - int cardinality = descendants.Cardinality(); - for (int i = descendants.NextSetBit(0); i >= 0; i = descendants.NextSetBit(i + 1)) 
- { - descendants.Or(children[i]); - } - if (descendants.Cardinality() == cardinality) - { - // nothing changed - break; - } - } - return descendants; - } - } - - private RuleDependencyChecker() - { - } - -#if PORTABLE - public interface ICustomAttributeProvider - { - object[] GetCustomAttributes(Type attributeType, bool inherit); - } - - protected static ICustomAttributeProvider AsCustomAttributeProvider(Type type) - { - return new TypeCustomAttributeProvider(type); - } - - protected static ICustomAttributeProvider AsCustomAttributeProvider(MethodBase method) - { - return new MethodBaseCustomAttributeProvider(method); - } - - protected static ICustomAttributeProvider AsCustomAttributeProvider(ParameterInfo parameter) - { - return new ParameterInfoCustomAttributeProvider(parameter); - } - - protected static ICustomAttributeProvider AsCustomAttributeProvider(FieldInfo field) - { - return new FieldInfoCustomAttributeProvider(field); - } - - protected sealed class TypeCustomAttributeProvider : ICustomAttributeProvider - { - private readonly Type _provider; - - public TypeCustomAttributeProvider(Type provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type attributeType, bool inherit) - { - return Attribute.GetCustomAttributes(_provider, attributeType, inherit); - } - } - - protected sealed class MethodBaseCustomAttributeProvider : ICustomAttributeProvider - { - private readonly MethodBase _provider; - - public MethodBaseCustomAttributeProvider(MethodBase provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type attributeType, bool inherit) - { - return Attribute.GetCustomAttributes(_provider, attributeType, inherit); - } - } - - protected sealed class ParameterInfoCustomAttributeProvider : ICustomAttributeProvider - { - private readonly ParameterInfo _provider; - - public ParameterInfoCustomAttributeProvider(ParameterInfo provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type 
attributeType, bool inherit) - { - return Attribute.GetCustomAttributes(_provider, attributeType, inherit); - } - } - - protected sealed class FieldInfoCustomAttributeProvider : ICustomAttributeProvider - { - private readonly FieldInfo _provider; - - public FieldInfoCustomAttributeProvider(FieldInfo provider) - { - _provider = provider; - } - - public object[] GetCustomAttributes(Type attributeType, bool inherit) - { - return Attribute.GetCustomAttributes(_provider, attributeType, inherit); - } - } -#else - protected static ICustomAttributeProvider AsCustomAttributeProvider(ICustomAttributeProvider obj) - { - return obj; - } -#endif - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs deleted file mode 100644 index 1a84599291..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -using System; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; - -// General Information about an assembly is controlled through the following -// set of attributes. Change these attribute values to modify the information -// associated with an assembly. -[assembly: AssemblyTitle("Antlr4.Runtime")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("Antlr organization")] -[assembly: AssemblyProduct("Antlr4.Runtime")] -[assembly: AssemblyCopyright("Antlr organization")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] -[assembly: CLSCompliant(true)] - -#if !PORTABLE || NET45PLUS -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. 
If you need to access a type in this assembly from -// COM, set the ComVisible attribute to true on that type. -[assembly: ComVisible(false)] - -#if !PORTABLE -// The following GUID is for the ID of the typelib if this project is exposed to COM -[assembly: Guid("bc228eb9-e79c-4e5a-a1b9-0434ea566bab")] -#endif -#endif - -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values or you can default the Build and Revision Numbers -// by using the '*' as shown below: -// [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("4.8")] -#if !COMPACT -[assembly: AssemblyFileVersion("4.8")] -[assembly: AssemblyInformationalVersion("4.8")] -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Collections.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Collections.cs deleted file mode 100644 index d39298f154..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Collections.cs +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -namespace Antlr4.Runtime.Sharpen -{ - using System; - using System.Collections.Generic; - using System.Collections.ObjectModel; - - internal static class Collections - { - public static T[] EmptyList() - { - return EmptyListImpl.Instance; - } - -#if NET45PLUS - public static ReadOnlyDictionary EmptyMap() -#else - public static IDictionary EmptyMap() -#endif - { - return EmptyMapImpl.Instance; - } - - public static ReadOnlyCollection SingletonList(T item) - { - return new ReadOnlyCollection(new T[] { item }); - } - -#if NET45PLUS - public static ReadOnlyDictionary SingletonMap(TKey key, TValue value) -#else - public static IDictionary SingletonMap(TKey key, TValue value) -#endif - { -#if NET45PLUS - return new ReadOnlyDictionary(new Dictionary { { key, value } }); -#else - return new Dictionary { { key, value } }; -#endif - } - - private static class EmptyListImpl - { - public static readonly T[] Instance = new T[0]; - } - - private static class EmptyMapImpl - { -#if NET45PLUS - public static readonly ReadOnlyDictionary Instance = - new ReadOnlyDictionary(new Dictionary()); -#else - public static IDictionary Instance - { - get - { - return new Dictionary(); - } - } -#endif - } - } - -} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/CollectionDebuggerView.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/CollectionDebuggerView.cs deleted file mode 100644 index b1c0c8664e..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/CollectionDebuggerView.cs +++ /dev/null @@ -1,83 +0,0 @@ -// -// CollectionDebuggerView.cs -// -// Authors: -// Marek Safar -// -// Copyright (C) 2009 Novell, Inc (http://www.novell.com) -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// 
distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -// - -#if !NET40PLUS || (PORTABLE && !WINRT) - -using System; -using System.Collections.Generic; -using System.Diagnostics; - -namespace Antlr4.Runtime.Sharpen -{ - // - // Custom debugger type proxy to display collections as arrays - // - internal sealed class CollectionDebuggerView - { - readonly ICollection c; - - public CollectionDebuggerView (ICollection col) - { - this.c = col; - } - -#if !COMPACT - [DebuggerBrowsable(DebuggerBrowsableState.RootHidden)] -#endif - public T[] Items { - get { - var o = new T [c.Count]; - c.CopyTo (o, 0); - return o; - } - } - } - - internal sealed class CollectionDebuggerView - { - readonly ICollection> c; - - public CollectionDebuggerView (ICollection> col) - { - this.c = col; - } - -#if !COMPACT - [DebuggerBrowsable (DebuggerBrowsableState.RootHidden)] -#endif - public KeyValuePair[] Items { - get { - var o = new KeyValuePair [c.Count]; - c.CopyTo (o, 0); - return o; - } - } - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/ConcurrentDictionary.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/ConcurrentDictionary.cs deleted file mode 100644 index a79467d866..0000000000 --- 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/ConcurrentDictionary.cs +++ /dev/null @@ -1,468 +0,0 @@ -// ConcurrentDictionary.cs -// -// Copyright (c) 2009 Jérémie "Garuma" Laval -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
-// -// - -#if !NET40PLUS || (PORTABLE && !WINRT) - -using System; -using System.Threading; -using System.Collections; -using System.Collections.Generic; -using System.Collections.ObjectModel; -using System.Runtime.Serialization; -using System.Diagnostics; - -// declare the namespace so Sharpen-generated using declarations will not produce errors -namespace System.Collections.Concurrent -{ -} - -namespace Antlr4.Runtime.Sharpen -{ -#if !COMPACT - [DebuggerDisplay ("Count={Count}")] - [DebuggerTypeProxy (typeof (CollectionDebuggerView<,>))] -#endif - public class ConcurrentDictionary : IDictionary, - ICollection>, IEnumerable>, - IDictionary, ICollection, IEnumerable - { - IEqualityComparer comparer; - - SplitOrderedList> internalDictionary; - - public ConcurrentDictionary () : this (EqualityComparer.Default) - { - } - - public ConcurrentDictionary (IEnumerable> collection) - : this (collection, EqualityComparer.Default) - { - } - - public ConcurrentDictionary (IEqualityComparer comparer) - { - this.comparer = comparer; - this.internalDictionary = new SplitOrderedList> (comparer); - } - - public ConcurrentDictionary (IEnumerable> collection, IEqualityComparer comparer) - : this (comparer) - { - foreach (KeyValuePair pair in collection) - Add (pair.Key, pair.Value); - } - - // Parameters unused - public ConcurrentDictionary (int concurrencyLevel, int capacity) - : this (EqualityComparer.Default) - { - - } - - public ConcurrentDictionary (int concurrencyLevel, - IEnumerable> collection, - IEqualityComparer comparer) - : this (collection, comparer) - { - - } - - // Parameters unused - public ConcurrentDictionary (int concurrencyLevel, int capacity, IEqualityComparer comparer) - : this (comparer) - { - - } - - void CheckKey (TKey key) - { - if (key == null) - throw new ArgumentNullException ("key"); - } - - void Add (TKey key, TValue value) - { - while (!TryAdd (key, value)); - } - - void IDictionary.Add (TKey key, TValue value) - { - Add (key, value); - } - - public 
bool TryAdd (TKey key, TValue value) - { - CheckKey (key); - return internalDictionary.Insert (Hash (key), key, Make (key, value)); - } - - void ICollection>.Add (KeyValuePair pair) - { - Add (pair.Key, pair.Value); - } - - public TValue AddOrUpdate (TKey key, Func addValueFactory, Func updateValueFactory) - { - CheckKey (key); - if (addValueFactory == null) - throw new ArgumentNullException ("addValueFactory"); - if (updateValueFactory == null) - throw new ArgumentNullException ("updateValueFactory"); - return internalDictionary.InsertOrUpdate (Hash (key), - key, - () => Make (key, addValueFactory (key)), - (e) => Make (key, updateValueFactory (key, e.Value))).Value; - } - - public TValue AddOrUpdate (TKey key, TValue addValue, Func updateValueFactory) - { - return AddOrUpdate (key, (_) => addValue, updateValueFactory); - } - - TValue AddOrUpdate (TKey key, TValue addValue, TValue updateValue) - { - CheckKey (key); - return internalDictionary.InsertOrUpdate (Hash (key), - key, - Make (key, addValue), - Make (key, updateValue)).Value; - } - - TValue GetValue (TKey key) - { - TValue temp; - if (!TryGetValue (key, out temp)) - throw new KeyNotFoundException (key.ToString ()); - return temp; - } - - public bool TryGetValue (TKey key, out TValue value) - { - CheckKey (key); - KeyValuePair pair; - bool result = internalDictionary.Find (Hash (key), key, out pair); - value = pair.Value; - - return result; - } - - public bool TryUpdate (TKey key, TValue newValue, TValue comparisonValue) - { - CheckKey (key); - return internalDictionary.CompareExchange (Hash (key), key, Make (key, newValue), (e) => e.Value.Equals (comparisonValue)); - } - - public TValue this[TKey key] { - get { - return GetValue (key); - } - set { - AddOrUpdate (key, value, value); - } - } - - public TValue GetOrAdd (TKey key, Func valueFactory) - { - CheckKey (key); - return internalDictionary.InsertOrGet (Hash (key), key, Make (key, default(TValue)), () => Make (key, valueFactory (key))).Value; - } - - 
public TValue GetOrAdd (TKey key, TValue value) - { - CheckKey (key); - return internalDictionary.InsertOrGet (Hash (key), key, Make (key, value), null).Value; - } - - public bool TryRemove (TKey key, out TValue value) - { - CheckKey (key); - KeyValuePair data; - bool result = internalDictionary.Delete (Hash (key), key, out data); - value = data.Value; - return result; - } - - bool Remove (TKey key) - { - TValue dummy; - - return TryRemove (key, out dummy); - } - - bool IDictionary.Remove (TKey key) - { - return Remove (key); - } - - bool ICollection>.Remove (KeyValuePair pair) - { - return Remove (pair.Key); - } - - public bool ContainsKey (TKey key) - { - CheckKey (key); - KeyValuePair dummy; - return internalDictionary.Find (Hash (key), key, out dummy); - } - - bool IDictionary.Contains (object key) - { - if (!(key is TKey)) - return false; - - return ContainsKey ((TKey)key); - } - - void IDictionary.Remove (object key) - { - if (!(key is TKey)) - return; - - Remove ((TKey)key); - } - - object IDictionary.this [object key] - { - get { - if (!(key is TKey)) - throw new ArgumentException ("key isn't of correct type", "key"); - - return this[(TKey)key]; - } - set { - if (!(key is TKey) || !(value is TValue)) - throw new ArgumentException ("key or value aren't of correct type"); - - this[(TKey)key] = (TValue)value; - } - } - - void IDictionary.Add (object key, object value) - { - if (!(key is TKey) || !(value is TValue)) - throw new ArgumentException ("key or value aren't of correct type"); - - Add ((TKey)key, (TValue)value); - } - - bool ICollection>.Contains (KeyValuePair pair) - { - return ContainsKey (pair.Key); - } - - public KeyValuePair[] ToArray () - { - // This is most certainly not optimum but there is - // not a lot of possibilities - - return new List> (this).ToArray (); - } - - public void Clear() - { - // Pronk - internalDictionary = new SplitOrderedList> (comparer); - } - - public int Count { - get { - return internalDictionary.Count; - } - } - - 
public bool IsEmpty { - get { - return Count == 0; - } - } - - bool ICollection>.IsReadOnly { - get { - return false; - } - } - - bool IDictionary.IsReadOnly { - get { - return false; - } - } - - public ICollection Keys { - get { - return GetPart ((kvp) => kvp.Key); - } - } - - public ICollection Values { - get { - return GetPart ((kvp) => kvp.Value); - } - } - - ICollection IDictionary.Keys { - get { - return (ICollection)Keys; - } - } - - ICollection IDictionary.Values { - get { - return (ICollection)Values; - } - } - - ICollection GetPart (Func, T> extractor) - { - List temp = new List (); - - foreach (KeyValuePair kvp in this) - temp.Add (extractor (kvp)); - - return new ReadOnlyCollection(temp); - } - - void ICollection.CopyTo (Array array, int startIndex) - { - KeyValuePair[] arr = array as KeyValuePair[]; - if (arr == null) - return; - - CopyTo (arr, startIndex, Count); - } - - void CopyTo (KeyValuePair[] array, int startIndex) - { - CopyTo (array, startIndex, Count); - } - - void ICollection>.CopyTo (KeyValuePair[] array, int startIndex) - { - CopyTo (array, startIndex); - } - - void CopyTo (KeyValuePair[] array, int startIndex, int num) - { - foreach (var kvp in this) { - array [startIndex++] = kvp; - - if (--num <= 0) - return; - } - } - - public IEnumerator> GetEnumerator () - { - return GetEnumeratorInternal (); - } - - IEnumerator IEnumerable.GetEnumerator () - { - return (IEnumerator)GetEnumeratorInternal (); - } - - IEnumerator> GetEnumeratorInternal () - { - return internalDictionary.GetEnumerator (); - } - - IDictionaryEnumerator IDictionary.GetEnumerator () - { - return new ConcurrentDictionaryEnumerator (GetEnumeratorInternal ()); - } - - class ConcurrentDictionaryEnumerator : IDictionaryEnumerator - { - IEnumerator> internalEnum; - - public ConcurrentDictionaryEnumerator (IEnumerator> internalEnum) - { - this.internalEnum = internalEnum; - } - - public bool MoveNext () - { - return internalEnum.MoveNext (); - } - - public void Reset () - { - 
internalEnum.Reset (); - } - - public object Current { - get { - return Entry; - } - } - - public DictionaryEntry Entry { - get { - KeyValuePair current = internalEnum.Current; - return new DictionaryEntry (current.Key, current.Value); - } - } - - public object Key { - get { - return internalEnum.Current.Key; - } - } - - public object Value { - get { - return internalEnum.Current.Value; - } - } - } - - object ICollection.SyncRoot { - get { - return this; - } - } - - bool IDictionary.IsFixedSize { - get { - return false; - } - } - - bool ICollection.IsSynchronized { - get { return true; } - } - - static KeyValuePair Make (U key, V value) - { - return new KeyValuePair (key, value); - } - - uint Hash (TKey key) - { - return (uint)comparer.GetHashCode (key); - } - } -} -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Funcs.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Funcs.cs deleted file mode 100644 index 9b1c974b66..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Funcs.cs +++ /dev/null @@ -1,41 +0,0 @@ -// -// System.Func.cs -// -// Authors: -// Alejandro Serrano "Serras" (trupill@yahoo.es) -// Marek Safar (marek.safar@gmail.com) -// - -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR TArg PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// - -#if !NET35PLUS - -using System.Runtime.CompilerServices; - -namespace Antlr4.Runtime.Sharpen { - - public delegate TResult Func (); - - public delegate TResult Func (T arg); - - public delegate TResult Func (T1 arg1, T2 arg2); -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/IStructuralComparable.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/IStructuralComparable.cs deleted file mode 100644 index 0f6b372054..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/IStructuralComparable.cs +++ /dev/null @@ -1,41 +0,0 @@ -// -// IStructuralComparable.cs -// -// Authors: -// Zoltan Varga (vargaz@gmail.com) -// -// Copyright (C) 2009 Novell -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -// - -#if !NET40PLUS - -using System; -using System.Collections; - -namespace Antlr4.Runtime.Sharpen -{ - internal interface IStructuralComparable { - int CompareTo (object other, IComparer comparer); - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/IStructuralEquatable.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/IStructuralEquatable.cs deleted file mode 100644 index 7e8cb1605c..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/IStructuralEquatable.cs +++ /dev/null @@ -1,43 +0,0 @@ -// -// IStructuralEquatable.cs -// -// Authors: -// Zoltan Varga (vargaz@gmail.com) -// -// Copyright (C) 2009 Novell -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -// - -#if !NET40PLUS - -using System; -using System.Collections; - -namespace Antlr4.Runtime.Sharpen -{ - internal interface IStructuralEquatable { - bool Equals (object other, IEqualityComparer comparer); - - int GetHashCode (IEqualityComparer comparer); - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/SerializableAttribute.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/SerializableAttribute.cs deleted file mode 100644 index 72b66c7fbe..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/SerializableAttribute.cs +++ /dev/null @@ -1,17 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#if PORTABLE || DOTNETCORE - -namespace System -{ - [AttributeUsage(AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Enum | AttributeTargets.Delegate, Inherited = false)] - internal sealed class SerializableAttribute : Attribute - { - } -} - -#endif - diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/SplitOrderedList.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/SplitOrderedList.cs deleted file mode 100644 index 4f9874b05b..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/SplitOrderedList.cs +++ /dev/null @@ -1,551 +0,0 @@ -// SplitOrderedList.cs -// -// Copyright (c) 2010 Jérémie "Garuma" Laval -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
-// -// - -#if !NET40PLUS || (PORTABLE && !WINRT) - -using System; -using System.Threading; -using System.Collections; -using System.Collections.Generic; - -namespace Antlr4.Runtime.Sharpen -{ - internal class SplitOrderedList - { - class Node - { - public bool Marked; - public ulong Key; - public TKey SubKey; - public T Data; - public Node Next; - - public Node Init (ulong key, TKey subKey, T data) - { - this.Key = key; - this.SubKey = subKey; - this.Data = data; - - this.Marked = false; - this.Next = null; - - return this; - } - - // Used to create dummy node - public Node Init (ulong key) - { - this.Key = key; - this.Data = default (T); - - this.Next = null; - this.Marked = false; - this.SubKey = default (TKey); - - return this; - } - - // Used to create marked node - public Node Init (Node wrapped) - { - this.Marked = true; - this.Next = wrapped; - - this.Key = 0; - this.Data = default (T); - this.SubKey = default (TKey); - - return this; - } - } - - const int MaxLoad = 5; - const uint BucketSize = 512; - - Node head; - Node tail; - - Node[] buckets = new Node [BucketSize]; - int count; - int size = 2; - - SimpleRwLock slim = new SimpleRwLock (); - - readonly IEqualityComparer comparer; - - public SplitOrderedList (IEqualityComparer comparer) - { - this.comparer = comparer; - head = new Node ().Init (0); - tail = new Node ().Init (ulong.MaxValue); - head.Next = tail; - SetBucket (0, head); - } - - public int Count { - get { - return count; - } - } - - public T InsertOrUpdate (uint key, TKey subKey, Func addGetter, Func updateGetter) - { - Node current; - bool result = InsertInternal (key, subKey, default (T), addGetter, out current); - - if (result) - return current.Data; - - // FIXME: this should have a CAS-like behavior - return current.Data = updateGetter (current.Data); - } - - public T InsertOrUpdate (uint key, TKey subKey, T addValue, T updateValue) - { - Node current; - if (InsertInternal (key, subKey, addValue, null, out current)) - return current.Data; 
- - // FIXME: this should have a CAS-like behavior - return current.Data = updateValue; - } - - public bool Insert (uint key, TKey subKey, T data) - { - Node current; - return InsertInternal (key, subKey, data, null, out current); - } - - public T InsertOrGet (uint key, TKey subKey, T data, Func dataCreator) - { - Node current; - InsertInternal (key, subKey, data, dataCreator, out current); - return current.Data; - } - - bool InsertInternal (uint key, TKey subKey, T data, Func dataCreator, out Node current) - { - Node node = new Node ().Init (ComputeRegularKey (key), subKey, data); - - uint b = key % (uint)size; - Node bucket; - - if ((bucket = GetBucket (b)) == null) - bucket = InitializeBucket (b); - - if (!ListInsert (node, bucket, out current, dataCreator)) - return false; - - int csize = size; - if (Interlocked.Increment (ref count) / csize > MaxLoad && (csize & 0x40000000) == 0) - Interlocked.CompareExchange (ref size, 2 * csize, csize); - - current = node; - - return true; - } - - public bool Find (uint key, TKey subKey, out T data) - { - Node node; - uint b = key % (uint)size; - data = default (T); - Node bucket; - - if ((bucket = GetBucket (b)) == null) - bucket = InitializeBucket (b); - - if (!ListFind (ComputeRegularKey (key), subKey, bucket, out node)) - return false; - - data = node.Data; - - return !node.Marked; - } - - public bool CompareExchange (uint key, TKey subKey, T data, Func check) - { - Node node; - uint b = key % (uint)size; - Node bucket; - - if ((bucket = GetBucket (b)) == null) - bucket = InitializeBucket (b); - - if (!ListFind (ComputeRegularKey (key), subKey, bucket, out node)) - return false; - - if (!check (node.Data)) - return false; - - node.Data = data; - - return true; - } - - public bool Delete (uint key, TKey subKey, out T data) - { - uint b = key % (uint)size; - Node bucket; - - if ((bucket = GetBucket (b)) == null) - bucket = InitializeBucket (b); - - if (!ListDelete (bucket, ComputeRegularKey (key), subKey, out data)) - 
return false; - - Interlocked.Decrement (ref count); - return true; - } - - public IEnumerator GetEnumerator () - { - Node node = head.Next; - - while (node != tail) { - while (node.Marked || (node.Key & 1) == 0) { - node = node.Next; - if (node == tail) - yield break; - } - yield return node.Data; - node = node.Next; - } - } - - Node InitializeBucket (uint b) - { - Node current; - uint parent = GetParent (b); - Node bucket; - - if ((bucket = GetBucket (parent)) == null) - bucket = InitializeBucket (parent); - - Node dummy = new Node ().Init (ComputeDummyKey (b)); - if (!ListInsert (dummy, bucket, out current, null)) - return current; - - return SetBucket (b, dummy); - } - - // Turn v's MSB off - static uint GetParent (uint v) - { - uint t, tt; - - // Find MSB position in v - var pos = (tt = v >> 16) > 0 ? - (t = tt >> 8) > 0 ? 24 + logTable[t] : 16 + logTable[tt] : - (t = v >> 8) > 0 ? 8 + logTable[t] : logTable[v]; - - return (uint)(v & ~(1 << pos)); - } - - // Reverse integer bits and make sure LSB is set - static ulong ComputeRegularKey (uint key) - { - return ComputeDummyKey (key) | 1; - } - - // Reverse integer bits - static ulong ComputeDummyKey (uint key) - { - return ((ulong)(((uint)reverseTable[key & 0xff] << 24) | - ((uint)reverseTable[(key >> 8) & 0xff] << 16) | - ((uint)reverseTable[(key >> 16) & 0xff] << 8) | - ((uint)reverseTable[(key >> 24) & 0xff]))) << 1; - } - - // Bucket storage is abstracted in a simple two-layer tree to avoid too much memory resize - Node GetBucket (uint index) - { - if (index >= buckets.Length) - return null; - return buckets[index]; - } - - Node SetBucket (uint index, Node node) - { - try { - slim.EnterReadLock (); - CheckSegment (index, true); - - Interlocked.CompareExchange (ref buckets[index], node, null); - return buckets[index]; - } finally { - slim.ExitReadLock (); - } - } - - // When we run out of space for bucket storage, we use a lock-based array resize - void CheckSegment (uint segment, bool readLockTaken) - { - if 
(segment < buckets.Length) - return; - - if (readLockTaken) - slim.ExitReadLock (); - try { - slim.EnterWriteLock (); - while (segment >= buckets.Length) - Array.Resize (ref buckets, buckets.Length * 2); - } finally { - slim.ExitWriteLock (); - } - if (readLockTaken) - slim.EnterReadLock (); - } - - Node ListSearch (ulong key, TKey subKey, ref Node left, Node h) - { - Node leftNodeNext = null, rightNode = null; - - do { - Node t = h; - Node tNext = t.Next; - do { - if (!tNext.Marked) { - left = t; - leftNodeNext = tNext; - } - t = tNext.Marked ? tNext.Next : tNext; - if (t == tail) - break; - - tNext = t.Next; - } while (tNext.Marked || t.Key < key || (tNext.Key == key && !comparer.Equals (subKey, t.SubKey))); - - rightNode = t; - - if (leftNodeNext == rightNode) { - if (rightNode != tail && rightNode.Next.Marked) - continue; - else - return rightNode; - } - - if (Interlocked.CompareExchange (ref left.Next, rightNode, leftNodeNext) == leftNodeNext) { - if (rightNode != tail && rightNode.Next.Marked) - continue; - else - return rightNode; - } - } while (true); - } - - bool ListDelete (Node startPoint, ulong key, TKey subKey, out T data) - { - Node rightNode = null, rightNodeNext = null, leftNode = null; - data = default (T); - Node markedNode = null; - - do { - rightNode = ListSearch (key, subKey, ref leftNode, startPoint); - if (rightNode == tail || rightNode.Key != key || !comparer.Equals (subKey, rightNode.SubKey)) - return false; - - data = rightNode.Data; - rightNodeNext = rightNode.Next; - - if (!rightNodeNext.Marked) { - if (markedNode == null) - markedNode = new Node (); - markedNode.Init (rightNodeNext); - - if (Interlocked.CompareExchange (ref rightNode.Next, markedNode, rightNodeNext) == rightNodeNext) - break; - } - } while (true); - - if (Interlocked.CompareExchange (ref leftNode.Next, rightNodeNext, rightNode) != rightNode) - ListSearch (rightNode.Key, subKey, ref leftNode, startPoint); - - return true; - } - - bool ListInsert (Node newNode, Node 
startPoint, out Node current, Func dataCreator) - { - ulong key = newNode.Key; - Node rightNode = null, leftNode = null; - - do { - rightNode = current = ListSearch (key, newNode.SubKey, ref leftNode, startPoint); - if (rightNode != tail && rightNode.Key == key && comparer.Equals (newNode.SubKey, rightNode.SubKey)) - return false; - - newNode.Next = rightNode; - if (dataCreator != null) - newNode.Data = dataCreator (); - if (Interlocked.CompareExchange (ref leftNode.Next, newNode, rightNode) == rightNode) - return true; - } while (true); - } - - bool ListFind (ulong key, TKey subKey, Node startPoint, out Node data) - { - Node rightNode = null, leftNode = null; - data = null; - - rightNode = ListSearch (key, subKey, ref leftNode, startPoint); - data = rightNode; - - return rightNode != tail && rightNode.Key == key && comparer.Equals (subKey, rightNode.SubKey); - } - - static readonly byte[] reverseTable = { - 0, 128, 64, 192, 32, 160, 96, 224, 16, 144, 80, 208, 48, 176, 112, 240, 8, 136, 72, 200, 40, 168, 104, 232, 24, 152, 88, 216, 56, 184, 120, 248, 4, 132, 68, 196, 36, 164, 100, 228, 20, 148, 84, 212, 52, 180, 116, 244, 12, 140, 76, 204, 44, 172, 108, 236, 28, 156, 92, 220, 60, 188, 124, 252, 2, 130, 66, 194, 34, 162, 98, 226, 18, 146, 82, 210, 50, 178, 114, 242, 10, 138, 74, 202, 42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6, 134, 70, 198, 38, 166, 102, 230, 22, 150, 86, 214, 54, 182, 118, 246, 14, 142, 78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190, 126, 254, 1, 129, 65, 193, 33, 161, 97, 225, 17, 145, 81, 209, 49, 177, 113, 241, 9, 137, 73, 201, 41, 169, 105, 233, 25, 153, 89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37, 165, 101, 229, 21, 149, 85, 213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173, 109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 3, 131, 67, 195, 35, 163, 99, 227, 19, 147, 83, 211, 51, 179, 115, 243, 11, 139, 75, 203, 43, 171, 107, 235, 27, 155, 91, 219, 59, 187, 123, 251, 7, 135, 71, 199, 39, 167, 103, 231, 23, 151, 87, 215, 
55, 183, 119, 247, 15, 143, 79, 207, 47, 175, 111, 239, 31, 159, 95, 223, 63, 191, 127, 255 - }; - - static readonly byte[] logTable = { - 0xFF, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - }; - - struct SimpleRwLock - { - const int RwWait = 1; - const int RwWrite = 2; - const int RwRead = 4; - - int rwlock; - - public void EnterReadLock () - { - SpinWait sw = new SpinWait (); - do { - while ((rwlock & (RwWrite | RwWait)) > 0) - sw.SpinOnce (); - -#if COMPACT - if ((InterlockedAdd (ref rwlock, RwRead) & (RwWait | RwWait)) == 0) - return; - - InterlockedAdd (ref rwlock, -RwRead); -#else - if ((Interlocked.Add (ref rwlock, RwRead) & (RwWait | RwWait)) == 0) - return; - - Interlocked.Add (ref rwlock, -RwRead); -#endif - } while (true); - } - - public void ExitReadLock () - { -#if COMPACT - InterlockedAdd (ref rwlock, -RwRead); -#else - Interlocked.Add (ref rwlock, -RwRead); -#endif - } - - public void EnterWriteLock () - { - SpinWait sw = new SpinWait (); - do { - int state = rwlock; - if (state < RwWrite) { - if (Interlocked.CompareExchange (ref rwlock, RwWrite, state) == state) - return; - state = rwlock; - } - // We register our interest in taking the Write lock (if upgradeable it's already done) - while ((state & RwWait) == 0 && Interlocked.CompareExchange 
(ref rwlock, state | RwWait, state) != state) - state = rwlock; - // Before falling to sleep - while (rwlock > RwWait) - sw.SpinOnce (); - } while (true); - } - - public void ExitWriteLock () - { -#if COMPACT - InterlockedAdd (ref rwlock, -RwWrite); -#else - Interlocked.Add (ref rwlock, -RwWrite); -#endif - } - -#if COMPACT - /// - /// Adds two 32-bit integers and replaces the first integer with the sum, as an atomic operation. - /// - /// A variable containing the first value to be added. The sum of the two values is stored in . - /// The value to be added to the integer at . - /// The new value stored at . - private static int InterlockedAdd(ref int location1, int value) - { -#if false // the code calling this private method will never make use of this optimization - if (value == 1) - return Interlocked.Increment(ref location1); - else if (value == -1) - return Interlocked.Decrement(ref location1); -#endif - - while (true) - { - int previous = location1; - if (Interlocked.CompareExchange(ref location1, previous + value, previous) == previous) - return previous + value; - } - } -#endif - } - } - - internal struct SpinWait - { - // The number of step until SpinOnce yield on multicore machine - const int step = 10; - const int maxTime = 200; -#if !COMPACT - static readonly bool isSingleCpu = (Environment.ProcessorCount == 1); -#endif - - int ntime; - - public void SpinOnce () - { - ntime += 1; -#if COMPACT - Thread.Sleep(0); -#else - ManualResetEvent mre = new ManualResetEvent (false); - if (isSingleCpu) { - // On a single-CPU system, spinning does no good - mre.WaitOne (0); - } else { - if (ntime % step == 0) - mre.WaitOne (0); - else - // Multi-CPU system might be hyper-threaded, let other thread run - mre.WaitOne (Math.Min (ntime, maxTime) << 1); - } -#endif - } - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Tuple.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Tuple.cs deleted file mode 100644 index 
ae7979d40d..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Tuple.cs +++ /dev/null @@ -1,115 +0,0 @@ -// -// Tuple.cs -// -// Authors: -// Zoltan Varga (vargaz@gmail.com) -// -// Copyright (C) 2009 Novell -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-// - -#if !NET40PLUS - -using System; - -namespace Antlr4.Runtime.Sharpen -{ - internal static class Tuple - { - public static Tuple> Create - ( - T1 item1, - T2 item2, - T3 item3, - T4 item4, - T5 item5, - T6 item6, - T7 item7, - T8 item8) { - return new Tuple> (item1, item2, item3, item4, item5, item6, item7, new Tuple (item8)); - } - - public static Tuple Create - ( - T1 item1, - T2 item2, - T3 item3, - T4 item4, - T5 item5, - T6 item6, - T7 item7) { - return new Tuple (item1, item2, item3, item4, item5, item6, item7); - } - - public static Tuple Create - ( - T1 item1, - T2 item2, - T3 item3, - T4 item4, - T5 item5, - T6 item6) { - return new Tuple (item1, item2, item3, item4, item5, item6); - } - - public static Tuple Create - ( - T1 item1, - T2 item2, - T3 item3, - T4 item4, - T5 item5) { - return new Tuple (item1, item2, item3, item4, item5); - } - - public static Tuple Create - ( - T1 item1, - T2 item2, - T3 item3, - T4 item4) { - return new Tuple (item1, item2, item3, item4); - } - - public static Tuple Create - ( - T1 item1, - T2 item2, - T3 item3) { - return new Tuple (item1, item2, item3); - } - - public static Tuple Create - ( - T1 item1, - T2 item2) { - return new Tuple (item1, item2); - } - - public static Tuple Create - ( - T1 item1) { - return new Tuple (item1); - } - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Tuples.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Tuples.cs deleted file mode 100644 index dda3a9cce1..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Compat/Tuples.cs +++ /dev/null @@ -1,995 +0,0 @@ -// -// Tuples.cs -// -// Authors: -// Zoltan Varga (vargaz@gmail.com) -// Marek Safar (marek.safar@gmail.com) -// -// Copyright (C) 2009 Novell -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, 
including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -// - -#if !NET40PLUS - -using System; -using System.Collections; -using System.Collections.Generic; - -namespace Antlr4.Runtime.Sharpen -{ - internal partial class Tuple - { - public Tuple (T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7, TRest rest) - { - this.item1 = item1; - this.item2 = item2; - this.item3 = item3; - this.item4 = item4; - this.item5 = item5; - this.item6 = item6; - this.item7 = item7; - this.rest = rest; - - bool ok = true; - if (!typeof (TRest).IsGenericType) - ok = false; - if (ok) { - Type t = typeof (TRest).GetGenericTypeDefinition (); - if (!(t == typeof (Tuple<>) || t == typeof (Tuple<,>) || t == typeof (Tuple<,,>) || t == typeof (Tuple<,,,>) || t == typeof (Tuple<,,,,>) || t == typeof (Tuple <,,,,,>) || t == typeof (Tuple<,,,,,,>) || t == typeof (Tuple<,,,,,,,>))) - ok = false; - } - if (!ok) - throw new ArgumentException ("rest", "The last element of an eight element tuple must be a Tuple."); - } - } - - /* The rest is generated by the script at the bottom */ - - [Serializable] - internal class Tuple : IStructuralEquatable, 
IStructuralComparable, IComparable - { - T1 item1; - - public Tuple (T1 item1) - { - this.item1 = item1; - } - - public T1 Item1 { - get { return item1; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - return comparer.Compare (item1, t.item1); - } - - public override bool Equals (object obj) - { - return ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - return comparer.GetHashCode (item1); - } - - public override string ToString () - { - return String.Format ("({0})", item1); - } - } - - [Serializable] - public class Tuple : IStructuralEquatable, IStructuralComparable, IComparable - { - T1 item1; - T2 item2; - - public Tuple (T1 item1, T2 item2) - { - this.item1 = item1; - this.item2 = item2; - } - - public T1 Item1 { - get { return item1; } - } - - public T2 Item2 { - get { return item2; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - int res = comparer.Compare (item1, t.item1); - if (res != 0) return res; - return comparer.Compare (item2, t.item2); - } - - public override bool Equals 
(object obj) - { - return ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1) && - comparer.Equals (item2, t.item2); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - int h = comparer.GetHashCode (item1); - h = (h << 5) - h + comparer.GetHashCode (item2); - return h; - } - - public override string ToString () - { - return String.Format ("({0}, {1})", item1, item2); - } - } - - [Serializable] - internal class Tuple : IStructuralEquatable, IStructuralComparable, IComparable - { - T1 item1; - T2 item2; - T3 item3; - - public Tuple (T1 item1, T2 item2, T3 item3) - { - this.item1 = item1; - this.item2 = item2; - this.item3 = item3; - } - - public T1 Item1 { - get { return item1; } - } - - public T2 Item2 { - get { return item2; } - } - - public T3 Item3 { - get { return item3; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - int res = comparer.Compare (item1, t.item1); - if (res != 0) return res; - res = comparer.Compare (item2, t.item2); - if (res != 0) return res; - return comparer.Compare (item3, t.item3); - } - - public override bool Equals (object obj) - { - return ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1) && - 
comparer.Equals (item2, t.item2) && - comparer.Equals (item3, t.item3); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - int h = comparer.GetHashCode (item1); - h = (h << 5) - h + comparer.GetHashCode (item2); - h = (h << 5) - h + comparer.GetHashCode (item3); - return h; - } - - public override string ToString () - { - return String.Format ("({0}, {1}, {2})", item1, item2, item3); - } - } - - [Serializable] - internal class Tuple : IStructuralEquatable, IStructuralComparable, IComparable - { - T1 item1; - T2 item2; - T3 item3; - T4 item4; - - public Tuple (T1 item1, T2 item2, T3 item3, T4 item4) - { - this.item1 = item1; - this.item2 = item2; - this.item3 = item3; - this.item4 = item4; - } - - public T1 Item1 { - get { return item1; } - } - - public T2 Item2 { - get { return item2; } - } - - public T3 Item3 { - get { return item3; } - } - - public T4 Item4 { - get { return item4; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - int res = comparer.Compare (item1, t.item1); - if (res != 0) return res; - res = comparer.Compare (item2, t.item2); - if (res != 0) return res; - res = comparer.Compare (item3, t.item3); - if (res != 0) return res; - return comparer.Compare (item4, t.item4); - } - - public override bool Equals (object obj) - { - return ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1) && - comparer.Equals (item2, 
t.item2) && - comparer.Equals (item3, t.item3) && - comparer.Equals (item4, t.item4); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - int h = comparer.GetHashCode (item1); - h = (h << 5) - h + comparer.GetHashCode (item2); - h = (h << 5) - h + comparer.GetHashCode (item3); - h = (h << 5) - h + comparer.GetHashCode (item4); - return h; - } - - public override string ToString () - { - return String.Format ("({0}, {1}, {2}, {3})", item1, item2, item3, item4); - } - } - - [Serializable] - internal class Tuple : IStructuralEquatable, IStructuralComparable, IComparable - { - T1 item1; - T2 item2; - T3 item3; - T4 item4; - T5 item5; - - public Tuple (T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) - { - this.item1 = item1; - this.item2 = item2; - this.item3 = item3; - this.item4 = item4; - this.item5 = item5; - } - - public T1 Item1 { - get { return item1; } - } - - public T2 Item2 { - get { return item2; } - } - - public T3 Item3 { - get { return item3; } - } - - public T4 Item4 { - get { return item4; } - } - - public T5 Item5 { - get { return item5; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - int res = comparer.Compare (item1, t.item1); - if (res != 0) return res; - res = comparer.Compare (item2, t.item2); - if (res != 0) return res; - res = comparer.Compare (item3, t.item3); - if (res != 0) return res; - res = comparer.Compare (item4, t.item4); - if (res != 0) return res; - return comparer.Compare (item5, t.item5); - } - - public override bool Equals (object obj) - { - return ((IStructuralEquatable) this).Equals (obj, 
EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1) && - comparer.Equals (item2, t.item2) && - comparer.Equals (item3, t.item3) && - comparer.Equals (item4, t.item4) && - comparer.Equals (item5, t.item5); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - int h = comparer.GetHashCode (item1); - h = (h << 5) - h + comparer.GetHashCode (item2); - h = (h << 5) - h + comparer.GetHashCode (item3); - h = (h << 5) - h + comparer.GetHashCode (item4); - h = (h << 5) - h + comparer.GetHashCode (item5); - return h; - } - - public override string ToString () - { - return String.Format ("({0}, {1}, {2}, {3}, {4})", item1, item2, item3, item4, item5); - } - } - - [Serializable] - internal class Tuple : IStructuralEquatable, IStructuralComparable, IComparable - { - T1 item1; - T2 item2; - T3 item3; - T4 item4; - T5 item5; - T6 item6; - - public Tuple (T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6) - { - this.item1 = item1; - this.item2 = item2; - this.item3 = item3; - this.item4 = item4; - this.item5 = item5; - this.item6 = item6; - } - - public T1 Item1 { - get { return item1; } - } - - public T2 Item2 { - get { return item2; } - } - - public T3 Item3 { - get { return item3; } - } - - public T4 Item4 { - get { return item4; } - } - - public T5 Item5 { - get { return item5; } - } - - public T6 Item6 { - get { return item6; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - 
int res = comparer.Compare (item1, t.item1); - if (res != 0) return res; - res = comparer.Compare (item2, t.item2); - if (res != 0) return res; - res = comparer.Compare (item3, t.item3); - if (res != 0) return res; - res = comparer.Compare (item4, t.item4); - if (res != 0) return res; - res = comparer.Compare (item5, t.item5); - if (res != 0) return res; - return comparer.Compare (item6, t.item6); - } - - public override bool Equals (object obj) - { - return ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1) && - comparer.Equals (item2, t.item2) && - comparer.Equals (item3, t.item3) && - comparer.Equals (item4, t.item4) && - comparer.Equals (item5, t.item5) && - comparer.Equals (item6, t.item6); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - int h = comparer.GetHashCode (item1); - h = (h << 5) - h + comparer.GetHashCode (item2); - h = (h << 5) - h + comparer.GetHashCode (item3); - h = (h << 5) - h + comparer.GetHashCode (item4); - h = (h << 5) - h + comparer.GetHashCode (item5); - h = (h << 5) - h + comparer.GetHashCode (item6); - return h; - } - - public override string ToString () - { - return String.Format ("({0}, {1}, {2}, {3}, {4}, {5})", item1, item2, item3, item4, item5, item6); - } - } - - [Serializable] - internal class Tuple : IStructuralEquatable, IStructuralComparable, IComparable - { - T1 item1; - T2 item2; - T3 item3; - T4 item4; - T5 item5; - T6 item6; - T7 item7; - - public Tuple (T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7) - { - this.item1 = item1; - this.item2 = item2; - this.item3 = item3; - this.item4 = item4; - this.item5 = item5; - this.item6 = item6; - 
this.item7 = item7; - } - - public T1 Item1 { - get { return item1; } - } - - public T2 Item2 { - get { return item2; } - } - - public T3 Item3 { - get { return item3; } - } - - public T4 Item4 { - get { return item4; } - } - - public T5 Item5 { - get { return item5; } - } - - public T6 Item6 { - get { return item6; } - } - - public T7 Item7 { - get { return item7; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - int res = comparer.Compare (item1, t.item1); - if (res != 0) return res; - res = comparer.Compare (item2, t.item2); - if (res != 0) return res; - res = comparer.Compare (item3, t.item3); - if (res != 0) return res; - res = comparer.Compare (item4, t.item4); - if (res != 0) return res; - res = comparer.Compare (item5, t.item5); - if (res != 0) return res; - res = comparer.Compare (item6, t.item6); - if (res != 0) return res; - return comparer.Compare (item7, t.item7); - } - - public override bool Equals (object obj) - { - return ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1) && - comparer.Equals (item2, t.item2) && - comparer.Equals (item3, t.item3) && - comparer.Equals (item4, t.item4) && - comparer.Equals (item5, t.item5) && - comparer.Equals (item6, t.item6) && - comparer.Equals (item7, t.item7); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - int h = comparer.GetHashCode (item1); - h = (h << 5) - h + 
comparer.GetHashCode (item2); - h = (h << 5) - h + comparer.GetHashCode (item3); - h = (h << 5) - h + comparer.GetHashCode (item4); - h = (h << 5) - h + comparer.GetHashCode (item5); - h = (h << 5) - h + comparer.GetHashCode (item6); - h = (h << 5) - h + comparer.GetHashCode (item7); - return h; - } - - public override string ToString () - { - return String.Format ("({0}, {1}, {2}, {3}, {4}, {5}, {6})", item1, item2, item3, item4, item5, item6, item7); - } - } - - [Serializable] - internal partial class Tuple : IStructuralEquatable, IStructuralComparable, IComparable - { - T1 item1; - T2 item2; - T3 item3; - T4 item4; - T5 item5; - T6 item6; - T7 item7; - TRest rest; - - public T1 Item1 { - get { return item1; } - } - - public T2 Item2 { - get { return item2; } - } - - public T3 Item3 { - get { return item3; } - } - - public T4 Item4 { - get { return item4; } - } - - public T5 Item5 { - get { return item5; } - } - - public T6 Item6 { - get { return item6; } - } - - public T7 Item7 { - get { return item7; } - } - - public TRest Rest { - get { return rest; } - } - - int IComparable.CompareTo (object obj) - { - return ((IStructuralComparable) this).CompareTo (obj, Comparer.Default); - } - - int IStructuralComparable.CompareTo (object other, IComparer comparer) - { - var t = other as Tuple; - if (t == null) { - if (other == null) return 1; - throw new ArgumentException ("other"); - } - - int res = comparer.Compare (item1, t.item1); - if (res != 0) return res; - res = comparer.Compare (item2, t.item2); - if (res != 0) return res; - res = comparer.Compare (item3, t.item3); - if (res != 0) return res; - res = comparer.Compare (item4, t.item4); - if (res != 0) return res; - res = comparer.Compare (item5, t.item5); - if (res != 0) return res; - res = comparer.Compare (item6, t.item6); - if (res != 0) return res; - res = comparer.Compare (item7, t.item7); - if (res != 0) return res; - return comparer.Compare (rest, t.rest); - } - - public override bool Equals (object obj) - 
{ - return ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default); - } - - bool IStructuralEquatable.Equals (object other, IEqualityComparer comparer) - { - var t = other as Tuple; - if (t == null) - return false; - - return comparer.Equals (item1, t.item1) && - comparer.Equals (item2, t.item2) && - comparer.Equals (item3, t.item3) && - comparer.Equals (item4, t.item4) && - comparer.Equals (item5, t.item5) && - comparer.Equals (item6, t.item6) && - comparer.Equals (item7, t.item7) && - comparer.Equals (rest, t.rest); - } - - public override int GetHashCode () - { - return ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default); - } - - int IStructuralEquatable.GetHashCode (IEqualityComparer comparer) - { - int h = comparer.GetHashCode (item1); - h = (h << 5) - h + comparer.GetHashCode (item2); - h = (h << 5) - h + comparer.GetHashCode (item3); - h = (h << 5) - h + comparer.GetHashCode (item4); - h = (h << 5) - h + comparer.GetHashCode (item5); - h = (h << 5) - h + comparer.GetHashCode (item6); - h = (h << 5) - h + comparer.GetHashCode (item7); - h = (h << 5) - h + comparer.GetHashCode (rest); - return h; - } - - public override string ToString () - { - return String.Format ("({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7})", item1, item2, item3, item4, item5, item6, item7, rest); - } - } - -} - -#endif - -#if FALSE - -// -// generator script -// - -using System; -using System.Text; - -public class TupleGen -{ - public static void Main () { - for (int arity = 1; arity < 9; ++arity) { - string type_name = GetTypeName (arity); - - Console.WriteLine ("\t[Serializable]"); - Console.Write ("\tpublic {0}class ", arity < 8 ? 
null : "partial "); - Console.Write (type_name); - Console.WriteLine (" : IStructuralEquatable, IStructuralComparable, IComparable"); - Console.WriteLine ("\t{"); - for (int i = 1; i <= arity; ++i) - Console.WriteLine ("\t\t{0} {1};", GetItemTypeName (i), GetItemName (i)); - - if (arity < 8) { - Console.WriteLine (); - Console.Write ("\t\tpublic Tuple ("); - for (int i = 1; i <= arity; ++i) { - Console.Write ("{0} {1}", GetItemTypeName (i), GetItemName (i)); - if (i < arity) - Console.Write (", "); - } - Console.WriteLine (")"); - Console.WriteLine ("\t\t{"); - for (int i = 1; i <= arity; ++i) - Console.WriteLine ("\t\t\t this.{0} = {0};", GetItemName (i)); - Console.WriteLine ("\t\t}"); - } - - for (int i = 1; i <= arity; ++i) { - Console.WriteLine (); - Console.WriteLine ("\t\tpublic {0} {1} {{", GetItemTypeName (i), - System.Globalization.CultureInfo.InvariantCulture.TextInfo.ToTitleCase (GetItemName (i))); - Console.Write ("\t\t\tget { "); - Console.WriteLine ("return {0}; }}", GetItemName (i)); - Console.WriteLine ("\t\t}"); - } - - Console.WriteLine (); - Console.WriteLine ("\t\tint IComparable.CompareTo (object obj)"); - Console.WriteLine ("\t\t{"); - Console.WriteLine ("\t\t\treturn ((IStructuralComparable) this).CompareTo (obj, Comparer.Default);"); - Console.WriteLine ("\t\t}"); - - Console.WriteLine (); - Console.WriteLine ("\t\tint IStructuralComparable.CompareTo (object other, IComparer comparer)"); - Console.WriteLine ("\t\t{"); - Console.WriteLine ("\t\t\tvar t = other as {0};", type_name); - Console.WriteLine ("\t\t\tif (t == null) {"); - Console.WriteLine ("\t\t\t\tif (other == null) return 1;"); - Console.WriteLine ("\t\t\t\tthrow new ArgumentException ("other");"); - Console.WriteLine ("\t\t\t}"); - Console.WriteLine (); - - for (int i = 1; i < arity; ++i) { - Console.Write ("\t\t\t"); - if (i == 1) - Console.Write ("int "); - - Console.WriteLine ("res = comparer.Compare ({0}, t.{0});", GetItemName (i)); - Console.WriteLine ("\t\t\tif (res != 0) 
return res;"); - } - Console.WriteLine ("\t\t\treturn comparer.Compare ({0}, t.{0});", GetItemName (arity)); - Console.WriteLine ("\t\t}"); - - Console.WriteLine (); - Console.WriteLine ("\t\tpublic override bool Equals (object obj)"); - Console.WriteLine ("\t\t{"); - Console.WriteLine ("\t\t\treturn ((IStructuralEquatable) this).Equals (obj, EqualityComparer.Default);"); - Console.WriteLine ("\t\t}"); - - Console.WriteLine (); - Console.WriteLine ("\t\tbool IStructuralEquatable.Equals (object other, IEqualityComparer comparer)"); - Console.WriteLine ("\t\t{"); - Console.WriteLine ("\t\t\tvar t = other as {0};", type_name); - Console.WriteLine ("\t\t\tif (t == null)"); - Console.WriteLine ("\t\t\t\treturn false;"); - Console.WriteLine (); - Console.Write ("\t\t\treturn"); - - for (int i = 1; i <= arity; ++i) { - if (i == 1) - Console.Write (" "); - else - Console.Write ("\t\t\t\t"); - - Console.Write ("comparer.Equals ({0}, t.{0})", GetItemName (i)); - if (i != arity) - Console.WriteLine (" &&"); - else - Console.WriteLine (";"); - } - Console.WriteLine ("\t\t}"); - - Console.WriteLine (); - Console.WriteLine ("\t\tpublic override int GetHashCode ()"); - Console.WriteLine ("\t\t{"); - Console.WriteLine ("\t\t\treturn ((IStructuralEquatable) this).GetHashCode (EqualityComparer.Default);"); - Console.WriteLine ("\t\t}"); - - Console.WriteLine (); - Console.WriteLine ("\t\tint IStructuralEquatable.GetHashCode (IEqualityComparer comparer)"); - Console.WriteLine ("\t\t{"); - if (arity == 1) { - Console.WriteLine ("\t\t\treturn comparer.GetHashCode ({0});", GetItemName (arity)); - } else { - Console.WriteLine ("\t\t\tint h = comparer.GetHashCode ({0});", GetItemName (1)); - for (int i = 2; i <= arity; ++i) - Console.WriteLine ("\t\t\th = (h << 5) - h + comparer.GetHashCode ({0});", GetItemName (i)); - Console.WriteLine ("\t\t\treturn h;"); - } - - Console.WriteLine ("\t\t}"); - - Console.WriteLine (); - Console.WriteLine ("\t\tpublic override string ToString ()"); - 
Console.WriteLine ("\t\t{"); - Console.Write ("\t\t\treturn String.Format (\"("); - for (int i = 1; i <= arity; ++i) { - Console.Write ("{" + (i - 1) + "}"); - if (i < arity) - Console.Write (", "); - } - Console.Write (")\", "); - for (int i = 1; i <= arity; ++i) { - Console.Write (GetItemName (i)); - if (i < arity) - Console.Write (", "); - } - Console.WriteLine (");"); - Console.WriteLine ("\t\t}"); - - Console.WriteLine ("\t}\n"); - } - } - - static string GetTypeName (int arity) - { - StringBuilder sb = new StringBuilder (); - sb.Append ("Tuple<"); - for (int i = 1; i <= arity; ++i) { - sb.Append (GetItemTypeName (i)); - if (i < arity) - sb.Append (", "); - } - sb.Append (">"); - - return sb.ToString (); - } - - static string GetItemName (int arity) - { - return arity < 8 ? "item" + arity.ToString () : "rest"; - } - - static string GetItemTypeName (int arity) - { - return arity < 8 ? "T" + arity.ToString () : "TRest"; - } -} - -#endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeWalker.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeWalker.cs deleted file mode 100644 index 515c9c0f9c..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeWalker.cs +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -namespace Antlr4.Runtime.Tree -{ - public class ParseTreeWalker - { - public static readonly ParseTreeWalker Default = new ParseTreeWalker(); - - public virtual void Walk(IParseTreeListener listener, IParseTree t) - { - if (t is IErrorNode) - { - listener.VisitErrorNode((IErrorNode)t); - return; - } - else - { - if (t is ITerminalNode) - { - listener.VisitTerminal((ITerminalNode)t); - return; - } - } - IRuleNode r = (IRuleNode)t; - EnterRule(listener, r); - int n = r.ChildCount; - for (int i = 0; i < n; i++) - { - Walk(listener, r.GetChild(i)); - } - ExitRule(listener, r); - } - - /// - /// The discovery of a rule node, involves sending two events: the generic - /// - /// and a - /// - /// -specific event. First we trigger the generic and then - /// the rule specific. We to them in reverse order upon finishing the node. - /// - protected internal virtual void EnterRule(IParseTreeListener listener, IRuleNode r) - { - ParserRuleContext ctx = (ParserRuleContext)r.RuleContext; - listener.EnterEveryRule(ctx); - ctx.EnterRule(listener); - } - - protected internal virtual void ExitRule(IParseTreeListener listener, IRuleNode r) - { - ParserRuleContext ctx = (ParserRuleContext)r.RuleContext; - ctx.ExitRule(listener); - listener.ExitEveryRule(ctx); - } - } -} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.cs deleted file mode 100644 index abe5a1239c..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.cs +++ /dev/null @@ -1,148 +0,0 @@ -//------------------------------------------------------------------------------ -// -// This code was generated by a tool. -// ANTLR Version: 4.7 -// -// Changes to this file may cause incorrect behavior and will be lost if -// the code is regenerated. 
-// -//------------------------------------------------------------------------------ - -// Unreachable code detected -#pragma warning disable 0162 -// The variable '...' is assigned but its value is never used -#pragma warning disable 0219 -// Missing XML comment for publicly visible type or member '...' -#pragma warning disable 1591 -// Ambiguous reference in cref attribute -#pragma warning disable 419 - -using System; -using System.Text; -using Antlr4.Runtime; -using Antlr4.Runtime.Atn; -using Antlr4.Runtime.Misc; -using DFA = Antlr4.Runtime.Dfa.DFA; - -[System.CodeDom.Compiler.GeneratedCode("ANTLR", "4.8")] -[System.CLSCompliant(false)] -public partial class XPathLexer : Lexer -{ - protected static DFA[] decisionToDFA; - protected static PredictionContextCache sharedContextCache = new PredictionContextCache(); - public const int - TokenRef = 1, RuleRef = 2, Anywhere = 3, Root = 4, Wildcard = 5, Bang = 6, ID = 7, String = 8; - public static string[] channelNames = { - "DEFAULT_TOKEN_CHANNEL", "HIDDEN" - }; - - public static string[] modeNames = { - "DEFAULT_MODE" - }; - - public static readonly string[] ruleNames = { - "Anywhere", "Root", "Wildcard", "Bang", "ID", "NameChar", "NameStartChar", - "String" - }; - - - public XPathLexer(ICharStream input) - : base(input) - { - Interpreter = new LexerATNSimulator(this, _ATN, decisionToDFA, sharedContextCache); - } - - private static readonly string[] _LiteralNames = { - null, null, null, "'//'", "'/'", "'*'", "'!'" - }; - private static readonly string[] _SymbolicNames = { - null, "TokenRef", "RuleRef", "Anywhere", "Root", "Wildcard", "Bang", "ID", - "String" - }; - public static readonly IVocabulary DefaultVocabulary = new Vocabulary(_LiteralNames, _SymbolicNames); - - [NotNull] - public override IVocabulary Vocabulary - { - get - { - return DefaultVocabulary; - } - } - - public override string GrammarFileName { get { return "XPathLexer.g4"; } } - - public override string[] RuleNames { get { return ruleNames; } } - - 
public override string[] ChannelNames { get { return channelNames; } } - - public override string[] ModeNames { get { return modeNames; } } - - public override string SerializedAtn { get { return _serializedATN; } } - - static XPathLexer() - { - decisionToDFA = new DFA[_ATN.NumberOfDecisions]; - for (int i = 0; i < _ATN.NumberOfDecisions; i++) - { - decisionToDFA[i] = new DFA(_ATN.GetDecisionState(i), i); - } - } - public override void Action(RuleContext _localctx, int ruleIndex, int actionIndex) - { - switch (ruleIndex) - { - case 4: ID_action(_localctx, actionIndex); break; - } - } - private void ID_action(RuleContext _localctx, int actionIndex) - { - switch (actionIndex) - { - case 0: - String text = Text; - if (Char.IsUpper(text[0])) - Type = TokenRef; - else - Type = RuleRef; - break; - } - } - - private static string _serializedATN = _serializeATN(); - private static string _serializeATN() - { - StringBuilder sb = new StringBuilder(); - sb.Append("\x3\x430\xD6D1\x8206\xAD2D\x4417\xAEF1\x8D80\xAADD\x2\n\x34"); - sb.Append("\b\x1\x4\x2\t\x2\x4\x3\t\x3\x4\x4\t\x4\x4\x5\t\x5\x4\x6\t\x6"); - sb.Append("\x4\a\t\a\x4\b\t\b\x4\t\t\t\x3\x2\x3\x2\x3\x2\x3\x3\x3\x3\x3"); - sb.Append("\x4\x3\x4\x3\x5\x3\x5\x3\x6\x3\x6\a\x6\x1F\n\x6\f\x6\xE\x6\""); - sb.Append("\v\x6\x3\x6\x3\x6\x3\a\x3\a\x5\a(\n\a\x3\b\x3\b\x3\t\x3\t\a"); - sb.Append("\t.\n\t\f\t\xE\t\x31\v\t\x3\t\x3\t\x3/\x2\n\x3\x5\x5\x6\a\a"); - sb.Append("\t\b\v\t\r\x2\xF\x2\x11\n\x3\x2\x4\a\x2\x32;\x61\x61\xB9\xB9"); - sb.Append("\x302\x371\x2041\x2042\xF\x2\x43\\\x63|\xC2\xD8\xDA\xF8\xFA"); - sb.Append("\x301\x372\x37F\x381\x2001\x200E\x200F\x2072\x2191\x2C02\x2FF1"); - sb.Append("\x3003\xD801\xF902\xFDD1\xFDF2\xFFFF\x34\x2\x3\x3\x2\x2\x2\x2"); - sb.Append("\x5\x3\x2\x2\x2\x2\a\x3\x2\x2\x2\x2\t\x3\x2\x2\x2\x2\v\x3\x2"); - sb.Append("\x2\x2\x2\x11\x3\x2\x2\x2\x3\x13\x3\x2\x2\x2\x5\x16\x3\x2\x2"); - sb.Append("\x2\a\x18\x3\x2\x2\x2\t\x1A\x3\x2\x2\x2\v\x1C\x3\x2\x2\x2\r"); - 
sb.Append("\'\x3\x2\x2\x2\xF)\x3\x2\x2\x2\x11+\x3\x2\x2\x2\x13\x14\a\x31"); - sb.Append("\x2\x2\x14\x15\a\x31\x2\x2\x15\x4\x3\x2\x2\x2\x16\x17\a\x31"); - sb.Append("\x2\x2\x17\x6\x3\x2\x2\x2\x18\x19\a,\x2\x2\x19\b\x3\x2\x2\x2"); - sb.Append("\x1A\x1B\a#\x2\x2\x1B\n\x3\x2\x2\x2\x1C \x5\xF\b\x2\x1D\x1F"); - sb.Append("\x5\r\a\x2\x1E\x1D\x3\x2\x2\x2\x1F\"\x3\x2\x2\x2 \x1E\x3\x2"); - sb.Append("\x2\x2 !\x3\x2\x2\x2!#\x3\x2\x2\x2\" \x3\x2\x2\x2#$\b\x6\x2"); - sb.Append("\x2$\f\x3\x2\x2\x2%(\x5\xF\b\x2&(\t\x2\x2\x2\'%\x3\x2\x2\x2"); - sb.Append("\'&\x3\x2\x2\x2(\xE\x3\x2\x2\x2)*\t\x3\x2\x2*\x10\x3\x2\x2\x2"); - sb.Append("+/\a)\x2\x2,.\v\x2\x2\x2-,\x3\x2\x2\x2.\x31\x3\x2\x2\x2/\x30"); - sb.Append("\x3\x2\x2\x2/-\x3\x2\x2\x2\x30\x32\x3\x2\x2\x2\x31/\x3\x2\x2"); - sb.Append("\x2\x32\x33\a)\x2\x2\x33\x12\x3\x2\x2\x2\x6\x2 \'/\x3\x3\x6"); - sb.Append("\x2"); - return sb.ToString(); - } - - public static readonly ATN _ATN = - new ATNDeserializer().Deserialize(_serializedATN.ToCharArray()); - - -} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.g4 b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.g4 deleted file mode 100644 index d5cf9991ff..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.g4 +++ /dev/null @@ -1,69 +0,0 @@ -lexer grammar XPathLexer; - -@header { -using System; -} - -tokens { TokenRef, RuleRef } - -/* -path : separator? word (separator word)* EOF ; - -separator - : '/' '!' - | '//' '!' - | '/' - | '//' - ; - -word: TokenRef - | RuleRef - | String - | '*' - ; -*/ - -Anywhere : '//' ; -Root : '/' ; -Wildcard : '*' ; -Bang : '!' 
; - -ID : NameStartChar NameChar* - { - String text = Text; - if ( Char.IsUpper(text[0]) ) - Type = TokenRef; - else - Type = RuleRef; - } - ; - -fragment -NameChar : NameStartChar - | '0'..'9' - | '_' - | '\u00B7' - | '\u0300'..'\u036F' - | '\u203F'..'\u2040' - ; - -fragment -NameStartChar - : 'A'..'Z' | 'a'..'z' - | '\u00C0'..'\u00D6' - | '\u00D8'..'\u00F6' - | '\u00F8'..'\u02FF' - | '\u0370'..'\u037D' - | '\u037F'..'\u1FFF' - | '\u200C'..'\u200D' - | '\u2070'..'\u218F' - | '\u2C00'..'\u2FEF' - | '\u3001'..'\uD7FF' - | '\uF900'..'\uFDCF' - | '\uFDF0'..'\uFFFD' - ; // ignores | ['\u10000-'\uEFFFF] ; - -String : '\'' .*? '\'' ; - -//Ws : [ \t\r\n]+ -> skip ; - diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln b/runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln deleted file mode 100644 index 27dc0c4a80..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln +++ /dev/null @@ -1,34 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.26114.2 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Antlr4.Runtime.dotnet", "Antlr4.Runtime\Antlr4.Runtime.dotnet.csproj", "{0F9F8436-A767-4407-8E81-F9C6270E2B5A}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Release|Any CPU = Release|Any CPU - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|x64 = Release|x64 - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Debug|Any CPU.Build.0 = Debug|Any CPU - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Debug|x64.ActiveCfg = Debug|x64 - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Debug|x64.Build.0 = Debug|x64 - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Debug|x86.ActiveCfg = Debug|x86 - 
{0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Debug|x86.Build.0 = Debug|x86 - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Release|Any CPU.ActiveCfg = Release|Any CPU - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Release|Any CPU.Build.0 = Release|Any CPU - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Release|x64.ActiveCfg = Release|x64 - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Release|x64.Build.0 = Release|x64 - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Release|x86.ActiveCfg = Release|x86 - {0F9F8436-A767-4407-8E81-F9C6270E2B5A}.Release|x86.Build.0 = Release|x86 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.mono.sln b/runtime/CSharp/runtime/CSharp/Antlr4.mono.sln deleted file mode 100644 index b5b7bc4558..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.mono.sln +++ /dev/null @@ -1,43 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2012 -VisualStudioVersion = 12.0.30110.0 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{47C0086D-577C-43DA-ADC7-544F27656E45}" - ProjectSection(SolutionItems) = preProject - ..\..\Readme.md = ..\..\Readme.md - EndProjectSection -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "build", "build", "{4CE79A54-058D-4940-875E-7F6AA9922A7D}" - ProjectSection(SolutionItems) = preProject - ..\..\build\Antlr4.nuspec = ..\..\build\Antlr4.nuspec - ..\..\build\Antlr4.Runtime.nuspec = ..\..\build\Antlr4.Runtime.nuspec - ..\..\build\Antlr4.VS2008.nuspec = ..\..\build\Antlr4.VS2008.nuspec - ..\..\build\build.ps1 = ..\..\build\build.ps1 - ..\..\build\check-key.ps1 = ..\..\build\check-key.ps1 - build\KeyReporting.targets = build\KeyReporting.targets - ..\..\build\keys.ps1 = ..\..\build\keys.ps1 - ..\..\build\push.ps1 = ..\..\build\push.ps1 - ..\..\build\version.ps1 = ..\..\build\version.ps1 - EndProjectSection 
-EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Antlr4.Runtime.mono", "Antlr4.Runtime\Antlr4.Runtime.mono.csproj", "{E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Release|Any CPU = Release|Any CPU - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Release|Any CPU.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(NestedProjects) = preSolution - {4CE79A54-058D-4940-875E-7F6AA9922A7D} = {47C0086D-577C-43DA-ADC7-544F27656E45} - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.vs2013.sln b/runtime/CSharp/runtime/CSharp/Antlr4.vs2013.sln deleted file mode 100644 index 2bf8cd6b96..0000000000 --- a/runtime/CSharp/runtime/CSharp/Antlr4.vs2013.sln +++ /dev/null @@ -1,23 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2013 -VisualStudioVersion = 12.0.31101.0 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{47C0086D-577C-43DA-ADC7-544F27656E45}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Antlr4.Runtime.vs2013", "Antlr4.Runtime\Antlr4.Runtime.vs2013.csproj", "{E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Release|Any CPU = Release|Any CPU - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - 
{E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}.Release|Any CPU.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal \ No newline at end of file diff --git a/runtime/CSharp/src/Antlr4.csproj b/runtime/CSharp/src/Antlr4.csproj new file mode 100644 index 0000000000..e4bc830217 --- /dev/null +++ b/runtime/CSharp/src/Antlr4.csproj @@ -0,0 +1,46 @@ + + + The ANTLR Organization + 4.13.2 + en-US + netstandard2.0 + net45;netstandard2.0 + $(NoWarn);CS1591;CS1574;CS1580;CS1570;NU5048 + true + Antlr4.Runtime.Standard + Antlr4.snk + true + true + Antlr4.Runtime.Standard + ANTLR 4 .NET Standard Runtime + Eric Vergnaud, Terence Parr, Sam Harwell + The .NET Core C# ANTLR 4 runtime from the ANTLR Organization + The runtime library for parsers generated by the C# target of the standard ANTLR 4 tool. + Copyright (c) 2012-2020 The ANTLR Project. All rights reserved. + true + BSD-3-Clause + https://github.com/antlr/antlr4 + https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png + https://github.com/antlr/antlr4/releases + antlr parsing grammar + false + false + false + false + false + false + false + false + false + Antlr4.Runtime + true + + + true + full + false + + + true + + diff --git a/runtime/CSharp/runtime/Antlr4.snk b/runtime/CSharp/src/Antlr4.snk similarity index 100% rename from runtime/CSharp/runtime/Antlr4.snk rename to runtime/CSharp/src/Antlr4.snk diff --git a/runtime/CSharp/src/AntlrFileStream.cs b/runtime/CSharp/src/AntlrFileStream.cs new file mode 100644 index 0000000000..e74df8c83f --- /dev/null +++ b/runtime/CSharp/src/AntlrFileStream.cs @@ -0,0 +1,60 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +using Encoding = System.Text.Encoding; +using File = System.IO.File; + +namespace Antlr4.Runtime +{ + /// + /// This is an + /// + /// that is loaded from a file all at once + /// when you construct the object. + /// + public class AntlrFileStream : AntlrInputStream + { + protected internal string fileName; + + /// + public AntlrFileStream(string fileName) + : this(fileName, null) + { + } + + /// + public AntlrFileStream(string fileName, Encoding encoding) + { + this.fileName = fileName; + Load(fileName, encoding); + } + + /// + public virtual void Load(string fileName, Encoding encoding) + { + if (fileName == null) + { + return; + } + + string text; + if (encoding != null) + text = File.ReadAllText(fileName, encoding); + else + text = File.ReadAllText(fileName); + + data = text.ToCharArray(); + n = data.Length; + } + + public override string SourceName + { + get + { + return fileName; + } + } + } +} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrInputStream.cs b/runtime/CSharp/src/AntlrInputStream.cs similarity index 99% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrInputStream.cs rename to runtime/CSharp/src/AntlrInputStream.cs index a35dc3452a..023dd671e5 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrInputStream.cs +++ b/runtime/CSharp/src/AntlrInputStream.cs @@ -48,8 +48,7 @@ public virtual void Consume() System.Diagnostics.Debug.Assert(LA(1) == IntStreamConstants.EOF); throw new InvalidOperationException("cannot consume EOF"); } - //System.out.println("prev p="+p+", c="+(char)data[p]); - if (p < n) + else { p++; } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATN.cs b/runtime/CSharp/src/Atn/ATN.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATN.cs rename to runtime/CSharp/src/Atn/ATN.cs index 
8f2a20f3be..966ded44b4 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATN.cs +++ b/runtime/CSharp/src/Atn/ATN.cs @@ -77,10 +77,10 @@ public class ATN private readonly PredictionContextCache contextCache = new PredictionContextCache(); [NotNull] - public DFA[] decisionToDFA = new DFA[0]; + public DFA[] decisionToDFA = Collections.EmptyList(); [NotNull] - public DFA[] modeToDFA = new DFA[0]; + public DFA[] modeToDFA = Collections.EmptyList(); protected internal readonly ConcurrentDictionary LL1Table = new ConcurrentDictionary(); @@ -104,7 +104,7 @@ public virtual PredictionContext GetCachedContext(PredictionContext context) /// If /// /// is - /// + /// /// , the set of tokens will not include what can follow /// the rule surrounding /// diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNConfig.cs b/runtime/CSharp/src/Atn/ATNConfig.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNConfig.cs rename to runtime/CSharp/src/Atn/ATNConfig.cs index 11e3e6bfb3..7ca6e6235f 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNConfig.cs +++ b/runtime/CSharp/src/Atn/ATNConfig.cs @@ -81,7 +81,7 @@ public ATNConfig(ATNConfig old) public ATNConfig(ATNState state, int alt, PredictionContext context) - : this(state, alt, context, SemanticContext.NONE) + : this(state, alt, context, SemanticContext.Empty.Instance) { } @@ -229,7 +229,7 @@ public String ToString(IRecognizer recog, bool showAlt) buf.Append(context.ToString()); buf.Append("]"); } - if (semanticContext != null && semanticContext != SemanticContext.NONE) + if (semanticContext != null && semanticContext != SemanticContext.Empty.Instance) { buf.Append(","); buf.Append(semanticContext); diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNConfigSet.cs b/runtime/CSharp/src/Atn/ATNConfigSet.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNConfigSet.cs rename to runtime/CSharp/src/Atn/ATNConfigSet.cs 
index 2ea9fe4b30..d740036f55 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNConfigSet.cs +++ b/runtime/CSharp/src/Atn/ATNConfigSet.cs @@ -95,7 +95,7 @@ public bool Add(ATNConfig config, MergeCache mergeCache) { if (readOnly) throw new Exception("This set is readonly"); - if (config.semanticContext != SemanticContext.NONE) + if (config.semanticContext != SemanticContext.Empty.Instance) { hasSemanticContext = true; } @@ -171,7 +171,7 @@ public List GetPredicates() List preds = new List(); foreach (ATNConfig c in configs) { - if (c.semanticContext != SemanticContext.NONE) + if (c.semanticContext != SemanticContext.Empty.Instance) { preds.Add(c.semanticContext); } @@ -311,7 +311,7 @@ public override String ToString() buf.Append(']'); if (hasSemanticContext) buf.Append(",hasSemanticContext=") - .Append(hasSemanticContext); + .Append(hasSemanticContext.ToString().ToLower()); if (uniqueAlt != ATN.INVALID_ALT_NUMBER) buf.Append(",uniqueAlt=") .Append(uniqueAlt); diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializationOptions.cs b/runtime/CSharp/src/Atn/ATNDeserializationOptions.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializationOptions.cs rename to runtime/CSharp/src/Atn/ATNDeserializationOptions.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs b/runtime/CSharp/src/Atn/ATNDeserializer.cs similarity index 88% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs rename to runtime/CSharp/src/Atn/ATNDeserializer.cs index 3ce2e87d2c..7f47677062 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs +++ b/runtime/CSharp/src/Atn/ATNDeserializer.cs @@ -5,8 +5,6 @@ using System; using System.Collections.Generic; using System.Globalization; -using System.IO; -using Antlr4.Runtime.Atn; using Antlr4.Runtime.Dfa; using Antlr4.Runtime.Misc; using Antlr4.Runtime.Sharpen; @@ -16,53 +14,11 @@ namespace 
Antlr4.Runtime.Atn /// Sam Harwell public class ATNDeserializer { - public static readonly int SerializedVersion = 3; - - /// This is the earliest supported serialized UUID. - /// This is the earliest supported serialized UUID. - private static readonly Guid BaseSerializedUuid; - - /// - /// This UUID indicates the serialized ATN contains two sets of - /// IntervalSets, where the second set's values are encoded as - /// 32-bit integers to support the full Unicode SMP range up to U+10FFFF. - /// - /// - /// This UUID indicates the serialized ATN contains two sets of - /// IntervalSets, where the second set's values are encoded as - /// 32-bit integers to support the full Unicode SMP range up to U+10FFFF. - /// - private static readonly Guid AddedUnicodeSmp; - - /// - /// This list contains all of the currently supported UUIDs, ordered by when - /// the feature first appeared in this branch. - /// - /// - /// This list contains all of the currently supported UUIDs, ordered by when - /// the feature first appeared in this branch. - /// - private static readonly IList SupportedUuids; - - /// This is the current serialized UUID. - /// This is the current serialized UUID. 
- public static readonly Guid SerializedUuid; - - static ATNDeserializer() - { - BaseSerializedUuid = new Guid("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"); - AddedUnicodeSmp = new Guid("59627784-3BE5-417A-B9EB-8131A7286089"); - SupportedUuids = new List(); - SupportedUuids.Add(BaseSerializedUuid); - SupportedUuids.Add(AddedUnicodeSmp); - SerializedUuid = AddedUnicodeSmp; - } + public static readonly int SerializedVersion = 4; [NotNull] private readonly ATNDeserializationOptions deserializationOptions; - private Guid uuid; - public ATNDeserializer() : this(ATNDeserializationOptions.Default) { @@ -77,66 +33,20 @@ public ATNDeserializer(ATNDeserializationOptions deserializationOptions) this.deserializationOptions = deserializationOptions; } - /// - /// Determines if a particular serialized representation of an ATN supports - /// a particular feature, identified by the - /// - /// used for serializing - /// the ATN at the time the feature was first introduced. - /// - /// - /// The - /// - /// marking the first time the feature was - /// supported in the serialized ATN. - /// - /// - /// The - /// - /// of the actual serialized ATN which is - /// currently being deserialized. - /// - /// - /// - /// - /// if the - /// - /// value represents a - /// serialized ATN at or after the feature identified by - /// - /// was - /// introduced; otherwise, - /// - /// . 
- /// - protected internal virtual bool IsFeatureSupported(Guid feature, Guid actualUuid) - { - int featureIndex = SupportedUuids.IndexOf(feature); - if (featureIndex < 0) - { - return false; - } - return SupportedUuids.IndexOf(actualUuid) >= featureIndex; - } - - char[] data; + int[] data; int p; - public virtual ATN Deserialize(char[] data) + public virtual ATN Deserialize(int[] data) { - Reset (data); + this.data = data; CheckVersion (); - CheckUUID (); ATN atn = ReadATN (); ReadStates (atn); ReadRules (atn); ReadModes (atn); IList sets = new List(); - ReadSets (atn, sets, this.ReadInt); - if (IsFeatureSupported(AddedUnicodeSmp, uuid)) { - ReadSets (atn, sets, this.ReadInt32); - } - ReadEdges (atn, sets); + ReadSets (atn, sets); + ReadEdges (atn, sets); ReadDecisions (atn); ReadLexerActions (atn); MarkPrecedenceDecisions(atn); @@ -279,15 +189,7 @@ protected internal virtual void ReadLexerActions(ATN atn) { LexerActionType actionType = (LexerActionType)ReadInt(); int data1 = ReadInt(); - if (data1 == unchecked((int)(0xFFFF))) - { - data1 = -1; - } int data2 = ReadInt(); - if (data2 == unchecked((int)(0xFFFF))) - { - data2 = -1; - } ILexerAction lexerAction = LexerActionFactory(actionType, data1, data2); atn.lexerActions[i_10] = lexerAction; } @@ -398,7 +300,7 @@ protected internal virtual void ReadEdges(ATN atn, IList sets) } } - protected internal virtual void ReadSets(ATN atn, IList sets, Func readUnicode) + protected internal virtual void ReadSets(ATN atn, IList sets) { // // SETS @@ -416,7 +318,7 @@ protected internal virtual void ReadSets(ATN atn, IList sets, Func< } for (int j = 0; j < nintervals; j++) { - set.Add(readUnicode(), readUnicode()); + set.Add(ReadInt(), ReadInt()); } } } @@ -458,9 +360,6 @@ protected internal virtual void ReadRules(ATN atn) atn.ruleToStartState[i_5] = startState; if (atn.grammarType == ATNType.Lexer) { int tokenType = ReadInt (); - if (tokenType == unchecked((int)(0xFFFF))) { - tokenType = TokenConstants.EOF; - } 
atn.ruleToTokenType [i_5] = tokenType; } } @@ -546,16 +445,6 @@ protected internal virtual ATN ReadATN() return new ATN(grammarType, maxTokenType); } - protected internal virtual void CheckUUID() - { - uuid = ReadUUID(); - if (!SupportedUuids.Contains(uuid)) - { - string reason = string.Format(CultureInfo.CurrentCulture, "Could not deserialize ATN with UUID {0} (expected {1} or a legacy UUID).", uuid, SerializedUuid); - throw new NotSupportedException(reason); - } - } - protected internal virtual void CheckVersion() { int version = ReadInt(); @@ -566,18 +455,6 @@ protected internal virtual void CheckVersion() } } - protected internal virtual void Reset(char[] data) - { - this.data = new char[data.Length]; - // don't adjust the first value since that's the version number - this.data[0] = data[0]; - for (int i = 1; i < data.Length; i++) - { - this.data[i] = (char)(data[i] - 2); - } - this.p = 0; - } - /// /// Analyze the /// @@ -1078,30 +955,6 @@ protected internal int ReadInt() return data[p++]; } - protected internal int ReadInt32() - { - return (int)data[p++] | ((int)data[p++] << 16); - } - - protected internal long ReadLong() - { - long lowOrder = ReadInt32() & unchecked((long)(0x00000000FFFFFFFFL)); - return lowOrder | ((long)ReadInt32() << 32); - } - - protected internal Guid ReadUUID() - { - byte[] d = BitConverter.GetBytes (ReadLong ()); - if(BitConverter.IsLittleEndian) - { - Array.Reverse(d); - } - short c = (short)ReadInt(); - short b = (short)ReadInt(); - int a = ReadInt32(); - return new Guid(a, b, c, d); - } - [return: NotNull] protected internal virtual Transition EdgeFactory(ATN atn, TransitionType type, int src, int trg, int arg1, int arg2, int arg3, IList sets) { diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNSimulator.cs b/runtime/CSharp/src/Atn/ATNSimulator.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNSimulator.cs rename to runtime/CSharp/src/Atn/ATNSimulator.cs index 
53eb97e51c..c9af293699 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNSimulator.cs +++ b/runtime/CSharp/src/Atn/ATNSimulator.cs @@ -75,13 +75,6 @@ public virtual void ClearDFA() throw new Exception("This ATN simulator does not support clearing the DFA."); } - protected void ConsoleWriteLine(string format, params object[] arg) - { -#if !PORTABLE - System.Console.WriteLine(format, arg); -#endif - } - public PredictionContextCache getSharedContextCache() { return sharedContextCache; diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNState.cs b/runtime/CSharp/src/Atn/ATNState.cs similarity index 99% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNState.cs rename to runtime/CSharp/src/Atn/ATNState.cs index 3f5c61a470..52541d52b2 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNState.cs +++ b/runtime/CSharp/src/Atn/ATNState.cs @@ -95,9 +95,7 @@ public virtual void AddTransition(int index, Antlr4.Runtime.Atn.Transition e) { if (epsilonOnlyTransitions != e.IsEpsilon) { -#if !PORTABLE System.Console.Error.WriteLine("ATN state {0} has both epsilon and non-epsilon transitions.", stateNumber); -#endif epsilonOnlyTransitions = false; } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNType.cs b/runtime/CSharp/src/Atn/ATNType.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNType.cs rename to runtime/CSharp/src/Atn/ATNType.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/AbstractPredicateTransition.cs b/runtime/CSharp/src/Atn/AbstractPredicateTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/AbstractPredicateTransition.cs rename to runtime/CSharp/src/Atn/AbstractPredicateTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ActionTransition.cs b/runtime/CSharp/src/Atn/ActionTransition.cs similarity index 100% rename from 
runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ActionTransition.cs rename to runtime/CSharp/src/Atn/ActionTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/AmbiguityInfo.cs b/runtime/CSharp/src/Atn/AmbiguityInfo.cs similarity index 75% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/AmbiguityInfo.cs rename to runtime/CSharp/src/Atn/AmbiguityInfo.cs index fbe0bd7e89..7017db797e 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/AmbiguityInfo.cs +++ b/runtime/CSharp/src/Atn/AmbiguityInfo.cs @@ -41,6 +41,9 @@ namespace Antlr4.Runtime.Atn /// 4.3 public class AmbiguityInfo : DecisionEventInfo { + /** The set of alternative numbers for this decision event that lead to a valid parse. */ + public BitSet ambigAlts; + /// /// Constructs a new instance of the /// @@ -48,19 +51,30 @@ public class AmbiguityInfo : DecisionEventInfo /// specified detailed ambiguity information. /// /// The decision number - /// - /// The final simulator state identifying the ambiguous + /// The final configuration set identifying the ambiguous /// alternatives for the current input /// + /// The set of alternatives in the decision that lead to a valid parse. 
+ /// The predicted alt is the min(ambigAlts) + /// /// The input token stream /// The start index for the current prediction /// /// The index at which the ambiguity was identified during /// prediction /// - public AmbiguityInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex) - : base(decision, state, input, startIndex, stopIndex, state.useContext) + /// @code true} if the ambiguity was identified during LL + /// prediction; otherwise, {@code false} if the ambiguity was identified + /// during SLL prediction + /// + public AmbiguityInfo(int decision, + ATNConfigSet configs, + BitSet ambigAlts, + ITokenStream input, int startIndex, int stopIndex, + bool fullCtx) + : base(decision, configs, input, startIndex, stopIndex, fullCtx) { + this.ambigAlts = ambigAlts; } } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ArrayPredictionContext.cs b/runtime/CSharp/src/Atn/ArrayPredictionContext.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ArrayPredictionContext.cs rename to runtime/CSharp/src/Atn/ArrayPredictionContext.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/AtomTransition.cs b/runtime/CSharp/src/Atn/AtomTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/AtomTransition.cs rename to runtime/CSharp/src/Atn/AtomTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BasicBlockStartState.cs b/runtime/CSharp/src/Atn/BasicBlockStartState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BasicBlockStartState.cs rename to runtime/CSharp/src/Atn/BasicBlockStartState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BasicState.cs b/runtime/CSharp/src/Atn/BasicState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BasicState.cs rename to runtime/CSharp/src/Atn/BasicState.cs diff --git 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BlockEndState.cs b/runtime/CSharp/src/Atn/BlockEndState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BlockEndState.cs rename to runtime/CSharp/src/Atn/BlockEndState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BlockStartState.cs b/runtime/CSharp/src/Atn/BlockStartState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/BlockStartState.cs rename to runtime/CSharp/src/Atn/BlockStartState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ConflictInfo.cs b/runtime/CSharp/src/Atn/ConflictInfo.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ConflictInfo.cs rename to runtime/CSharp/src/Atn/ConflictInfo.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ContextSensitivityInfo.cs b/runtime/CSharp/src/Atn/ContextSensitivityInfo.cs similarity index 86% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ContextSensitivityInfo.cs rename to runtime/CSharp/src/Atn/ContextSensitivityInfo.cs index b978c6786f..454a72ef53 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ContextSensitivityInfo.cs +++ b/runtime/CSharp/src/Atn/ContextSensitivityInfo.cs @@ -35,9 +35,8 @@ public class ContextSensitivityInfo : DecisionEventInfo /// with the specified detailed context sensitivity information. 
/// /// The decision number - /// - /// The final simulator state containing the unique - /// alternative identified by full-context prediction + /// The final configuration set identifying the ambiguous + /// alternatives for the current input /// /// The input token stream /// The start index for the current prediction @@ -45,8 +44,8 @@ public class ContextSensitivityInfo : DecisionEventInfo /// The index at which the context sensitivity was /// identified during full-context prediction /// - public ContextSensitivityInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex) - : base(decision, state, input, startIndex, stopIndex, true) + public ContextSensitivityInfo(int decision, ATNConfigSet configs, ITokenStream input, int startIndex, int stopIndex) + : base(decision, configs, input, startIndex, stopIndex, true) { } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/DecisionEventInfo.cs b/runtime/CSharp/src/Atn/DecisionEventInfo.cs similarity index 77% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/DecisionEventInfo.cs rename to runtime/CSharp/src/Atn/DecisionEventInfo.cs index ee2098cb7c..6ab88c4a15 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/DecisionEventInfo.cs +++ b/runtime/CSharp/src/Atn/DecisionEventInfo.cs @@ -25,15 +25,13 @@ public class DecisionEventInfo /// public readonly int decision; - /// - /// The simulator state containing additional information relevant to the - /// prediction state when the current event occurred, or - /// - /// if no - /// additional information is relevant or available. - /// - [Nullable] - public readonly SimulatorState state; + /// The configuration set containing additional information relevant to the + /// prediction state when the current event occurred, or {@code null} if no + /// additional information is relevant or available. 
+ /// The configuration set containing additional information relevant to the + /// prediction state when the current event occurred, or {@code null} if no + /// additional information is relevant or available. + public readonly ATNConfigSet configs; /// The input token stream which is being parsed. /// The input token stream which is being parsed. @@ -63,14 +61,17 @@ public class DecisionEventInfo /// public readonly bool fullCtx; - public DecisionEventInfo(int decision, SimulatorState state, ITokenStream input, int startIndex, int stopIndex, bool fullCtx) + public DecisionEventInfo(int decision, + ATNConfigSet configs, + ITokenStream input, int startIndex, int stopIndex, + bool fullCtx) { this.decision = decision; this.fullCtx = fullCtx; this.stopIndex = stopIndex; this.input = input; this.startIndex = startIndex; - this.state = state; + this.configs = configs; } } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/DecisionInfo.cs b/runtime/CSharp/src/Atn/DecisionInfo.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/DecisionInfo.cs rename to runtime/CSharp/src/Atn/DecisionInfo.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/DecisionState.cs b/runtime/CSharp/src/Atn/DecisionState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/DecisionState.cs rename to runtime/CSharp/src/Atn/DecisionState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/EmptyPredictionContext.cs b/runtime/CSharp/src/Atn/EmptyPredictionContext.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/EmptyPredictionContext.cs rename to runtime/CSharp/src/Atn/EmptyPredictionContext.cs index 937092d88b..dfc971b751 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/EmptyPredictionContext.cs +++ b/runtime/CSharp/src/Atn/EmptyPredictionContext.cs @@ -12,14 +12,13 @@ namespace Antlr4.Runtime.Atn #pragma warning disable 0659 // 'class' overrides 
Object.Equals(object o) but does not override Object.GetHashCode() public sealed class EmptyPredictionContext : SingletonPredictionContext { + public static readonly EmptyPredictionContext Instance = new EmptyPredictionContext(); internal EmptyPredictionContext() : base(null, EMPTY_RETURN_STATE) { } - - public override PredictionContext GetParent(int index) { return null; diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/EpsilonTransition.cs b/runtime/CSharp/src/Atn/EpsilonTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/EpsilonTransition.cs rename to runtime/CSharp/src/Atn/EpsilonTransition.cs diff --git a/runtime/CSharp/src/Atn/ErrorInfo.cs b/runtime/CSharp/src/Atn/ErrorInfo.cs new file mode 100644 index 0000000000..93676d2103 --- /dev/null +++ b/runtime/CSharp/src/Atn/ErrorInfo.cs @@ -0,0 +1,48 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +using Antlr4.Runtime; +using Antlr4.Runtime.Atn; +using Antlr4.Runtime.Sharpen; + +namespace Antlr4.Runtime.Atn +{ + /// + /// This class represents profiling event information for a syntax error + /// identified during prediction. + /// + /// + /// This class represents profiling event information for a syntax error + /// identified during prediction. Syntax errors occur when the prediction + /// algorithm is unable to identify an alternative which would lead to a + /// successful parse. + /// + /// + /// + /// 4.3 + public class ErrorInfo : DecisionEventInfo + { + /// + /// Constructs a new instance of the + /// + /// class with the + /// specified detailed syntax error information. 
+ /// + /// The decision number + /// The final configuration set reached during prediction + /// prior to reaching the {@link ATNSimulator#ERROR} state + /// + /// The input token stream + /// The start index for the current prediction + /// The index at which the syntax error was identified + /// {@code true} if the syntax error was identified during LL + /// prediction; otherwise, {@code false} if the syntax error was identified + /// during SLL prediction + /// + public ErrorInfo(int decision, ATNConfigSet configs, ITokenStream input, int startIndex, int stopIndex, bool fullCtx) + : base(decision, configs, input, startIndex, stopIndex, fullCtx) + { + } + } +} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ILexerAction.cs b/runtime/CSharp/src/Atn/ILexerAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ILexerAction.cs rename to runtime/CSharp/src/Atn/ILexerAction.cs diff --git a/runtime/CSharp/src/Atn/LL1Analyzer.cs b/runtime/CSharp/src/Atn/LL1Analyzer.cs new file mode 100644 index 0000000000..87239c8d3c --- /dev/null +++ b/runtime/CSharp/src/Atn/LL1Analyzer.cs @@ -0,0 +1,252 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +using System.Collections.Generic; +using Antlr4.Runtime.Misc; +using Antlr4.Runtime.Sharpen; + +namespace Antlr4.Runtime.Atn +{ + public class LL1Analyzer + { + /** Special value added to the lookahead sets to indicate that we hit + * a predicate during analysis if {@code seeThruPreds==false}. + */ + public const int HitPred = TokenConstants.InvalidType; + + [NotNull] + public readonly ATN atn; + + public LL1Analyzer(ATN atn) + { + this.atn = atn; + } + + /** + * Calculates the SLL(1) expected lookahead set for each outgoing transition + * of an {@link ATNState}. 
The returned array has one element for each + * outgoing transition in {@code s}. If the closure from transition + * i leads to a semantic predicate before matching a symbol, the + * element at index i of the result will be {@code null}. + * + * @param s the ATN state + * @return the expected symbols for each outgoing transition of {@code s}. + */ + [return: Nullable] + public virtual IntervalSet[] GetDecisionLookahead(ATNState s) + { + // System.out.println("LOOK("+s.stateNumber+")"); + if (s == null) + { + return null; + } + IntervalSet[] look = new IntervalSet[s.NumberOfTransitions]; + for (int alt = 0; alt < s.NumberOfTransitions; alt++) + { + look[alt] = new IntervalSet(); + HashSet lookBusy = new HashSet(); + bool seeThruPreds = false; + // fail to get lookahead upon pred + Look_(s.Transition(alt).target, null, EmptyPredictionContext.Instance, look[alt], lookBusy, new BitSet(), seeThruPreds, false); + // Wipe out lookahead for this alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if (look[alt].Count == 0 || look[alt].Contains(HitPred)) + { + look[alt] = null; + } + } + return look; + } + + /** + * Compute set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + * + *

    If {@code ctx} is {@code null} and the end of the rule containing + * {@code s} is reached, {@link Token#EPSILON} is added to the result set. + * If {@code ctx} is not {@code null} and the end of the outermost rule is + * reached, {@link Token#EOF} is added to the result set.

    + * + * @param s the ATN state + * @param ctx the complete parser context, or {@code null} if the context + * should be ignored + * + * @return The set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + */ + [return: NotNull] + public virtual IntervalSet Look(ATNState s, RuleContext ctx) + { + return Look(s, null, ctx); + } + + /** + * Compute set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + * + *

    If {@code ctx} is {@code null} and the end of the rule containing + * {@code s} is reached, {@link Token#EPSILON} is added to the result set. + * If {@code ctx} is not {@code null} and the end of the outermost rule is + * reached, {@link Token#EOF} is added to the result set.

    + * + * @param s the ATN state + * @param stopState the ATN state to stop at. This can be a + * {@link BlockEndState} to detect epsilon paths through a closure. + * @param ctx the complete parser context, or {@code null} if the context + * should be ignored + * + * @return The set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + */ + [return: NotNull] + public virtual IntervalSet Look(ATNState s, ATNState stopState, RuleContext ctx) + { + IntervalSet r = new IntervalSet(); + bool seeThruPreds = true; + PredictionContext lookContext = ctx != null ? PredictionContext.FromRuleContext(s.atn, ctx) : null; + Look_(s, stopState, lookContext, r, new HashSet(), new BitSet(), seeThruPreds, true); + return r; + } + + /** + * Compute set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + * + *

    If {@code ctx} is {@code null} and {@code stopState} or the end of the + * rule containing {@code s} is reached, {@link Token#EPSILON} is added to + * the result set. If {@code ctx} is not {@code null} and {@code addEOF} is + * {@code true} and {@code stopState} or the end of the outermost rule is + * reached, {@link Token#EOF} is added to the result set.

    + * + * @param s the ATN state. + * @param stopState the ATN state to stop at. This can be a + * {@link BlockEndState} to detect epsilon paths through a closure. + * @param ctx The outer context, or {@code null} if the outer context should + * not be used. + * @param look The result lookahead set. + * @param lookBusy A set used for preventing epsilon closures in the ATN + * from causing a stack overflow. Outside code should pass + * {@code new HashSet} for this argument. + * @param calledRuleStack A set used for preventing left recursion in the + * ATN from causing a stack overflow. Outside code should pass + * {@code new BitSet()} for this argument. + * @param seeThruPreds {@code true} to true semantic predicates as + * implicitly {@code true} and "see through them", otherwise {@code false} + * to treat semantic predicates as opaque and add {@link #HIT_PRED} to the + * result if one is encountered. + * @param addEOF Add {@link Token#EOF} to the result if the end of the + * outermost context is reached. This parameter has no effect if {@code ctx} + * is {@code null}. 
+ */ + protected internal virtual void Look_(ATNState s, ATNState stopState, PredictionContext ctx, IntervalSet look, HashSet lookBusy, BitSet calledRuleStack, bool seeThruPreds, bool addEOF) + { + ATNConfig c = new ATNConfig(s, 0, ctx); + if (!lookBusy.Add(c)) + { + return; + } + if (s == stopState) + { + if (ctx == null) + { + look.Add(TokenConstants.EPSILON); + return; + } + else if (ctx.IsEmpty && addEOF) + { + look.Add(TokenConstants.EOF); + return; + } + } + if (s is RuleStopState) + { + if (ctx == null) + { + look.Add(TokenConstants.EPSILON); + return; + } + else if (ctx.IsEmpty && addEOF) + { + look.Add(TokenConstants.EOF); + return; + } + if (ctx != EmptyPredictionContext.Instance) + { + bool removed = calledRuleStack.Get(s.ruleIndex); + try + { + calledRuleStack.Clear(s.ruleIndex); + for (int i = 0; i < ctx.Size; i++) + { + ATNState returnState = atn.states[ctx.GetReturnState(i)]; + Look_(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF); + } + } + finally + { + if (removed) + { + calledRuleStack.Set(s.ruleIndex); + } + } + return; + } + } + int n = s.NumberOfTransitions; + for (int i_1 = 0; i_1 < n; i_1++) + { + Transition t = s.Transition(i_1); + if (t.GetType() == typeof(RuleTransition)) + { + RuleTransition ruleTransition = (RuleTransition)t; + if (calledRuleStack.Get(ruleTransition.ruleIndex)) + { + continue; + } + PredictionContext newContext = SingletonPredictionContext.Create(ctx, ruleTransition.followState.stateNumber); + try + { + calledRuleStack.Set(ruleTransition.target.ruleIndex); + Look_(t.target, stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); + } + finally + { + calledRuleStack.Clear(ruleTransition.target.ruleIndex); + } + } + else if (t is AbstractPredicateTransition) + { + if (seeThruPreds) + { + Look_(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); + } + else + { + look.Add(HitPred); + } + } + else if (t.IsEpsilon) + { + 
Look_(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); + } + else if (t.GetType() == typeof(WildcardTransition)) + { + look.AddAll(IntervalSet.Of(TokenConstants.MinUserTokenType, atn.maxTokenType)); + } + else + { + IntervalSet set = t.Label; + if (set != null) + { + if (t is NotSetTransition) + { + set = set.Complement(IntervalSet.Of(TokenConstants.MinUserTokenType, atn.maxTokenType)); + } + look.AddAll(set); + } + } + } + } + } +} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerATNConfig.cs b/runtime/CSharp/src/Atn/LexerATNConfig.cs similarity index 96% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerATNConfig.cs rename to runtime/CSharp/src/Atn/LexerATNConfig.cs index 1004a49641..aa78cffe51 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerATNConfig.cs +++ b/runtime/CSharp/src/Atn/LexerATNConfig.cs @@ -20,7 +20,7 @@ public class LexerATNConfig : ATNConfig public LexerATNConfig(ATNState state, int alt, PredictionContext context) - : base(state, alt, context/*, SemanticContext.NONE*/) // TODO + : base(state, alt, context/*, SemanticContext.Empty.Instance*/) // TODO { this.passedThroughNonGreedyDecision = false; this.lexerActionExecutor = null; @@ -30,7 +30,7 @@ public LexerATNConfig(ATNState state, int alt, PredictionContext context, LexerActionExecutor lexerActionExecutor) - : base(state, alt, context, SemanticContext.NONE) + : base(state, alt, context, SemanticContext.Empty.Instance) { this.lexerActionExecutor = lexerActionExecutor; this.passedThroughNonGreedyDecision = false; diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerATNSimulator.cs b/runtime/CSharp/src/Atn/LexerATNSimulator.cs similarity index 95% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerATNSimulator.cs rename to runtime/CSharp/src/Atn/LexerATNSimulator.cs index e84c20de34..4644decd4f 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerATNSimulator.cs +++ 
b/runtime/CSharp/src/Atn/LexerATNSimulator.cs @@ -45,8 +45,6 @@ public class LexerATNSimulator : ATNSimulator readonly SimState prevAccept = new SimState(); - public static int match_calls = 0; - public LexerATNSimulator(ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) : this(null, atn, decisionToDFA, sharedContextCache) @@ -74,7 +72,6 @@ public void CopyState(LexerATNSimulator simulator) public int Match(ICharStream input, int mode) { - match_calls++; this.mode = mode; int mark = input.Mark(); try @@ -119,7 +116,7 @@ protected int MatchATN(ICharStream input) ATNState startState = atn.modeToStartState[mode]; if (debug) { - ConsoleWriteLine("matchATN mode " + mode + " start: " + startState); + Console.WriteLine("matchATN mode " + mode + " start: " + startState); } int old_mode = mode; @@ -136,7 +133,7 @@ protected int MatchATN(ICharStream input) int predict = ExecATN(input, next); if (debug) { - ConsoleWriteLine("DFA after matchATN: " + decisionToDFA[old_mode].ToString()); + Console.WriteLine("DFA after matchATN: " + decisionToDFA[old_mode].ToString()); } return predict; } @@ -146,7 +143,7 @@ protected int ExecATN(ICharStream input, DFAState ds0) //System.out.println("enter exec index "+input.index()+" from "+ds0.configs); if (debug) { - ConsoleWriteLine("start state closure=" + ds0.configSet); + Console.WriteLine("start state closure=" + ds0.configSet); } if (ds0.isAcceptState) { @@ -162,7 +159,7 @@ protected int ExecATN(ICharStream input, DFAState ds0) { // while more work if (debug) { - ConsoleWriteLine("execATN loop starting closure: " + s.configSet); + Console.WriteLine("execATN loop starting closure: " + s.configSet); } // As we move src->trg, src->trg, we keep track of the previous trg to // avoid looking up the DFA state again, which is expensive. 
@@ -239,7 +236,7 @@ protected DFAState GetExistingTargetState(DFAState s, int t) DFAState target = s.edges[t - MIN_DFA_EDGE]; if (debug && target != null) { - ConsoleWriteLine("reuse state " + s.stateNumber + " edge to " + target.stateNumber); + Console.WriteLine("reuse state " + s.stateNumber + " edge to " + target.stateNumber); } return target; @@ -323,7 +320,7 @@ protected void GetReachableConfigSet(ICharStream input, ATNConfigSet closure, AT if (debug) { - ConsoleWriteLine("testing " + GetTokenName(t) + " at " + c.ToString(recog, true)); + Console.WriteLine("testing " + GetTokenName(t) + " at " + c.ToString(recog, true)); } int n = c.state.NumberOfTransitions; @@ -357,7 +354,7 @@ protected void Accept(ICharStream input, LexerActionExecutor lexerActionExecutor { if (debug) { - ConsoleWriteLine("ACTION " + lexerActionExecutor); + Console.WriteLine("ACTION " + lexerActionExecutor); } // seek to after last char in token @@ -386,7 +383,7 @@ protected ATNState GetReachableTarget(Transition trans, int t) protected ATNConfigSet ComputeStartState(ICharStream input, ATNState p) { - PredictionContext initialContext = PredictionContext.EMPTY; + PredictionContext initialContext = EmptyPredictionContext.Instance; ATNConfigSet configs = new OrderedATNConfigSet(); for (int i = 0; i < p.NumberOfTransitions; i++) { @@ -411,7 +408,7 @@ protected bool Closure(ICharStream input, LexerATNConfig config, ATNConfigSet co { if (debug) { - ConsoleWriteLine("closure(" + config.ToString(recog, true) + ")"); + Console.WriteLine("closure(" + config.ToString(recog, true) + ")"); } if (config.state is RuleStopState) @@ -420,10 +417,10 @@ protected bool Closure(ICharStream input, LexerATNConfig config, ATNConfigSet co { if (recog != null) { - ConsoleWriteLine("closure at " + recog.RuleNames[config.state.ruleIndex] + " rule stop " + config); + Console.WriteLine("closure at " + recog.RuleNames[config.state.ruleIndex] + " rule stop " + config); } else { - ConsoleWriteLine("closure at rule stop " + 
config); + Console.WriteLine("closure at rule stop " + config); } } @@ -435,7 +432,7 @@ protected bool Closure(ICharStream input, LexerATNConfig config, ATNConfigSet co return true; } else { - configs.Add(new LexerATNConfig(config, config.state, PredictionContext.EMPTY)); + configs.Add(new LexerATNConfig(config, config.state, EmptyPredictionContext.Instance)); currentAltReachedAcceptState = true; } } @@ -523,7 +520,7 @@ states reached by traversing predicates. Since this is when we PredicateTransition pt = (PredicateTransition)t; if (debug) { - ConsoleWriteLine("EVAL rule " + pt.ruleIndex + ":" + pt.predIndex); + Console.WriteLine("EVAL rule " + pt.ruleIndex + ":" + pt.predIndex); } configs.hasSemanticContext = true; if (EvaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative)) @@ -682,7 +679,7 @@ protected void AddDFAEdge(DFAState p, int t, DFAState q) if (debug) { - ConsoleWriteLine("EDGE " + p + " -> " + q + " upon " + ((char)t)); + Console.WriteLine("EDGE " + p + " -> " + q + " upon " + ((char)t)); } lock (p) @@ -782,7 +779,7 @@ public int Column } - public void Consume(ICharStream input) + public virtual void Consume(ICharStream input) { int curChar = input.LA(1); if (curChar == '\n') diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerActionExecutor.cs b/runtime/CSharp/src/Atn/LexerActionExecutor.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerActionExecutor.cs rename to runtime/CSharp/src/Atn/LexerActionExecutor.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerActionType.cs b/runtime/CSharp/src/Atn/LexerActionType.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerActionType.cs rename to runtime/CSharp/src/Atn/LexerActionType.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerChannelAction.cs b/runtime/CSharp/src/Atn/LexerChannelAction.cs similarity index 100% rename from 
runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerChannelAction.cs rename to runtime/CSharp/src/Atn/LexerChannelAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerCustomAction.cs b/runtime/CSharp/src/Atn/LexerCustomAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerCustomAction.cs rename to runtime/CSharp/src/Atn/LexerCustomAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerIndexedCustomAction.cs b/runtime/CSharp/src/Atn/LexerIndexedCustomAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerIndexedCustomAction.cs rename to runtime/CSharp/src/Atn/LexerIndexedCustomAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerModeAction.cs b/runtime/CSharp/src/Atn/LexerModeAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerModeAction.cs rename to runtime/CSharp/src/Atn/LexerModeAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerMoreAction.cs b/runtime/CSharp/src/Atn/LexerMoreAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerMoreAction.cs rename to runtime/CSharp/src/Atn/LexerMoreAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerPopModeAction.cs b/runtime/CSharp/src/Atn/LexerPopModeAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerPopModeAction.cs rename to runtime/CSharp/src/Atn/LexerPopModeAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerPushModeAction.cs b/runtime/CSharp/src/Atn/LexerPushModeAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerPushModeAction.cs rename to runtime/CSharp/src/Atn/LexerPushModeAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerSkipAction.cs b/runtime/CSharp/src/Atn/LexerSkipAction.cs 
similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerSkipAction.cs rename to runtime/CSharp/src/Atn/LexerSkipAction.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerTypeAction.cs b/runtime/CSharp/src/Atn/LexerTypeAction.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LexerTypeAction.cs rename to runtime/CSharp/src/Atn/LexerTypeAction.cs diff --git a/runtime/CSharp/src/Atn/LookaheadEventInfo.cs b/runtime/CSharp/src/Atn/LookaheadEventInfo.cs new file mode 100644 index 0000000000..8d8da4a0f2 --- /dev/null +++ b/runtime/CSharp/src/Atn/LookaheadEventInfo.cs @@ -0,0 +1,58 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +using Antlr4.Runtime; +using Antlr4.Runtime.Atn; +using Antlr4.Runtime.Sharpen; + +namespace Antlr4.Runtime.Atn +{ + /// + /// This class represents profiling event information for tracking the lookahead + /// depth required in order to make a prediction. + /// + /// + /// This class represents profiling event information for tracking the lookahead + /// depth required in order to make a prediction. + /// + /// 4.3 + public class LookaheadEventInfo : DecisionEventInfo + { + /// The alternative chosen by adaptivePredict(), not necessarily + /// the outermost alt shown for a rule; left-recursive rules have + /// user-level alts that differ from the rewritten rule with a (...) block + /// and a (..)* loop. + /// + public int predictedAlt; + + /// + /// Constructs a new instance of the + /// + /// class with + /// the specified detailed lookahead information. 
+ /// + /// The decision number + /// The final configuration set containing the necessary + /// information to determine the result of a prediction, or {@code null} if + /// the final configuration set is not available + /// + /// + /// The input token stream + /// The start index for the current prediction + /// The index at which the prediction was finally made + /// + /// + /// if the current lookahead is part of an LL + /// prediction; otherwise, + /// + /// if the current lookahead is part of + /// an SLL prediction + /// + public LookaheadEventInfo(int decision, ATNConfigSet configs, int predictedAlt, ITokenStream input, int startIndex, int stopIndex, bool fullCtx) + : base(decision, configs, input, startIndex, stopIndex, fullCtx) + { + this.predictedAlt = predictedAlt; + } + } +} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LoopEndState.cs b/runtime/CSharp/src/Atn/LoopEndState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/LoopEndState.cs rename to runtime/CSharp/src/Atn/LoopEndState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/MergeCache.cs b/runtime/CSharp/src/Atn/MergeCache.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/MergeCache.cs rename to runtime/CSharp/src/Atn/MergeCache.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/NotSetTransition.cs b/runtime/CSharp/src/Atn/NotSetTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/NotSetTransition.cs rename to runtime/CSharp/src/Atn/NotSetTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ParseInfo.cs b/runtime/CSharp/src/Atn/ParseInfo.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ParseInfo.cs rename to runtime/CSharp/src/Atn/ParseInfo.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ParserATNSimulator.cs b/runtime/CSharp/src/Atn/ParserATNSimulator.cs 
similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ParserATNSimulator.cs rename to runtime/CSharp/src/Atn/ParserATNSimulator.cs index a7d914a711..8e3ec8d5da 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ParserATNSimulator.cs +++ b/runtime/CSharp/src/Atn/ParserATNSimulator.cs @@ -241,7 +241,7 @@ namespace Antlr4.Runtime.Atn public class ParserATNSimulator : ATNSimulator { public static readonly bool debug = false; - public static readonly bool debug_list_atn_decisions = false; + public static bool trace_atn_sim = false; public static readonly bool dfa_debug = false; public static readonly bool retry_debug = false; @@ -284,8 +284,8 @@ public ParserATNSimulator(Parser parser, ATN atn, this.parser = parser; this.decisionToDFA = decisionToDFA; // DOTGenerator dot = new DOTGenerator(null); - // ConsoleWriteLine(dot.getDOT(atn.rules.get(0), parser.getRuleNames())); - // ConsoleWriteLine(dot.getDOT(atn.rules.get(1), parser.getRuleNames())); + // Console.WriteLine(dot.getDOT(atn.rules.get(0), parser.getRuleNames())); + // Console.WriteLine(dot.getDOT(atn.rules.get(1), parser.getRuleNames())); } public override void Reset() @@ -304,9 +304,9 @@ public override void ClearDFA() public virtual int AdaptivePredict(ITokenStream input, int decision, ParserRuleContext outerContext) { - if (debug || debug_list_atn_decisions) + if (debug || trace_atn_sim) { - ConsoleWriteLine("adaptivePredict decision " + decision + + Console.WriteLine("adaptivePredict decision " + decision + " exec LA(1)==" + GetLookaheadName(input) + " line " + input.LT(1).Line + ":" + input.LT(1).Column); } @@ -339,9 +339,9 @@ public virtual int AdaptivePredict(ITokenStream input, int decision, if (s0 == null) { if (outerContext == null) outerContext = ParserRuleContext.EmptyContext; - if (debug || debug_list_atn_decisions) + if (debug) { - ConsoleWriteLine("predictATN decision " + dfa.decision + + Console.WriteLine("predictATN decision " + dfa.decision + " exec 
LA(1)==" + GetLookaheadName(input) + ", outerContext=" + outerContext.ToString(parser)); } @@ -373,7 +373,7 @@ public virtual int AdaptivePredict(ITokenStream input, int decision, int alt = ExecATN(dfa, s0, input, index, outerContext); if (debug) - ConsoleWriteLine("DFA after predictATN: " + dfa.ToString(parser.Vocabulary)); + Console.WriteLine("DFA after predictATN: " + dfa.ToString(parser.Vocabulary)); return alt; } finally @@ -419,16 +419,17 @@ protected int ExecATN(DFA dfa, DFAState s0, ITokenStream input, int startIndex, ParserRuleContext outerContext) { - if (debug || debug_list_atn_decisions) + if (debug || trace_atn_sim) { - ConsoleWriteLine("execATN decision " + dfa.decision + - " exec LA(1)==" + GetLookaheadName(input) + - " line " + input.LT(1).Line + ":" + input.LT(1).Column); + Console.WriteLine("execATN decision " + dfa.decision + + ", DFA state " + s0 + + ", LA(1)==" + GetLookaheadName(input) + + " line " + input.LT(1).Line + ":" + input.LT(1).Column); } DFAState previousD = s0; - if (debug) ConsoleWriteLine("s0 = " + s0); + if (debug) Console.WriteLine("s0 = " + s0); int t = input.LA(1); @@ -467,7 +468,7 @@ protected int ExecATN(DFA dfa, DFAState s0, BitSet conflictingAlts = D.configSet.conflictingAlts; if (D.predicates != null) { - if (debug) ConsoleWriteLine("DFA state has preds in DFA sim LL failover"); + if (debug) Console.WriteLine("DFA state has preds in DFA sim LL failover"); int conflictIndex = input.Index; if (conflictIndex != startIndex) { @@ -477,7 +478,7 @@ protected int ExecATN(DFA dfa, DFAState s0, conflictingAlts = EvalSemanticContext(D.predicates, outerContext, true); if (conflictingAlts.Cardinality() == 1) { - if (debug) ConsoleWriteLine("Full LL avoided"); + if (debug) Console.WriteLine("Full LL avoided"); return conflictingAlts.NextSetBit(0); } @@ -489,7 +490,7 @@ protected int ExecATN(DFA dfa, DFAState s0, } } - if (dfa_debug) ConsoleWriteLine("ctx sensitive state " + outerContext + " in " + D); + if (dfa_debug) 
Console.WriteLine("ctx sensitive state " + outerContext + " in " + D); bool fullCtx = true; ATNConfigSet s0_closure = ComputeStartState(dfa.atnStartState, outerContext, fullCtx); @@ -587,7 +588,7 @@ protected virtual DFAState ComputeTargetState(DFA dfa, DFAState previousD, int t if (debug) { ICollection altSubSets = PredictionMode.GetConflictingAltSubsets(reach.configs); - ConsoleWriteLine("SLL altSubSets=" + altSubSets + + Console.WriteLine("SLL altSubSets=" + StaticUtils.ToString(altSubSets) + ", configs=" + reach + ", predict=" + predictedAlt + ", allSubsetsConflict=" + PredictionMode.AllSubsetsConflict(altSubSets) + ", conflictingAlts=" + @@ -654,9 +655,9 @@ protected int ExecATNWithFullContext(DFA dfa, ITokenStream input, int startIndex, ParserRuleContext outerContext) { - if (debug || debug_list_atn_decisions) + if (debug || trace_atn_sim) { - ConsoleWriteLine("execATNWithFullContext " + s0); + Console.WriteLine("execATNWithFullContext " + s0); } bool fullCtx = true; bool foundExactAmbig = false; @@ -667,7 +668,7 @@ protected int ExecATNWithFullContext(DFA dfa, int predictedAlt; while (true) { // while more work - // ConsoleWriteLine("LL REACH "+GetLookaheadName(input)+ + // Console.WriteLine("LL REACH "+GetLookaheadName(input)+ // " from configs.size="+previous.size()+ // " line "+input.LT(1)Line+":"+input.LT(1).Column); reach = ComputeReachSet(previous, t, fullCtx); @@ -695,13 +696,13 @@ protected int ExecATNWithFullContext(DFA dfa, ICollection altSubSets = PredictionMode.GetConflictingAltSubsets(reach.configs); if (debug) { - ConsoleWriteLine("LL altSubSets=" + altSubSets + + Console.WriteLine("LL altSubSets=" + altSubSets + ", predict=" + PredictionMode.GetUniqueAlt(altSubSets) + ", ResolvesToJustOneViableAlt=" + PredictionMode.ResolvesToJustOneViableAlt(altSubSets)); } - // ConsoleWriteLine("altSubSets: "+altSubSets); + // Console.WriteLine("altSubSets: "+altSubSets); // System.err.println("reach="+reach+", "+reach.conflictingAlts); reach.uniqueAlt = 
GetUniqueAlt(reach); // unique prediction? @@ -785,7 +786,7 @@ sure that there is an ambiguity without looking further. protected virtual ATNConfigSet ComputeReachSet(ATNConfigSet closure, int t, bool fullCtx) { if (debug) - ConsoleWriteLine("in computeReachSet, starting closure: " + closure); + Console.WriteLine("in computeReachSet, starting closure: " + closure); if (mergeCache == null) { @@ -809,7 +810,7 @@ protected virtual ATNConfigSet ComputeReachSet(ATNConfigSet closure, int t, bool // First figure out where we can reach on input t foreach (ATNConfig c in closure.configs) { - if (debug) ConsoleWriteLine("testing " + GetTokenName(t) + " at " + c.ToString()); + if (debug) Console.WriteLine("testing " + GetTokenName(t) + " at " + c.ToString()); if (c.state is RuleStopState) { @@ -921,7 +922,11 @@ protected virtual ATNConfigSet ComputeReachSet(ATNConfigSet closure, int t, bool } } - if (reach.Empty) + if ( trace_atn_sim ) { + Console.WriteLine("computeReachSet "+closure+" -> "+reach); + } + + if (reach.Empty) return null; return reach; } @@ -985,6 +990,11 @@ protected ATNConfigSet ComputeStartState(ATNState p, PredictionContext initialContext = PredictionContext.FromRuleContext(atn, ctx); ATNConfigSet configs = new ATNConfigSet(fullCtx); + if ( trace_atn_sim ) { + Console.WriteLine("computeStartState from ATN state "+p+" initialContext="+initialContext); + } + + for (int i = 0; i < p.NumberOfTransitions; i++) { ATNState target = p.Transition(i).target; @@ -1238,11 +1248,11 @@ protected SemanticContext[] GetPredsForAmbigAlts(BitSet ambigAlts, /* altToPred starts as an array of all null contexts. The entry at index i * corresponds to alternative i. altToPred[i] may have one of three values: * 1. null: no ATNConfig c is found such that c.alt==i - * 2. SemanticContext.NONE: At least one ATNConfig c exists such that - * c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + * 2. 
SemanticContext.Empty.Instance: At least one ATNConfig c exists such that + * c.alt==i and c.semanticContext==SemanticContext.Empty.Instance. In other words, * alt i has at least one unpredicated config. * 3. Non-NONE Semantic Context: There exists at least one, and for all - * ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. + * ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.Empty.Instance. * * From this, it is clear that NONE||anything==NONE. */ @@ -1260,9 +1270,9 @@ protected SemanticContext[] GetPredsForAmbigAlts(BitSet ambigAlts, { if (altToPred[i] == null) { - altToPred[i] = SemanticContext.NONE; + altToPred[i] = SemanticContext.Empty.Instance; } - else if (altToPred[i] != SemanticContext.NONE) + else if (altToPred[i] != SemanticContext.Empty.Instance) { nPredAlts++; } @@ -1275,7 +1285,7 @@ protected SemanticContext[] GetPredsForAmbigAlts(BitSet ambigAlts, // nonambig alts are null in altToPred if (nPredAlts == 0) altToPred = null; - if (debug) ConsoleWriteLine("getPredsForAmbigAlts result " + Arrays.ToString(altToPred)); + if (debug) Console.WriteLine("getPredsForAmbigAlts result " + Arrays.ToString(altToPred)); return altToPred; } @@ -1288,13 +1298,13 @@ protected PredPrediction[] GetPredicatePredictions(BitSet ambigAlts, { SemanticContext pred = altToPred[i]; - // unpredicated is indicated by SemanticContext.NONE + // unpredicated is indicated by SemanticContext.Empty.Instance if (ambigAlts != null && ambigAlts[i]) { pairs.Add(new PredPrediction(pred, i)); } - if (pred != SemanticContext.NONE) containsPredicate = true; + if (pred != SemanticContext.Empty.Instance) containsPredicate = true; } if (!containsPredicate) @@ -1302,7 +1312,7 @@ protected PredPrediction[] GetPredicatePredictions(BitSet ambigAlts, return null; } - // ConsoleWriteLine(Arrays.toString(altToPred)+"->"+pairs); + // Console.WriteLine(Arrays.toString(altToPred)+"->"+pairs); return pairs.ToArray(); } @@ -1407,7 +1417,7 @@ protected Pair 
SplitAccordingToSemanticValidity( ATNConfigSet failed = new ATNConfigSet(configSet.fullCtx); foreach (ATNConfig c in configSet.configs) { - if (c.semanticContext != SemanticContext.NONE) + if (c.semanticContext != SemanticContext.Empty.Instance) { bool predicateEvaluationResult = EvalSemanticContext(c.semanticContext, outerContext, c.alt, configSet.fullCtx); if (predicateEvaluationResult) @@ -1438,7 +1448,7 @@ protected virtual BitSet EvalSemanticContext(PredPrediction[] predPredictions, BitSet predictions = new BitSet(); foreach (PredPrediction pair in predPredictions) { - if (pair.pred == SemanticContext.NONE) + if (pair.pred == SemanticContext.Empty.Instance) { predictions[pair.alt] = true; if (!complete) @@ -1452,12 +1462,12 @@ protected virtual BitSet EvalSemanticContext(PredPrediction[] predPredictions, bool predicateEvaluationResult = EvalSemanticContext(pair.pred, outerContext, pair.alt, fullCtx); if (debug || dfa_debug) { - ConsoleWriteLine("eval pred " + pair + "=" + predicateEvaluationResult); + Console.WriteLine("eval pred " + pair + "=" + predicateEvaluationResult); } if (predicateEvaluationResult) { - if (debug || dfa_debug) ConsoleWriteLine("PREDICT " + pair.alt); + if (debug || dfa_debug) Console.WriteLine("PREDICT " + pair.alt); predictions[pair.alt] = true; if (!complete) { @@ -1532,8 +1542,8 @@ protected void ClosureCheckingStopState(ATNConfig config, int depth, bool treatEofAsEpsilon) { - if (debug) - ConsoleWriteLine("closure(" + config.ToString(parser, true) + ")"); + if (trace_atn_sim) + Console.WriteLine("closure(" + config.ToString(parser, true) + ")"); if (config.state is RuleStopState) { @@ -1547,12 +1557,12 @@ protected void ClosureCheckingStopState(ATNConfig config, { if (fullCtx) { - configSet.Add(new ATNConfig(config, config.state, PredictionContext.EMPTY), mergeCache); + configSet.Add(new ATNConfig(config, config.state, EmptyPredictionContext.Instance), mergeCache); continue; } else { // we have no context info, just chase follow 
links (if greedy) - if (debug) ConsoleWriteLine("FALLING off rule " + + if (debug) Console.WriteLine("FALLING off rule " + GetRuleName(config.state.ruleIndex)); Closure_(config, configSet, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon); @@ -1583,7 +1593,7 @@ protected void ClosureCheckingStopState(ATNConfig config, } else { // else if we have no context info, just chase follow links (if greedy) - if (debug) ConsoleWriteLine("FALLING off rule " + + if (debug) Console.WriteLine("FALLING off rule " + GetRuleName(config.state.ruleIndex)); } } @@ -1608,7 +1618,7 @@ protected void Closure_(ATNConfig config, configs.Add(config, mergeCache); // make sure to not return here, because EOF transitions can act as // both epsilon transitions and non-epsilon transitions. - // if ( debug ) ConsoleWriteLine("added config "+configs); + // if ( debug ) Console.WriteLine("added config "+configs); } for (int i = 0; i < p.NumberOfTransitions; i++) @@ -1650,7 +1660,7 @@ protected void Closure_(ATNConfig config, configs.dipsIntoOuterContext = true; // TODO: can remove? 
only care when we add to set per middle of this method newDepth--; if (debug) - ConsoleWriteLine("dips into outer ctx: " + c); + Console.WriteLine("dips into outer ctx: " + c); } else { @@ -1898,7 +1908,7 @@ protected ATNConfig GetEpsilonTarget(ATNConfig config, protected ATNConfig ActionTransition(ATNConfig config, ActionTransition t) { - if (debug) ConsoleWriteLine("ACTION edge " + t.ruleIndex + ":" + t.actionIndex); + if (debug) Console.WriteLine("ACTION edge " + t.ruleIndex + ":" + t.actionIndex); return new ATNConfig(config, t.target); } @@ -1911,13 +1921,13 @@ public ATNConfig PrecedenceTransition(ATNConfig config, { if (debug) { - ConsoleWriteLine("PRED (collectPredicates=" + collectPredicates + ") " + + Console.WriteLine("PRED (collectPredicates=" + collectPredicates + ") " + pt.precedence + ">=_p" + ", ctx dependent=true"); if (parser != null) { - ConsoleWriteLine("context surrounding pred is " + - parser.GetRuleInvocationStack()); + Console.WriteLine("context surrounding pred is " + + StaticUtils.ToString(parser.GetRuleInvocationStack())); } } @@ -1948,7 +1958,7 @@ public ATNConfig PrecedenceTransition(ATNConfig config, c = new ATNConfig(config, pt.target); } - if (debug) ConsoleWriteLine("config from pred transition=" + c); + if (debug) Console.WriteLine("config from pred transition=" + c); return c; } @@ -1961,13 +1971,13 @@ protected ATNConfig PredTransition(ATNConfig config, { if (debug) { - ConsoleWriteLine("PRED (collectPredicates=" + collectPredicates + ") " + + Console.WriteLine("PRED (collectPredicates=" + collectPredicates + ") " + pt.ruleIndex + ":" + pt.predIndex + ", ctx dependent=" + pt.isCtxDependent); if (parser != null) { - ConsoleWriteLine("context surrounding pred is " + - parser.GetRuleInvocationStack()); + Console.WriteLine("context surrounding pred is " + + StaticUtils.ToString(parser.GetRuleInvocationStack())); } } @@ -1999,7 +2009,7 @@ protected ATNConfig PredTransition(ATNConfig config, c = new ATNConfig(config, pt.target); } - if 
(debug) ConsoleWriteLine("config from pred transition=" + c); + if (debug) Console.WriteLine("config from pred transition=" + c); return c; } @@ -2008,7 +2018,7 @@ protected ATNConfig RuleTransition(ATNConfig config, RuleTransition t) { if (debug) { - ConsoleWriteLine("CALL rule " + GetRuleName(t.target.ruleIndex) + + Console.WriteLine("CALL rule " + GetRuleName(t.target.ruleIndex) + ", ctx=" + config.context); } @@ -2112,9 +2122,7 @@ public string GetLookaheadName(ITokenStream input) */ public void DumpDeadEndConfigs(NoViableAltException nvae) { -#if !PORTABLE System.Console.Error.WriteLine("dead end configs: "); -#endif foreach (ATNConfig c in nvae.DeadEndConfigs.configs) { String trans = "no edges"; @@ -2133,9 +2141,8 @@ public void DumpDeadEndConfigs(NoViableAltException nvae) trans = (not ? "~" : "") + "Set " + st.set.ToString(); } } -#if !PORTABLE + System.Console.Error.WriteLine(c.ToString(parser, true) + ":" + trans); -#endif } } @@ -2195,7 +2202,7 @@ protected DFAState AddDFAEdge(DFA dfa, { if (debug) { - ConsoleWriteLine("EDGE " + from + " -> " + to + " upon " + GetTokenName(t)); + Console.WriteLine("EDGE " + from + " -> " + to + " upon " + GetTokenName(t)); } if (to == null) @@ -2221,7 +2228,7 @@ protected DFAState AddDFAEdge(DFA dfa, if (debug) { - ConsoleWriteLine("DFA=\n" + dfa.ToString(parser != null ? parser.Vocabulary : Vocabulary.EmptyVocabulary)); + Console.WriteLine("DFA=\n" + dfa.ToString(parser != null ? 
parser.Vocabulary : Vocabulary.EmptyVocabulary)); } return to; @@ -2252,7 +2259,10 @@ protected DFAState AddDFAState(DFA dfa, DFAState D) lock (dfa.states) { DFAState existing = dfa.states.Get(D); - if (existing != null) return existing; + if (existing != null) { + if ( trace_atn_sim ) Console.WriteLine("addDFAState " + D + " exists"); + return existing; + } D.stateNumber = dfa.states.Count; if (!D.configSet.IsReadOnly) @@ -2261,7 +2271,8 @@ protected DFAState AddDFAState(DFA dfa, DFAState D) D.configSet.IsReadOnly = true; } dfa.states.Put(D, D); - if (debug) ConsoleWriteLine("adding new DFA state: " + D); + + if ( trace_atn_sim ) Console.WriteLine("addDFAState new " + D); return D; } } @@ -2271,11 +2282,11 @@ protected virtual void ReportAttemptingFullContext(DFA dfa, BitSet conflictingAl if (debug || retry_debug) { Interval interval = Interval.Of(startIndex, stopIndex); - ConsoleWriteLine("reportAttemptingFullContext decision=" + dfa.decision + ":" + configs + + Console.WriteLine("reportAttemptingFullContext decision=" + dfa.decision + ":" + configs + ", input=" + parser.TokenStream.GetText(interval)); } if (parser != null) - parser.ErrorListenerDispatch.ReportAttemptingFullContext(parser, dfa, startIndex, stopIndex, conflictingAlts, null /*configs*/); + parser.ErrorListenerDispatch.ReportAttemptingFullContext(parser, dfa, startIndex, stopIndex, conflictingAlts, configs); } protected virtual void ReportContextSensitivity(DFA dfa, int prediction, ATNConfigSet configs, int startIndex, int stopIndex) @@ -2283,10 +2294,10 @@ protected virtual void ReportContextSensitivity(DFA dfa, int prediction, ATNConf if (debug || retry_debug) { Interval interval = Interval.Of(startIndex, stopIndex); - ConsoleWriteLine("ReportContextSensitivity decision=" + dfa.decision + ":" + configs + + Console.WriteLine("ReportContextSensitivity decision=" + dfa.decision + ":" + configs + ", input=" + parser.TokenStream.GetText(interval)); } - if (parser != null) 
parser.ErrorListenerDispatch.ReportContextSensitivity(parser, dfa, startIndex, stopIndex, prediction, null /*configs*/); + if (parser != null) parser.ErrorListenerDispatch.ReportContextSensitivity(parser, dfa, startIndex, stopIndex, prediction, configs); } /** If context sensitive parsing, we know it's ambiguity not conflict */ @@ -2300,7 +2311,7 @@ protected virtual void ReportAmbiguity(DFA dfa, if (debug || retry_debug) { Interval interval = Interval.Of(startIndex, stopIndex); - ConsoleWriteLine("ReportAmbiguity " + + Console.WriteLine("ReportAmbiguity " + ambigAlts + ":" + configs + ", input=" + parser.TokenStream.GetText(interval)); } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PlusBlockStartState.cs b/runtime/CSharp/src/Atn/PlusBlockStartState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PlusBlockStartState.cs rename to runtime/CSharp/src/Atn/PlusBlockStartState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PlusLoopbackState.cs b/runtime/CSharp/src/Atn/PlusLoopbackState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PlusLoopbackState.cs rename to runtime/CSharp/src/Atn/PlusLoopbackState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PrecedencePredicateTransition.cs b/runtime/CSharp/src/Atn/PrecedencePredicateTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PrecedencePredicateTransition.cs rename to runtime/CSharp/src/Atn/PrecedencePredicateTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredicateEvalInfo.cs b/runtime/CSharp/src/Atn/PredicateEvalInfo.cs similarity index 84% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredicateEvalInfo.cs rename to runtime/CSharp/src/Atn/PredicateEvalInfo.cs index 467ac40f21..93d3e3e980 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredicateEvalInfo.cs +++ 
b/runtime/CSharp/src/Atn/PredicateEvalInfo.cs @@ -31,7 +31,7 @@ public class PredicateEvalInfo : DecisionEventInfo /// . Note that other ATN /// configurations may predict the same alternative which are guarded by /// other semantic contexts and/or - /// + /// /// . /// public readonly int predictedAlt; @@ -49,7 +49,6 @@ public class PredicateEvalInfo : DecisionEventInfo /// class with the /// specified detailed predicate evaluation information. /// - /// The simulator state /// The decision number /// The input token stream /// The start index for the current prediction @@ -68,10 +67,15 @@ public class PredicateEvalInfo : DecisionEventInfo /// /// for more information. /// + /// {@code true} if the semantic context was + /// evaluated during LL prediction; otherwise, {@code false} if the semantic + /// context was evaluated during SLL prediction + /// + /// /// /// - public PredicateEvalInfo(SimulatorState state, int decision, ITokenStream input, int startIndex, int stopIndex, SemanticContext semctx, bool evalResult, int predictedAlt) - : base(decision, state, input, startIndex, stopIndex, state.useContext) + public PredicateEvalInfo(int decision, ITokenStream input, int startIndex, int stopIndex, SemanticContext semctx, bool evalResult, int predictedAlt, bool fullCtx) + : base(decision, new ATNConfigSet(), input, startIndex, stopIndex, fullCtx) { this.semctx = semctx; this.evalResult = evalResult; diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredicateTransition.cs b/runtime/CSharp/src/Atn/PredicateTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredicateTransition.cs rename to runtime/CSharp/src/Atn/PredicateTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionContext.cs b/runtime/CSharp/src/Atn/PredictionContext.cs similarity index 91% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionContext.cs rename to 
runtime/CSharp/src/Atn/PredictionContext.cs index ac5dd9eb88..30796fefe1 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionContext.cs +++ b/runtime/CSharp/src/Atn/PredictionContext.cs @@ -2,6 +2,7 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ +using System; using System.Collections.Generic; using System.Text; using Antlr4.Runtime.Misc; @@ -13,8 +14,6 @@ public abstract class PredictionContext { public static readonly int EMPTY_RETURN_STATE = int.MaxValue; - public static readonly EmptyPredictionContext EMPTY = new EmptyPredictionContext(); - private static readonly int INITIAL_HASH = 1; protected internal static int CalculateEmptyHashCode() @@ -60,7 +59,7 @@ public static PredictionContext FromRuleContext(ATN atn, RuleContext outerContex if (outerContext == null) outerContext = ParserRuleContext.EMPTY; if (outerContext.Parent == null || outerContext == ParserRuleContext.EMPTY) - return PredictionContext.EMPTY; + return EmptyPredictionContext.Instance; PredictionContext parent = PredictionContext.FromRuleContext(atn, outerContext.Parent); ATNState state = atn.states[outerContext.invokingState]; RuleTransition transition = (RuleTransition)state.Transition(0); @@ -80,7 +79,7 @@ public virtual bool IsEmpty { get { - return this == EMPTY; + return this == EmptyPredictionContext.Instance; } } @@ -224,19 +223,23 @@ public static PredictionContext MergeSingletons( } public static PredictionContext MergeArrays( - ArrayPredictionContext a, - ArrayPredictionContext b, - bool rootIsWildcard, - MergeCache mergeCache) + ArrayPredictionContext a, + ArrayPredictionContext b, + bool rootIsWildcard, + MergeCache mergeCache) { if (mergeCache != null) { PredictionContext previous = mergeCache.Get(a, b); - if (previous != null) + if (previous != null) { + if ( ParserATNSimulator.trace_atn_sim ) Console.WriteLine("mergeArrays a="+a+",b="+b+" -> previous"); return previous; + } 
previous = mergeCache.Get(b, a); - if (previous != null) + if (previous != null) { + if ( ParserATNSimulator.trace_atn_sim ) Console.WriteLine("mergeArrays a="+a+",b="+b+" -> previous"); return previous; + } } // merge sorted payloads a + b => M @@ -329,17 +332,21 @@ public static PredictionContext MergeArrays( { if (mergeCache != null) mergeCache.Put(a, b, a); + if ( ParserATNSimulator.trace_atn_sim ) Console.WriteLine("mergeArrays a="+a+",b="+b+" -> a"); return a; } if (M.Equals(b)) { if (mergeCache != null) mergeCache.Put(a, b, b); + if ( ParserATNSimulator.trace_atn_sim ) Console.WriteLine("mergeArrays a="+a+",b="+b+" -> b"); return b; } CombineCommonParents(mergedParents); + if ( ParserATNSimulator.trace_atn_sim ) Console.WriteLine("mergeArrays a="+a+",b="+b+" -> "+M); + if (mergeCache != null) mergeCache.Put(a, b, M); return M; @@ -372,14 +379,14 @@ public static PredictionContext MergeRoot(SingletonPredictionContext a, { if (rootIsWildcard) { - if (a == PredictionContext.EMPTY) - return PredictionContext.EMPTY; // * + b = * - if (b == PredictionContext.EMPTY) - return PredictionContext.EMPTY; // a + * = * + if (a == EmptyPredictionContext.Instance) + return EmptyPredictionContext.Instance; // * + b = * + if (b == EmptyPredictionContext.Instance) + return EmptyPredictionContext.Instance; // a + * = * } else { - if (a == EMPTY && b == EMPTY) return EMPTY; // $ + $ = $ - if (a == EMPTY) + if (a == EmptyPredictionContext.Instance && b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // $ + $ = $ + if (a == EmptyPredictionContext.Instance) { // $ + x = [$,x] int[] payloads = { b.returnState, EMPTY_RETURN_STATE }; PredictionContext[] parents = { b.parent, null }; @@ -387,7 +394,7 @@ public static PredictionContext MergeRoot(SingletonPredictionContext a, new ArrayPredictionContext(parents, payloads); return joined; } - if (b == EMPTY) + if (b == EmptyPredictionContext.Instance) { // x + $ = [$,x] ($ is always first if present) int[] 
payloads = { a.returnState, EMPTY_RETURN_STATE }; PredictionContext[] parents = { a.parent, null }; @@ -452,7 +459,7 @@ public static PredictionContext GetCachedContext(PredictionContext context, Pred PredictionContext updated; if (parents.Length == 0) { - updated = EMPTY; + updated = EmptyPredictionContext.Instance; } else if (parents.Length == 1) { @@ -478,7 +485,7 @@ public virtual PredictionContext GetChild(int returnState) public virtual string[] ToStrings(IRecognizer recognizer, int currentState) { - return ToStrings(recognizer, PredictionContext.EMPTY, currentState); + return ToStrings(recognizer, EmptyPredictionContext.Instance, currentState); } public virtual string[] ToStrings(IRecognizer recognizer, PredictionContext stop, int currentState) diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionContextCache.cs b/runtime/CSharp/src/Atn/PredictionContextCache.cs similarity index 92% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionContextCache.cs rename to runtime/CSharp/src/Atn/PredictionContextCache.cs index c07ee7871b..8a36db59a0 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionContextCache.cs +++ b/runtime/CSharp/src/Atn/PredictionContextCache.cs @@ -19,8 +19,8 @@ public class PredictionContextCache */ public PredictionContext Add(PredictionContext ctx) { - if (ctx == PredictionContext.EMPTY) - return PredictionContext.EMPTY; + if (ctx == EmptyPredictionContext.Instance) + return EmptyPredictionContext.Instance; PredictionContext existing = cache.Get(ctx); if (existing != null) { diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionMode.cs b/runtime/CSharp/src/Atn/PredictionMode.cs similarity index 99% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionMode.cs rename to runtime/CSharp/src/Atn/PredictionMode.cs index 8cd7d6e76e..1d801c9560 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/PredictionMode.cs +++ 
b/runtime/CSharp/src/Atn/PredictionMode.cs @@ -258,7 +258,7 @@ public static bool HasSLLConflictTerminatingPrediction(PredictionMode mode, ATNC ATNConfigSet dup = new ATNConfigSet(); foreach (ATNConfig c in configSet.configs) { - dup.Add(new ATNConfig(c, SemanticContext.NONE)); + dup.Add(new ATNConfig(c, SemanticContext.Empty.Instance)); } configSet = dup; } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ProfilingATNSimulator.cs b/runtime/CSharp/src/Atn/ProfilingATNSimulator.cs similarity index 87% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ProfilingATNSimulator.cs rename to runtime/CSharp/src/Atn/ProfilingATNSimulator.cs index 8c5ac6b990..a88b819d96 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ProfilingATNSimulator.cs +++ b/runtime/CSharp/src/Atn/ProfilingATNSimulator.cs @@ -71,7 +71,7 @@ public override int AdaptivePredict(ITokenStream input, int decision, ParserRule { decisions[decision].SLL_MaxLook = SLL_k; decisions[decision].SLL_MaxLookEvent = - new LookaheadEventInfo(decision, null/*, alt*/, input, startIndex, sllStopIndex, false); + new LookaheadEventInfo(decision, null, alt, input, startIndex, sllStopIndex, false); } if (llStopIndex >= 0) @@ -83,7 +83,7 @@ public override int AdaptivePredict(ITokenStream input, int decision, ParserRule { decisions[decision].LL_MaxLook = LL_k; decisions[decision].LL_MaxLookEvent = - new LookaheadEventInfo(decision, null/*, alt*/, input, startIndex, llStopIndex, true); + new LookaheadEventInfo(decision, null, alt, input, startIndex, llStopIndex, true); } } @@ -108,7 +108,7 @@ protected override DFAState GetExistingTargetState(DFAState previousD, int t) if (existingTargetState == ERROR) { decisions[currentDecision].errors.Add( - new ErrorInfo(currentDecision, null /*previousD.configs*/, input, startIndex, sllStopIndex) + new ErrorInfo(currentDecision, previousD.configSet, input, startIndex, sllStopIndex, false) ); } } @@ -143,7 +143,7 @@ protected override ATNConfigSet 
ComputeReachSet(ATNConfigSet closure, int t, boo else { // no reach on current lookahead symbol. ERROR. // TODO: does not handle delayed errors per getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule() decisions[currentDecision].errors.Add( - new ErrorInfo(currentDecision, null /*closure*/, input, startIndex, llStopIndex) + new ErrorInfo(currentDecision, closure, input, startIndex, llStopIndex, true) ); } } @@ -154,7 +154,7 @@ protected override ATNConfigSet ComputeReachSet(ATNConfigSet closure, int t, boo } else { // no reach on current lookahead symbol. ERROR. decisions[currentDecision].errors.Add( - new ErrorInfo(currentDecision, null /*closure*/, input, startIndex, sllStopIndex) + new ErrorInfo(currentDecision, closure, input, startIndex, sllStopIndex, false) ); } } @@ -168,7 +168,7 @@ protected override bool EvalSemanticContext(SemanticContext pred, ParserRuleCont bool fullContext = llStopIndex >= 0; int stopIndex = fullContext ? llStopIndex : sllStopIndex; decisions[currentDecision].predicateEvals.Add( - new PredicateEvalInfo(null , currentDecision, input, startIndex, stopIndex, pred, result, alt/*, fullCtx*/) + new PredicateEvalInfo(currentDecision, input, startIndex, stopIndex, pred, result, alt, fullCtx) ); } @@ -193,14 +193,14 @@ protected override void ReportContextSensitivity(DFA dfa, int prediction, ATNCon if (prediction != conflictingAltResolvedBySLL) { decisions[currentDecision].contextSensitivities.Add( - new ContextSensitivityInfo(currentDecision, null /*configs*/, input, startIndex, stopIndex) + new ContextSensitivityInfo(currentDecision, configs, input, startIndex, stopIndex) ); } base.ReportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex); } protected override void ReportAmbiguity(DFA dfa, DFAState D, int startIndex, int stopIndex, bool exact, - BitSet ambigAlts, ATNConfigSet configSet) + BitSet ambigAlts, ATNConfigSet configs) { int prediction; if (ambigAlts != null) @@ -208,22 +208,22 @@ protected override void 
ReportAmbiguity(DFA dfa, DFAState D, int startIndex, int prediction = ambigAlts.NextSetBit(0); } else { - prediction = configSet.GetAlts().NextSetBit(0); + prediction = configs.GetAlts().NextSetBit(0); } - if (configSet.fullCtx && prediction != conflictingAltResolvedBySLL) + if (configs.fullCtx && prediction != conflictingAltResolvedBySLL) { // Even though this is an ambiguity we are reporting, we can // still detect some context sensitivities. Both SLL and LL // are showing a conflict, hence an ambiguity, but if they resolve // to different minimum alternatives we have also identified a // context sensitivity. - decisions[currentDecision].contextSensitivities.Add( new ContextSensitivityInfo(currentDecision, null /*configs*/, input, startIndex, stopIndex) ); + decisions[currentDecision].contextSensitivities.Add( new ContextSensitivityInfo(currentDecision, configs, input, startIndex, stopIndex) ); } decisions[currentDecision].ambiguities.Add( - new AmbiguityInfo(currentDecision, null /*configs, ambigAlts*/, - input, startIndex, stopIndex/*, configs.IsFullContext*/) + new AmbiguityInfo(currentDecision, configs, ambigAlts, + input, startIndex, stopIndex, configs.fullCtx) ); - base.ReportAmbiguity(dfa, D, startIndex, stopIndex, exact, ambigAlts, configSet); + base.ReportAmbiguity(dfa, D, startIndex, stopIndex, exact, ambigAlts, configs); } // --------------------------------------------------------------------- diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RangeTransition.cs b/runtime/CSharp/src/Atn/RangeTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RangeTransition.cs rename to runtime/CSharp/src/Atn/RangeTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RuleStartState.cs b/runtime/CSharp/src/Atn/RuleStartState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RuleStartState.cs rename to runtime/CSharp/src/Atn/RuleStartState.cs diff --git 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RuleStopState.cs b/runtime/CSharp/src/Atn/RuleStopState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RuleStopState.cs rename to runtime/CSharp/src/Atn/RuleStopState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RuleTransition.cs b/runtime/CSharp/src/Atn/RuleTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/RuleTransition.cs rename to runtime/CSharp/src/Atn/RuleTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SemanticContext.cs b/runtime/CSharp/src/Atn/SemanticContext.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SemanticContext.cs rename to runtime/CSharp/src/Atn/SemanticContext.cs index c6cf0cda6e..33db8649d9 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SemanticContext.cs +++ b/runtime/CSharp/src/Atn/SemanticContext.cs @@ -12,8 +12,6 @@ namespace Antlr4.Runtime.Atn { public abstract class SemanticContext { - public static readonly SemanticContext NONE = new SemanticContext.Predicate(); - public abstract bool Eval(Recognizer parser, RuleContext parserCallStack) where ATNInterpreter : ATNSimulator; @@ -23,6 +21,16 @@ public virtual SemanticContext EvalPrecedence(Recognizer return this; } + public class Empty : SemanticContext + { + public static readonly SemanticContext Instance = new Empty(); + + public override bool Eval(Recognizer parser, RuleContext parserCallStack) + { + return false; + } + } + public class Predicate : SemanticContext { public readonly int ruleIndex; @@ -105,7 +113,7 @@ public override SemanticContext EvalPrecedence(Recognize { if (parser.Precpred(parserCallStack, precedence)) { - return SemanticContext.NONE; + return SemanticContext.Empty.Instance; } else { @@ -243,7 +251,7 @@ public override SemanticContext EvalPrecedence(Recognize } else { - if (evaluated != NONE) + if (evaluated != Empty.Instance) { // 
Reduce the result by skipping true elements operands.Add(evaluated); @@ -257,7 +265,7 @@ public override SemanticContext EvalPrecedence(Recognize if (operands.Count == 0) { // all elements were true, so the AND context is true - return NONE; + return Empty.Instance; } SemanticContext result = operands[0]; for (int i = 1; i < operands.Count; i++) @@ -354,10 +362,10 @@ public override SemanticContext EvalPrecedence(Recognize { SemanticContext evaluated = context.EvalPrecedence(parser, parserCallStack); differs |= (evaluated != context); - if (evaluated == NONE) + if (evaluated == Empty.Instance) { // The OR context is true if any element is true - return NONE; + return Empty.Instance; } else { @@ -393,11 +401,11 @@ public override string ToString() public static SemanticContext AndOp(SemanticContext a, SemanticContext b) { - if (a == null || a == NONE) + if (a == null || a == Empty.Instance) { return b; } - if (b == null || b == NONE) + if (b == null || b == Empty.Instance) { return a; } @@ -419,9 +427,9 @@ public static SemanticContext OrOp(SemanticContext a, SemanticContext b) { return a; } - if (a == NONE || b == NONE) + if (a == Empty.Instance || b == Empty.Instance) { - return NONE; + return Empty.Instance; } SemanticContext.OR result = new SemanticContext.OR(a, b); if (result.opnds.Length == 1) @@ -437,11 +445,7 @@ public static SemanticContext OrOp(SemanticContext a, SemanticContext b) Collections.EmptyList(); List result = collection.OfType().ToList(); -#if NET40PLUS collection.ExceptWith(result); -#else - collection.ExceptWith(result.Cast()); -#endif return result; } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SetTransition.cs b/runtime/CSharp/src/Atn/SetTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SetTransition.cs rename to runtime/CSharp/src/Atn/SetTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SimulatorState.cs b/runtime/CSharp/src/Atn/SimulatorState.cs 
similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SimulatorState.cs rename to runtime/CSharp/src/Atn/SimulatorState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SingletonPredictionContext.cs b/runtime/CSharp/src/Atn/SingletonPredictionContext.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SingletonPredictionContext.cs rename to runtime/CSharp/src/Atn/SingletonPredictionContext.cs index 2f8f1bb5db..c378554a2d 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SingletonPredictionContext.cs +++ b/runtime/CSharp/src/Atn/SingletonPredictionContext.cs @@ -15,7 +15,7 @@ public static PredictionContext Create(PredictionContext parent, int returnState if (returnState == EMPTY_RETURN_STATE && parent == null) { // someone can pass in the bits of an array ctx that mean $ - return PredictionContext.EMPTY; + return EmptyPredictionContext.Instance; } return new SingletonPredictionContext(parent, returnState); } @@ -79,7 +79,7 @@ public override bool Equals(object o) return false; } Antlr4.Runtime.Atn.SingletonPredictionContext other = (Antlr4.Runtime.Atn.SingletonPredictionContext)o; - return returnState == other.returnState && parent.Equals(other.parent); + return returnState == other.returnState && (parent != null && parent.Equals(other.parent)); } public override string ToString() diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StarBlockStartState.cs b/runtime/CSharp/src/Atn/StarBlockStartState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StarBlockStartState.cs rename to runtime/CSharp/src/Atn/StarBlockStartState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StarLoopEntryState.cs b/runtime/CSharp/src/Atn/StarLoopEntryState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StarLoopEntryState.cs rename to runtime/CSharp/src/Atn/StarLoopEntryState.cs diff --git 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StarLoopbackState.cs b/runtime/CSharp/src/Atn/StarLoopbackState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StarLoopbackState.cs rename to runtime/CSharp/src/Atn/StarLoopbackState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StateType.cs b/runtime/CSharp/src/Atn/StateType.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/StateType.cs rename to runtime/CSharp/src/Atn/StateType.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/TokensStartState.cs b/runtime/CSharp/src/Atn/TokensStartState.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/TokensStartState.cs rename to runtime/CSharp/src/Atn/TokensStartState.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/Transition.cs b/runtime/CSharp/src/Atn/Transition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/Transition.cs rename to runtime/CSharp/src/Atn/Transition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/TransitionType.cs b/runtime/CSharp/src/Atn/TransitionType.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/TransitionType.cs rename to runtime/CSharp/src/Atn/TransitionType.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/WildcardTransition.cs b/runtime/CSharp/src/Atn/WildcardTransition.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/WildcardTransition.cs rename to runtime/CSharp/src/Atn/WildcardTransition.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/BailErrorStrategy.cs b/runtime/CSharp/src/BailErrorStrategy.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/BailErrorStrategy.cs rename to runtime/CSharp/src/BailErrorStrategy.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/BaseErrorListener.cs 
b/runtime/CSharp/src/BaseErrorListener.cs similarity index 95% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/BaseErrorListener.cs rename to runtime/CSharp/src/BaseErrorListener.cs index 02b433e98f..6e451d5476 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/BaseErrorListener.cs +++ b/runtime/CSharp/src/BaseErrorListener.cs @@ -28,11 +28,11 @@ public virtual void ReportAmbiguity(Parser recognizer, DFA dfa, int startIndex, { } - public virtual void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, SimulatorState conflictState) + public virtual void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) { } - public virtual void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, SimulatorState acceptState) + public virtual void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, ATNConfigSet configs) { } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/BufferedTokenStream.cs b/runtime/CSharp/src/BufferedTokenStream.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/BufferedTokenStream.cs rename to runtime/CSharp/src/BufferedTokenStream.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CharStreams.cs b/runtime/CSharp/src/CharStreams.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CharStreams.cs rename to runtime/CSharp/src/CharStreams.cs index c5e20b153c..4106f31625 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CharStreams.cs +++ b/runtime/CSharp/src/CharStreams.cs @@ -85,7 +85,7 @@ public static ICharStream fromStream(Stream stream, Encoding encoding) /// Creates an given a . 
/// - public static ICharStream fromstring(string s) + public static ICharStream fromString(string s) { return new CodePointCharStream(s); } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CommonToken.cs b/runtime/CSharp/src/CommonToken.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CommonToken.cs rename to runtime/CSharp/src/CommonToken.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CommonTokenFactory.cs b/runtime/CSharp/src/CommonTokenFactory.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CommonTokenFactory.cs rename to runtime/CSharp/src/CommonTokenFactory.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CommonTokenStream.cs b/runtime/CSharp/src/CommonTokenStream.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/CommonTokenStream.cs rename to runtime/CSharp/src/CommonTokenStream.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ConsoleErrorListener.cs b/runtime/CSharp/src/ConsoleErrorListener.cs similarity index 95% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ConsoleErrorListener.cs rename to runtime/CSharp/src/ConsoleErrorListener.cs index 84bbeea4a8..4849fe80cc 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ConsoleErrorListener.cs +++ b/runtime/CSharp/src/ConsoleErrorListener.cs @@ -3,10 +3,6 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -#if !PORTABLE - -using Antlr4.Runtime; -using Antlr4.Runtime.Sharpen; using System.IO; namespace Antlr4.Runtime @@ -44,6 +40,4 @@ public virtual void SyntaxError(TextWriter output, IRecognizer recognizer, Symbo output.WriteLine("line " + line + ":" + charPositionInLine + " " + msg); } } -} - -#endif +} \ No newline at end of file diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/DefaultErrorStrategy.cs b/runtime/CSharp/src/DefaultErrorStrategy.cs similarity index 96% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/DefaultErrorStrategy.cs rename to runtime/CSharp/src/DefaultErrorStrategy.cs index 019055e257..f208c610ff 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/DefaultErrorStrategy.cs +++ b/runtime/CSharp/src/DefaultErrorStrategy.cs @@ -42,6 +42,21 @@ public class DefaultErrorStrategy : IAntlrErrorStrategy protected internal IntervalSet lastErrorStates; + /** + * This field is used to propagate information about the lookahead following + * the previous match. Since prediction prefers completing the current rule + * to error recovery efforts, error reporting may occur later than the + * original point where it was discoverable. The original context is used to + * compute the true expected sets as though the reporting occurred as early + * as possible. + */ + protected ParserRuleContext nextTokensContext; + + /** + * @see #nextTokensContext + */ + protected int nextTokensState; + /// /// ///

    The default implementation simply calls @@ -160,9 +175,7 @@ public virtual void ReportError(Parser recognizer, RecognitionException e) } else { -#if !PORTABLE System.Console.Error.WriteLine("unknown recognition error type: " + e.GetType().FullName); -#endif NotifyErrorListeners(recognizer, e.Message, e); } } @@ -266,8 +279,22 @@ public virtual void Sync(Parser recognizer) int la = tokens.LA(1); // try cheaper subset first; might get lucky. seems to shave a wee bit off var nextTokens = recognizer.Atn.NextTokens(s); - if (nextTokens.Contains(TokenConstants.EPSILON) || nextTokens.Contains(la)) + if (nextTokens.Contains(la)) + { + nextTokensContext = null; + nextTokensState = ATNState.InvalidStateNumber; + return; + } + + if (nextTokens.Contains(TokenConstants.EPSILON)) { + if (nextTokensContext == null) + { + // It's possible the next token won't match; information tracked + // by sync is restricted for performance. + nextTokensContext = recognizer.Context; + nextTokensState = recognizer.State; + } return; } switch (s.StateType) diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dependents.cs b/runtime/CSharp/src/Dependents.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dependents.cs rename to runtime/CSharp/src/Dependents.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/AbstractEdgeMap.cs b/runtime/CSharp/src/Dfa/AbstractEdgeMap.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/AbstractEdgeMap.cs rename to runtime/CSharp/src/Dfa/AbstractEdgeMap.cs index b9c55f4728..e298c3c5e6 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/AbstractEdgeMap.cs +++ b/runtime/CSharp/src/Dfa/AbstractEdgeMap.cs @@ -4,8 +4,6 @@ */ using System.Collections; using System.Collections.Generic; -using Antlr4.Runtime.Dfa; -using Antlr4.Runtime.Sharpen; namespace Antlr4.Runtime.Dfa { @@ -78,11 +76,7 @@ public abstract int Count get; } -#if NET45PLUS public abstract IReadOnlyDictionary 
ToMap(); -#else - public abstract IDictionary ToMap(); -#endif public virtual IEnumerator> GetEnumerator() { diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/AcceptStateInfo.cs b/runtime/CSharp/src/Dfa/AcceptStateInfo.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/AcceptStateInfo.cs rename to runtime/CSharp/src/Dfa/AcceptStateInfo.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/ArrayEdgeMap.cs b/runtime/CSharp/src/Dfa/ArrayEdgeMap.cs similarity index 86% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/ArrayEdgeMap.cs rename to runtime/CSharp/src/Dfa/ArrayEdgeMap.cs index b59120bbf4..81617c5f49 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/ArrayEdgeMap.cs +++ b/runtime/CSharp/src/Dfa/ArrayEdgeMap.cs @@ -5,15 +5,9 @@ using System; using System.Collections.Generic; using System.Collections.ObjectModel; -using Antlr4.Runtime.Dfa; -using Antlr4.Runtime.Sharpen; using Interlocked = System.Threading.Interlocked; -#if NET45PLUS using Volatile = System.Threading.Volatile; -#elif !PORTABLE && !COMPACT -using Thread = System.Threading.Thread; -#endif namespace Antlr4.Runtime.Dfa { @@ -35,13 +29,7 @@ public override int Count { get { -#if NET45PLUS return Volatile.Read(ref size); -#elif !PORTABLE && !COMPACT - return Thread.VolatileRead(ref size); -#else - return Interlocked.CompareExchange(ref size, 0, 0); -#endif } } @@ -67,11 +55,7 @@ public override T this[int key] return null; } -#if NET45PLUS return Volatile.Read(ref arrayData[key - minIndex]); -#else - return Interlocked.CompareExchange(ref arrayData[key - minIndex], null, null); -#endif } } @@ -156,24 +140,15 @@ public override AbstractEdgeMap Clear() return new EmptyEdgeMap(minIndex, maxIndex); } -#if NET45PLUS public override IReadOnlyDictionary ToMap() -#else - public override IDictionary ToMap() -#endif { if (IsEmpty) { return Sharpen.Collections.EmptyMap(); } -#if COMPACT - IDictionary result = new SortedList(); 
-#elif PORTABLE && !NET45PLUS - IDictionary result = new Dictionary(); -#else IDictionary result = new SortedDictionary(); -#endif + for (int i = 0; i < arrayData.Length; i++) { T element = arrayData[i]; @@ -183,11 +158,8 @@ public override IDictionary ToMap() } result[i + minIndex] = element; } -#if NET45PLUS + return new ReadOnlyDictionary(result); -#else - return result; -#endif } } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFA.cs b/runtime/CSharp/src/Dfa/DFA.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFA.cs rename to runtime/CSharp/src/Dfa/DFA.cs index de3ca19ac1..18be55a703 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFA.cs +++ b/runtime/CSharp/src/Dfa/DFA.cs @@ -46,7 +46,7 @@ public DFA(DecisionState atnStartState, int decision) { this.precedenceDfa = true; DFAState precedenceState = new DFAState(new ATNConfigSet()); - precedenceState.edges = new DFAState[0]; + precedenceState.edges = Collections.EmptyList(); precedenceState.isAcceptState = false; precedenceState.requiresFullContext = false; this.s0 = precedenceState; diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFASerializer.cs b/runtime/CSharp/src/Dfa/DFASerializer.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFASerializer.cs rename to runtime/CSharp/src/Dfa/DFASerializer.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFAState.cs b/runtime/CSharp/src/Dfa/DFAState.cs similarity index 99% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFAState.cs rename to runtime/CSharp/src/Dfa/DFAState.cs index 0986ac6c03..4e2df69cfd 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/DFAState.cs +++ b/runtime/CSharp/src/Dfa/DFAState.cs @@ -168,7 +168,7 @@ public override String ToString() public class PredPrediction { - public SemanticContext pred; // never null; at least SemanticContext.NONE + public SemanticContext pred; // never null; at 
least SemanticContext.Empty.Instance public int alt; public PredPrediction(SemanticContext pred, int alt) { diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/EmptyEdgeMap.cs b/runtime/CSharp/src/Dfa/EmptyEdgeMap.cs similarity index 85% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/EmptyEdgeMap.cs rename to runtime/CSharp/src/Dfa/EmptyEdgeMap.cs index 9098de65ce..76dcbd073d 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/EmptyEdgeMap.cs +++ b/runtime/CSharp/src/Dfa/EmptyEdgeMap.cs @@ -3,11 +3,7 @@ * can be found in the LICENSE.txt file in the project root. */ using System.Collections.Generic; -using Antlr4.Runtime.Sharpen; - -#if NET45PLUS using System.Collections.ObjectModel; -#endif namespace Antlr4.Runtime.Dfa { @@ -74,18 +70,9 @@ public override T this[int key] } } -#if NET45PLUS public override IReadOnlyDictionary ToMap() -#else - public override IDictionary ToMap() -#endif { - Dictionary result = new Dictionary(); -#if NET45PLUS - return new ReadOnlyDictionary(result); -#else - return result; -#endif + return new ReadOnlyDictionary(new Dictionary()); } } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/IEdgeMap.cs b/runtime/CSharp/src/Dfa/IEdgeMap.cs similarity index 91% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/IEdgeMap.cs rename to runtime/CSharp/src/Dfa/IEdgeMap.cs index f8cd641af1..fc76a3063b 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/IEdgeMap.cs +++ b/runtime/CSharp/src/Dfa/IEdgeMap.cs @@ -39,12 +39,7 @@ T this[int key] [return: NotNull] IEdgeMap Clear(); -#if NET45PLUS [return: NotNull] IReadOnlyDictionary ToMap(); -#else - [return: NotNull] - IDictionary ToMap(); -#endif } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/LexerDFASerializer.cs b/runtime/CSharp/src/Dfa/LexerDFASerializer.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/LexerDFASerializer.cs rename to runtime/CSharp/src/Dfa/LexerDFASerializer.cs 
diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/SingletonEdgeMap.cs b/runtime/CSharp/src/Dfa/SingletonEdgeMap.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/SingletonEdgeMap.cs rename to runtime/CSharp/src/Dfa/SingletonEdgeMap.cs index 5757684cf5..fbc6c2c0db 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/SingletonEdgeMap.cs +++ b/runtime/CSharp/src/Dfa/SingletonEdgeMap.cs @@ -3,7 +3,6 @@ * can be found in the LICENSE.txt file in the project root. */ using System.Collections.Generic; -using Antlr4.Runtime.Sharpen; namespace Antlr4.Runtime.Dfa { @@ -123,17 +122,13 @@ public override AbstractEdgeMap Clear() return this; } -#if NET45PLUS public override IReadOnlyDictionary ToMap() -#else - public override IDictionary ToMap() -#endif { if (IsEmpty) { return Sharpen.Collections.EmptyMap(); } - return Antlr4.Runtime.Sharpen.Collections.SingletonMap(key, value); + return Sharpen.Collections.SingletonMap(key, value); } } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/SparseEdgeMap.cs b/runtime/CSharp/src/Dfa/SparseEdgeMap.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/SparseEdgeMap.cs rename to runtime/CSharp/src/Dfa/SparseEdgeMap.cs index 05a0ac84f3..2f1eb74251 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Dfa/SparseEdgeMap.cs +++ b/runtime/CSharp/src/Dfa/SparseEdgeMap.cs @@ -181,11 +181,7 @@ public override AbstractEdgeMap Clear() return new EmptyEdgeMap(minIndex, maxIndex); } -#if NET45PLUS public override IReadOnlyDictionary ToMap() -#else - public override IDictionary ToMap() -#endif { if (IsEmpty) { @@ -193,22 +189,14 @@ public override IDictionary ToMap() } lock (this) { -#if COMPACT - IDictionary result = new SortedList(); -#elif PORTABLE && !NET45PLUS - IDictionary result = new Dictionary(); -#else IDictionary result = new SortedDictionary(); -#endif + for (int i = 0; i < Count; i++) { result[keys[i]] = values[i]; } -#if NET45PLUS + 
return new ReadOnlyDictionary(result); -#else - return result; -#endif } } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/DiagnosticErrorListener.cs b/runtime/CSharp/src/DiagnosticErrorListener.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/DiagnosticErrorListener.cs rename to runtime/CSharp/src/DiagnosticErrorListener.cs index a673b2548d..104b97488c 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/DiagnosticErrorListener.cs +++ b/runtime/CSharp/src/DiagnosticErrorListener.cs @@ -85,7 +85,7 @@ public override void ReportAmbiguity(Parser recognizer, DFA dfa, int startIndex, recognizer.NotifyErrorListeners(message); } - public override void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, SimulatorState conflictState) + public override void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) { string format = "reportAttemptingFullContext d={0}, input='{1}'"; string decision = GetDecisionDescription(recognizer, dfa); @@ -94,7 +94,7 @@ public override void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int recognizer.NotifyErrorListeners(message); } - public override void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, SimulatorState acceptState) + public override void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, ATNConfigSet configs) { string format = "reportContextSensitivity d={0}, input='{1}'"; string decision = GetDecisionDescription(recognizer, dfa); diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/FailedPredicateException.cs b/runtime/CSharp/src/FailedPredicateException.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/FailedPredicateException.cs rename to runtime/CSharp/src/FailedPredicateException.cs diff --git 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IAntlrErrorListener.cs b/runtime/CSharp/src/IAntlrErrorListener.cs similarity index 95% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IAntlrErrorListener.cs rename to runtime/CSharp/src/IAntlrErrorListener.cs index fe8b0150ea..01e76e617e 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IAntlrErrorListener.cs +++ b/runtime/CSharp/src/IAntlrErrorListener.cs @@ -2,19 +2,14 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -using Antlr4.Runtime; -using Antlr4.Runtime.Sharpen; + using System.IO; namespace Antlr4.Runtime { ///

    How to emit recognition errors. /// How to emit recognition errors. -#if COMPACT - public interface IAntlrErrorListener -#else public interface IAntlrErrorListener -#endif { /// Upon syntax error, notify any interested parties. /// diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IAntlrErrorStrategy.cs b/runtime/CSharp/src/IAntlrErrorStrategy.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IAntlrErrorStrategy.cs rename to runtime/CSharp/src/IAntlrErrorStrategy.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ICharStream.cs b/runtime/CSharp/src/ICharStream.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ICharStream.cs rename to runtime/CSharp/src/ICharStream.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IIntStream.cs b/runtime/CSharp/src/IIntStream.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IIntStream.cs rename to runtime/CSharp/src/IIntStream.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IParserErrorListener.cs b/runtime/CSharp/src/IParserErrorListener.cs similarity index 95% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IParserErrorListener.cs rename to runtime/CSharp/src/IParserErrorListener.cs index 8d1765a47d..9a75acde9a 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IParserErrorListener.cs +++ b/runtime/CSharp/src/IParserErrorListener.cs @@ -118,11 +118,11 @@ public interface IParserErrorListener : IAntlrErrorListener /// configs /// . 
/// - /// - /// the simulator state when the SLL conflict was - /// detected + /// + /// the ATN configuration set where the ambiguity was + /// identified /// - void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, SimulatorState conflictState); + void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, ATNConfigSet configs); /// /// This method is called by the parser when a full-context prediction has a @@ -169,10 +169,10 @@ public interface IParserErrorListener : IAntlrErrorListener /// finally determined /// /// the unambiguous result of the full-context prediction - /// - /// the simulator state when the unambiguous prediction + /// + /// the ATN configuration set where the unambiguous prediction /// was determined /// - void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, SimulatorState acceptState); + void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, ATNConfigSet configs); } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IRecognizer.cs b/runtime/CSharp/src/IRecognizer.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IRecognizer.cs rename to runtime/CSharp/src/IRecognizer.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IToken.cs b/runtime/CSharp/src/IToken.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IToken.cs rename to runtime/CSharp/src/IToken.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ITokenFactory.cs b/runtime/CSharp/src/ITokenFactory.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ITokenFactory.cs rename to runtime/CSharp/src/ITokenFactory.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ITokenSource.cs b/runtime/CSharp/src/ITokenSource.cs similarity index 100% rename from 
runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ITokenSource.cs rename to runtime/CSharp/src/ITokenSource.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ITokenStream.cs b/runtime/CSharp/src/ITokenStream.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ITokenStream.cs rename to runtime/CSharp/src/ITokenStream.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IVocabulary.cs b/runtime/CSharp/src/IVocabulary.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IVocabulary.cs rename to runtime/CSharp/src/IVocabulary.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IWritableToken.cs b/runtime/CSharp/src/IWritableToken.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/IWritableToken.cs rename to runtime/CSharp/src/IWritableToken.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/InputMismatchException.cs b/runtime/CSharp/src/InputMismatchException.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/InputMismatchException.cs rename to runtime/CSharp/src/InputMismatchException.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/InterpreterRuleContext.cs b/runtime/CSharp/src/InterpreterRuleContext.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/InterpreterRuleContext.cs rename to runtime/CSharp/src/InterpreterRuleContext.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Lexer.cs b/runtime/CSharp/src/Lexer.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Lexer.cs rename to runtime/CSharp/src/Lexer.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/LexerInterpreter.cs b/runtime/CSharp/src/LexerInterpreter.cs similarity index 96% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/LexerInterpreter.cs rename to runtime/CSharp/src/LexerInterpreter.cs index edf0c4e6f9..6739eacc55 100644 --- 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/LexerInterpreter.cs +++ b/runtime/CSharp/src/LexerInterpreter.cs @@ -33,7 +33,7 @@ public class LexerInterpreter: Lexer [Obsolete("Use constructor with channelNames argument")] public LexerInterpreter(string grammarFileName, IVocabulary vocabulary, IEnumerable ruleNames, IEnumerable modeNames, ATN atn, ICharStream input) - : this(grammarFileName, vocabulary, ruleNames, new string[0], modeNames, atn, input) + : this(grammarFileName, vocabulary, ruleNames, Collections.EmptyList(), modeNames, atn, input) { } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/LexerNoViableAltException.cs b/runtime/CSharp/src/LexerNoViableAltException.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/LexerNoViableAltException.cs rename to runtime/CSharp/src/LexerNoViableAltException.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ListTokenSource.cs b/runtime/CSharp/src/ListTokenSource.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ListTokenSource.cs rename to runtime/CSharp/src/ListTokenSource.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Args.cs b/runtime/CSharp/src/Misc/Args.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Args.cs rename to runtime/CSharp/src/Misc/Args.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/ArrayList.cs b/runtime/CSharp/src/Misc/ArrayList.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/ArrayList.cs rename to runtime/CSharp/src/Misc/ArrayList.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/IIntSet.cs b/runtime/CSharp/src/Misc/IIntSet.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/IIntSet.cs rename to runtime/CSharp/src/Misc/IIntSet.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Interval.cs b/runtime/CSharp/src/Misc/Interval.cs similarity 
index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Interval.cs rename to runtime/CSharp/src/Misc/Interval.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/IntervalSet.cs b/runtime/CSharp/src/Misc/IntervalSet.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/IntervalSet.cs rename to runtime/CSharp/src/Misc/IntervalSet.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/MultiMap.cs b/runtime/CSharp/src/Misc/MultiMap.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/MultiMap.cs rename to runtime/CSharp/src/Misc/MultiMap.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/MurmurHash.cs b/runtime/CSharp/src/Misc/MurmurHash.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/MurmurHash.cs rename to runtime/CSharp/src/Misc/MurmurHash.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/NotNullAttribute.cs b/runtime/CSharp/src/Misc/NotNullAttribute.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/NotNullAttribute.cs rename to runtime/CSharp/src/Misc/NotNullAttribute.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/NullableAttribute.cs b/runtime/CSharp/src/Misc/NullableAttribute.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/NullableAttribute.cs rename to runtime/CSharp/src/Misc/NullableAttribute.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Pair.cs b/runtime/CSharp/src/Misc/Pair.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Pair.cs rename to runtime/CSharp/src/Misc/Pair.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/ParseCanceledException.cs b/runtime/CSharp/src/Misc/ParseCanceledException.cs similarity index 92% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/ParseCanceledException.cs rename to 
runtime/CSharp/src/Misc/ParseCanceledException.cs index e0e5ee39b0..6dc7200e60 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/ParseCanceledException.cs +++ b/runtime/CSharp/src/Misc/ParseCanceledException.cs @@ -4,10 +4,6 @@ */ using System; -#if COMPACT -using OperationCanceledException = System.Exception; -#endif - namespace Antlr4.Runtime.Misc { /// This exception is thrown to cancel a parsing operation. @@ -22,7 +18,7 @@ namespace Antlr4.Runtime.Misc /// response to a parse error. /// /// Sam Harwell - [System.Serializable] + [Serializable] public class ParseCanceledException : OperationCanceledException { public ParseCanceledException() diff --git a/runtime/CSharp/src/Misc/RuleDependencyChecker.cs b/runtime/CSharp/src/Misc/RuleDependencyChecker.cs new file mode 100644 index 0000000000..63a0a4a56d --- /dev/null +++ b/runtime/CSharp/src/Misc/RuleDependencyChecker.cs @@ -0,0 +1,538 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using System.Text; +using Antlr4.Runtime.Atn; +using Antlr4.Runtime.Sharpen; + +namespace Antlr4.Runtime.Misc +{ + /// Sam Harwell + public class RuleDependencyChecker + { + private static readonly HashSet checkedAssemblies = new HashSet(); + + public static void CheckDependencies(Assembly assembly) + { + if (IsChecked(assembly)) + { + return; + } + + IEnumerable typesToCheck = GetTypesToCheck(assembly); + List> dependencies = new List>(); + foreach (TypeInfo clazz in typesToCheck) + { + dependencies.AddRange(GetDependencies(clazz)); + } + + if (dependencies.Count > 0) + { + IDictionary>> recognizerDependencies = new Dictionary>>(); + foreach (Tuple dependency in dependencies) + { + TypeInfo recognizerType = dependency.Item1.Recognizer.GetTypeInfo(); + IList> list; + if (!recognizerDependencies.TryGetValue(recognizerType, out list)) + { + list = new List>(); + recognizerDependencies[recognizerType] = list; + } + list.Add(dependency); + } + + foreach (KeyValuePair>> entry in recognizerDependencies) + { + //processingEnv.getMessager().printMessage(Diagnostic.Kind.NOTE, String.format("ANTLR 4: Validating {0} dependencies on rules in {1}.", entry.getValue().size(), entry.getKey().toString())); + CheckDependencies(entry.Value, entry.Key); + } + } + + MarkChecked(assembly); + } + + private static IEnumerable GetTypesToCheck(Assembly assembly) + { + return assembly.DefinedTypes; + } + + private static bool IsChecked(Assembly assembly) + { + lock (checkedAssemblies) + { + return checkedAssemblies.Contains(assembly.FullName); + } + } + + private static void MarkChecked(Assembly assembly) + { + lock (checkedAssemblies) + { + checkedAssemblies.Add(assembly.FullName); + } + } + + private static void CheckDependencies(IList> dependencies, TypeInfo recognizerType) + { + string[] ruleNames = GetRuleNames(recognizerType); + int[] ruleVersions = GetRuleVersions(recognizerType, 
ruleNames); + RuleRelations relations = ExtractRuleRelations(recognizerType); + StringBuilder errors = new StringBuilder(); + foreach (Tuple dependency in dependencies) + { + if (!dependency.Item1.Recognizer.GetTypeInfo().IsAssignableFrom(recognizerType)) + { + continue; + } + // this is the rule in the dependency set with the highest version number + int effectiveRule = dependency.Item1.Rule; + if (effectiveRule < 0 || effectiveRule >= ruleVersions.Length) + { + string message = string.Format("Rule dependency on unknown rule {0}@{1} in {2}", dependency.Item1.Rule, dependency.Item1.Version, dependency.Item1.Recognizer.ToString()); + errors.AppendLine(dependency.Item2.ToString()); + errors.AppendLine(message); + continue; + } + Dependents dependents = Dependents.Self | dependency.Item1.Dependents; + ReportUnimplementedDependents(errors, dependency, dependents); + BitSet @checked = new BitSet(); + int highestRequiredDependency = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, effectiveRule, null); + if ((dependents & Dependents.Parents) != 0) + { + BitSet parents = relations.parents[dependency.Item1.Rule]; + for (int parent = parents.NextSetBit(0); parent >= 0; parent = parents.NextSetBit(parent + 1)) + { + if (parent < 0 || parent >= ruleVersions.Length || @checked.Get(parent)) + { + continue; + } + @checked.Set(parent); + int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, parent, "parent"); + highestRequiredDependency = Math.Max(highestRequiredDependency, required); + } + } + if ((dependents & Dependents.Children) != 0) + { + BitSet children = relations.children[dependency.Item1.Rule]; + for (int child = children.NextSetBit(0); child >= 0; child = children.NextSetBit(child + 1)) + { + if (child < 0 || child >= ruleVersions.Length || @checked.Get(child)) + { + continue; + } + @checked.Set(child); + int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, child, "child"); + 
highestRequiredDependency = Math.Max(highestRequiredDependency, required); + } + } + if ((dependents & Dependents.Ancestors) != 0) + { + BitSet ancestors = relations.GetAncestors(dependency.Item1.Rule); + for (int ancestor = ancestors.NextSetBit(0); ancestor >= 0; ancestor = ancestors.NextSetBit(ancestor + 1)) + { + if (ancestor < 0 || ancestor >= ruleVersions.Length || @checked.Get(ancestor)) + { + continue; + } + @checked.Set(ancestor); + int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, ancestor, "ancestor"); + highestRequiredDependency = Math.Max(highestRequiredDependency, required); + } + } + if ((dependents & Dependents.Descendants) != 0) + { + BitSet descendants = relations.GetDescendants(dependency.Item1.Rule); + for (int descendant = descendants.NextSetBit(0); descendant >= 0; descendant = descendants.NextSetBit(descendant + 1)) + { + if (descendant < 0 || descendant >= ruleVersions.Length || @checked.Get(descendant)) + { + continue; + } + @checked.Set(descendant); + int required = CheckDependencyVersion(errors, dependency, ruleNames, ruleVersions, descendant, "descendant"); + highestRequiredDependency = Math.Max(highestRequiredDependency, required); + } + } + int declaredVersion = dependency.Item1.Version; + if (declaredVersion > highestRequiredDependency) + { + string message = string.Format("Rule dependency version mismatch: {0} has maximum dependency version {1} (expected {2}) in {3}", ruleNames[dependency.Item1.Rule], highestRequiredDependency, declaredVersion, dependency.Item1.Recognizer.ToString()); + errors.AppendLine(dependency.Item2.ToString()); + errors.AppendLine(message); + } + } + if (errors.Length > 0) + { + throw new InvalidOperationException(errors.ToString()); + } + } + + private static readonly Dependents ImplementedDependents = Dependents.Self | Dependents.Parents | Dependents.Children | Dependents.Ancestors | Dependents.Descendants; + + private static void ReportUnimplementedDependents(StringBuilder 
errors, Tuple dependency, Dependents dependents) + { + Dependents unimplemented = dependents; + unimplemented &= ~ImplementedDependents; + if (unimplemented != Dependents.None) + { + string message = string.Format("Cannot validate the following dependents of rule {0}: {1}", dependency.Item1.Rule, unimplemented); + errors.AppendLine(message); + } + } + + private static int CheckDependencyVersion(StringBuilder errors, Tuple dependency, string[] ruleNames, int[] ruleVersions, int relatedRule, string relation) + { + string ruleName = ruleNames[dependency.Item1.Rule]; + string path; + if (relation == null) + { + path = ruleName; + } + else + { + string mismatchedRuleName = ruleNames[relatedRule]; + path = string.Format("rule {0} ({1} of {2})", mismatchedRuleName, relation, ruleName); + } + int declaredVersion = dependency.Item1.Version; + int actualVersion = ruleVersions[relatedRule]; + if (actualVersion > declaredVersion) + { + string message = string.Format("Rule dependency version mismatch: {0} has version {1} (expected <= {2}) in {3}", path, actualVersion, declaredVersion, dependency.Item1.Recognizer.ToString()); + errors.AppendLine(dependency.Item2.ToString()); + errors.AppendLine(message); + } + return actualVersion; + } + + private static int[] GetRuleVersions(TypeInfo recognizerClass, string[] ruleNames) + { + int[] versions = new int[ruleNames.Length]; + IEnumerable fields = recognizerClass.DeclaredFields; + foreach (FieldInfo field in fields) + { + bool isStatic = field.IsStatic; + bool isInteger = field.FieldType == typeof(int); + if (isStatic && isInteger && field.Name.StartsWith("RULE_")) + { + try + { + string name = field.Name.Substring("RULE_".Length); + if (name.Length == 0 || !System.Char.IsLower(name[0])) + { + continue; + } + int index = (int)field.GetValue(null); + if (index < 0 || index >= versions.Length) + { + object[] @params = new object[] { index, field.Name, recognizerClass.Name }; +#if false + Logger.Log(Level.Warning, "Rule index {0} for 
rule ''{1}'' out of bounds for recognizer {2}.", @params); +#endif + continue; + } + MethodInfo ruleMethod = GetRuleMethod(recognizerClass, name); + if (ruleMethod == null) + { + object[] @params = new object[] { name, recognizerClass.Name }; +#if false + Logger.Log(Level.Warning, "Could not find rule method for rule ''{0}'' in recognizer {1}.", @params); +#endif + continue; + } + RuleVersionAttribute ruleVersion = ruleMethod.GetCustomAttribute(); + int version = ruleVersion != null ? ruleVersion.Version : 0; + versions[index] = version; + } + catch (ArgumentException) + { +#if false + Logger.Log(Level.Warning, null, ex); +#else + throw; +#endif + } + catch (MemberAccessException) + { +#if false + Logger.Log(Level.Warning, null, ex); +#else + throw; +#endif + } + } + } + return versions; + } + + private static MethodInfo GetRuleMethod(TypeInfo recognizerClass, string name) + { + IEnumerable declaredMethods = recognizerClass.DeclaredMethods; + foreach (MethodInfo method in declaredMethods) + { + if (method.Name.Equals(name) && method.GetCustomAttribute() != null) + { + return method; + } + } + return null; + } + + private static string[] GetRuleNames(TypeInfo recognizerClass) + { + FieldInfo ruleNames = recognizerClass.DeclaredFields.First(i => i.Name == "ruleNames"); + return (string[])ruleNames.GetValue(null); + } + + public static IList> GetDependencies(TypeInfo clazz) + { + IList> result = new List>(); + + GetElementDependencies(AsCustomAttributeProvider(clazz), result); + foreach (ConstructorInfo ctor in clazz.DeclaredConstructors) + { + GetElementDependencies(AsCustomAttributeProvider(ctor), result); + foreach (ParameterInfo parameter in ctor.GetParameters()) + GetElementDependencies(AsCustomAttributeProvider(parameter), result); + } + + foreach (FieldInfo field in clazz.DeclaredFields) + { + GetElementDependencies(AsCustomAttributeProvider(field), result); + } + + foreach (MethodInfo method in clazz.DeclaredMethods) + { + 
GetElementDependencies(AsCustomAttributeProvider(method), result); + + if (method.ReturnParameter != null) + GetElementDependencies(AsCustomAttributeProvider(method.ReturnParameter), result); + + foreach (ParameterInfo parameter in method.GetParameters()) + GetElementDependencies(AsCustomAttributeProvider(parameter), result); + } + + return result; + } + + private static void GetElementDependencies(ICustomAttributeProvider annotatedElement, IList> result) + { + foreach (RuleDependencyAttribute dependency in annotatedElement.GetCustomAttributes(typeof(RuleDependencyAttribute), true)) + { + result.Add(Tuple.Create(dependency, annotatedElement)); + } + } + + private static RuleDependencyChecker.RuleRelations ExtractRuleRelations(TypeInfo recognizer) + { + int[] serializedATN = GetSerializedATN(recognizer); + if (serializedATN == null) + { + return null; + } + ATN atn = new ATNDeserializer().Deserialize(serializedATN); + RuleDependencyChecker.RuleRelations relations = new RuleDependencyChecker.RuleRelations(atn.ruleToStartState.Length); + foreach (ATNState state in atn.states) + { + if (!state.epsilonOnlyTransitions) + { + continue; + } + foreach (Transition transition in state.transitions) + { + if (transition.TransitionType != TransitionType.RULE) + { + continue; + } + RuleTransition ruleTransition = (RuleTransition)transition; + relations.AddRuleInvocation(state.ruleIndex, ruleTransition.target.ruleIndex); + } + } + return relations; + } + + private static int[] GetSerializedATN(TypeInfo recognizerClass) + { + FieldInfo serializedAtnField = recognizerClass.DeclaredFields.First(i => i.Name == "_serializedATN"); + if (serializedAtnField != null) + return (int[])serializedAtnField.GetValue(null); + + if (recognizerClass.BaseType != null) + return GetSerializedATN(recognizerClass.BaseType.GetTypeInfo()); + + return null; + } + + private sealed class RuleRelations + { + public readonly BitSet[] parents; + + public readonly BitSet[] children; + + public RuleRelations(int 
ruleCount) + { + parents = new BitSet[ruleCount]; + for (int i = 0; i < ruleCount; i++) + { + parents[i] = new BitSet(); + } + children = new BitSet[ruleCount]; + for (int i_1 = 0; i_1 < ruleCount; i_1++) + { + children[i_1] = new BitSet(); + } + } + + public bool AddRuleInvocation(int caller, int callee) + { + if (caller < 0) + { + // tokens rule + return false; + } + if (children[caller].Get(callee)) + { + // already added + return false; + } + children[caller].Set(callee); + parents[callee].Set(caller); + return true; + } + + public BitSet GetAncestors(int rule) + { + BitSet ancestors = new BitSet(); + ancestors.Or(parents[rule]); + while (true) + { + int cardinality = ancestors.Cardinality(); + for (int i = ancestors.NextSetBit(0); i >= 0; i = ancestors.NextSetBit(i + 1)) + { + ancestors.Or(parents[i]); + } + if (ancestors.Cardinality() == cardinality) + { + // nothing changed + break; + } + } + return ancestors; + } + + public BitSet GetDescendants(int rule) + { + BitSet descendants = new BitSet(); + descendants.Or(children[rule]); + while (true) + { + int cardinality = descendants.Cardinality(); + for (int i = descendants.NextSetBit(0); i >= 0; i = descendants.NextSetBit(i + 1)) + { + descendants.Or(children[i]); + } + if (descendants.Cardinality() == cardinality) + { + // nothing changed + break; + } + } + return descendants; + } + } + + private RuleDependencyChecker() + { + } + + public interface ICustomAttributeProvider + { + object[] GetCustomAttributes(Type attributeType, bool inherit); + } + + protected static ICustomAttributeProvider AsCustomAttributeProvider(TypeInfo type) + { + return new TypeCustomAttributeProvider(type); + } + + protected static ICustomAttributeProvider AsCustomAttributeProvider(MethodBase method) + { + return new MethodBaseCustomAttributeProvider(method); + } + + protected static ICustomAttributeProvider AsCustomAttributeProvider(ParameterInfo parameter) + { + return new ParameterInfoCustomAttributeProvider(parameter); + } + + 
protected static ICustomAttributeProvider AsCustomAttributeProvider(FieldInfo field) + { + return new FieldInfoCustomAttributeProvider(field); + } + + protected sealed class TypeCustomAttributeProvider : ICustomAttributeProvider + { + private readonly TypeInfo _provider; + + public TypeCustomAttributeProvider(TypeInfo provider) + { + _provider = provider; + } + + public object[] GetCustomAttributes(Type attributeType, bool inherit) + { + return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); + } + } + + protected sealed class MethodBaseCustomAttributeProvider : ICustomAttributeProvider + { + private readonly MethodBase _provider; + + public MethodBaseCustomAttributeProvider(MethodBase provider) + { + _provider = provider; + } + + public object[] GetCustomAttributes(Type attributeType, bool inherit) + { + return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); + } + } + + protected sealed class ParameterInfoCustomAttributeProvider : ICustomAttributeProvider + { + private readonly ParameterInfo _provider; + + public ParameterInfoCustomAttributeProvider(ParameterInfo provider) + { + _provider = provider; + } + + public object[] GetCustomAttributes(Type attributeType, bool inherit) + { + return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); + } + } + + protected sealed class FieldInfoCustomAttributeProvider : ICustomAttributeProvider + { + private readonly FieldInfo _provider; + + public FieldInfoCustomAttributeProvider(FieldInfo provider) + { + _provider = provider; + } + + public object[] GetCustomAttributes(Type attributeType, bool inherit) + { + return _provider.GetCustomAttributes(attributeType, inherit).ToArray(); + } + } + } +} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Utils.cs b/runtime/CSharp/src/Misc/Utils.cs similarity index 89% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Utils.cs rename to runtime/CSharp/src/Misc/Utils.cs index 9f7000bcce..2484a05a19 100644 --- 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Misc/Utils.cs +++ b/runtime/CSharp/src/Misc/Utils.cs @@ -5,28 +5,22 @@ using System; using System.Collections.Generic; using System.Text; -using Antlr4.Runtime.Sharpen; namespace Antlr4.Runtime.Misc { + public static class StaticUtils + { + public static string ToString(this IEnumerable list) + { + return "[" + Utils.Join(", ", list) + "]"; + } + } + public class Utils { public static string Join(string separator, IEnumerable items) { -#if NET40PLUS return string.Join(separator, items); -#else - ArrayList elements = new ArrayList(); - foreach (T item in items) - { - if (item == null) - elements.Add(""); - else - elements.Add(item.ToString()); - } - - return string.Join(separator, elements.ToArray()); -#endif } public static int NumNonnull(object[] data) diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/NoViableAltException.cs b/runtime/CSharp/src/NoViableAltException.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/NoViableAltException.cs rename to runtime/CSharp/src/NoViableAltException.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Parser.cs b/runtime/CSharp/src/Parser.cs similarity index 96% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Parser.cs rename to runtime/CSharp/src/Parser.cs index b139199234..ff05349137 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Parser.cs +++ b/runtime/CSharp/src/Parser.cs @@ -19,23 +19,22 @@ namespace Antlr4.Runtime /// This is all the parsing support code essentially; most of it is error recovery stuff. 
public abstract class Parser : Recognizer { -#if !PORTABLE public class TraceListener : IParseTreeListener { - private readonly TextWriter Output; - public TraceListener(TextWriter output) { - Output = output; + public TraceListener(TextWriter output,Parser enclosing) { + _output = output; + _enclosing = enclosing; } public virtual void EnterEveryRule(ParserRuleContext ctx) { - Output.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); + _output.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); } public virtual void ExitEveryRule(ParserRuleContext ctx) { - Output.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); + _output.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); } public virtual void VisitErrorNode(IErrorNode node) @@ -46,17 +45,18 @@ public virtual void VisitTerminal(ITerminalNode node) { ParserRuleContext parent = (ParserRuleContext)((IRuleNode)node.Parent).RuleContext; IToken token = node.Symbol; - Output.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]); + _output.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]); } internal TraceListener(Parser _enclosing) { this._enclosing = _enclosing; + _output = Console.Out; } private readonly Parser _enclosing; + private readonly TextWriter _output; } -#endif public class TrimToSizeListener : IParseTreeListener { @@ -90,7 +90,8 @@ public virtual void ExitEveryRule(ParserRuleContext ctx) /// bypass alternatives. /// /// - private static readonly IDictionary bypassAltsAtnCache = new Dictionary(); +// private static readonly IDictionary bypassAltsAtnCache = new Dictionary(); + private ATN bypassAltsAtnCache; /// The error handling strategy for the parser. 
/// @@ -131,7 +132,6 @@ public virtual void ExitEveryRule(ParserRuleContext ctx) /// private bool _buildParseTrees = true; -#if !PORTABLE /// /// When /// @@ -147,7 +147,6 @@ public virtual void ExitEveryRule(ParserRuleContext ctx) /// other parser methods. /// private Parser.TraceListener _tracer; -#endif /// /// The list of @@ -190,9 +189,7 @@ public virtual void Reset() _errHandler.Reset(this); _ctx = null; _syntaxErrors = 0; -#if !PORTABLE Trace = false; -#endif _precedenceStack.Clear(); _precedenceStack.Add(0); ATNSimulator interpreter = Interpreter; @@ -566,22 +563,20 @@ public virtual ITokenFactory TokenFactory [return: NotNull] public virtual ATN GetATNWithBypassAlts() { - string serializedAtn = SerializedAtn; + int[] serializedAtn = SerializedAtn; if (serializedAtn == null) { throw new NotSupportedException("The current parser does not support an ATN with bypass alternatives."); } - lock (bypassAltsAtnCache) + lock (this) { - ATN result = bypassAltsAtnCache.Get(serializedAtn); - if (result == null) - { - ATNDeserializationOptions deserializationOptions = new ATNDeserializationOptions(); - deserializationOptions.GenerateRuleBypassTransitions = true; - result = new ATNDeserializer(deserializationOptions).Deserialize(serializedAtn.ToCharArray()); - bypassAltsAtnCache.Put(serializedAtn, result); + if ( bypassAltsAtnCache!=null ) { + return bypassAltsAtnCache; } - return result; + ATNDeserializationOptions deserializationOptions = new ATNDeserializationOptions(); + deserializationOptions.GenerateRuleBypassTransitions = true; + bypassAltsAtnCache = new ATNDeserializer(deserializationOptions).Deserialize(serializedAtn); + return bypassAltsAtnCache; } } @@ -1144,7 +1139,6 @@ public virtual IList GetDFAStrings() return s; } -#if !PORTABLE /// For debugging and other purposes. /// For debugging and other purposes. 
public virtual void DumpDFA() @@ -1165,7 +1159,6 @@ public virtual void DumpDFA() } } } -#endif public virtual string SourceName { @@ -1212,7 +1205,6 @@ public virtual bool Profile } } -#if !PORTABLE /// /// During a parse is sometimes useful to listen in on the rule entry and exit /// events as well as token matches. @@ -1256,6 +1248,5 @@ public virtual bool Trace } } } -#endif } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ParserInterpreter.cs b/runtime/CSharp/src/ParserInterpreter.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ParserInterpreter.cs rename to runtime/CSharp/src/ParserInterpreter.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ParserRuleContext.cs b/runtime/CSharp/src/ParserRuleContext.cs similarity index 92% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ParserRuleContext.cs rename to runtime/CSharp/src/ParserRuleContext.cs index 00b73ffc6b..79c22daa50 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ParserRuleContext.cs +++ b/runtime/CSharp/src/ParserRuleContext.cs @@ -34,7 +34,7 @@ namespace Antlr4.Runtime /// public class ParserRuleContext : RuleContext { - public static readonly Antlr4.Runtime.ParserRuleContext EMPTY = new Antlr4.Runtime.ParserRuleContext(); + public static readonly Antlr4.Runtime.ParserRuleContext EMPTY = new Antlr4.Runtime.ParserRuleContext(); /// /// If we are debugging or building a parse tree for a visitor, @@ -174,6 +174,7 @@ public virtual void AddChild(ITerminalNode t) { children = new List(); } + children.Add(t); } @@ -183,6 +184,7 @@ public virtual void AddChild(RuleContext ruleInvocation) { children = new List(); } + children.Add(ruleInvocation); } @@ -236,6 +238,7 @@ public virtual T GetChild(int i) { return default(T); } + int j = -1; // what element have we found with ctxType? 
foreach (IParseTree o in children) @@ -245,10 +248,11 @@ public virtual T GetChild(int i) j++; if (j == i) { - return (T)o; + return (T) o; } } } + return default(T); } @@ -258,13 +262,14 @@ public virtual ITerminalNode GetToken(int ttype, int i) { return null; } + int j = -1; // what token with ttype have we found? foreach (IParseTree o in children) { if (o is ITerminalNode) { - ITerminalNode tnode = (ITerminalNode)o; + ITerminalNode tnode = (ITerminalNode) o; IToken symbol = tnode.Symbol; if (symbol.Type == ttype) { @@ -276,25 +281,23 @@ public virtual ITerminalNode GetToken(int ttype, int i) } } } + return null; } -#if (NET45PLUS && !DOTNETCORE) - public virtual IReadOnlyList GetTokens(int ttype) -#else public virtual ITerminalNode[] GetTokens(int ttype) -#endif { if (children == null) { return Collections.EmptyList(); } + List tokens = null; foreach (IParseTree o in children) { if (o is ITerminalNode) { - ITerminalNode tnode = (ITerminalNode)o; + ITerminalNode tnode = (ITerminalNode) o; IToken symbol = tnode.Symbol; if (symbol.Type == ttype) { @@ -302,19 +305,18 @@ public virtual ITerminalNode[] GetTokens(int ttype) { tokens = new List(); } + tokens.Add(tnode); } } } + if (tokens == null) { return Collections.EmptyList(); } -#if (NET45PLUS && !DOTNETCORE) - return tokens; -#else + return tokens.ToArray(); -#endif } public virtual T GetRuleContext(int i) @@ -323,18 +325,14 @@ public virtual T GetRuleContext(int i) return GetChild(i); } -#if (NET45PLUS && !DOTNETCORE) - public virtual IReadOnlyList GetRuleContexts() - where T : Antlr4.Runtime.ParserRuleContext -#else public virtual T[] GetRuleContexts() where T : Antlr4.Runtime.ParserRuleContext -#endif { if (children == null) { return Collections.EmptyList(); } + List contexts = null; foreach (IParseTree o in children) { @@ -344,26 +342,22 @@ public virtual T[] GetRuleContexts() { contexts = new List(); } - contexts.Add((T)o); + + contexts.Add((T) o); } } + if (contexts == null) { return Collections.EmptyList(); 
} -#if (NET45PLUS && !DOTNETCORE) - return contexts; -#else + return contexts.ToArray(); -#endif } public override int ChildCount { - get - { - return children != null ? children.Count : 0; - } + get { return children != null ? children.Count : 0; } } public override Interval SourceInterval @@ -374,32 +368,21 @@ public override Interval SourceInterval { return Interval.Invalid; } + return Interval.Of(_start.TokenIndex, _stop.TokenIndex); } } public virtual IToken Start { - get - { - return _start; - } - set - { - _start = value; - } + get { return _start; } + set { _start = value; } } public virtual IToken Stop { - get - { - return _stop; - } - set - { - _stop = value; - } + get { return _stop; } + set { _stop = value; } } /// Used for rule context info debugging during parse-time, not so much for ATN debugging diff --git a/runtime/CSharp/src/Properties/AssemblyInfo.cs b/runtime/CSharp/src/Properties/AssemblyInfo.cs new file mode 100644 index 0000000000..8436c7c773 --- /dev/null +++ b/runtime/CSharp/src/Properties/AssemblyInfo.cs @@ -0,0 +1,9 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +using System; +using System.Reflection; + +[assembly: CLSCompliant(true)] +[assembly: AssemblyVersion("4.13.2")] diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ProxyErrorListener.cs b/runtime/CSharp/src/ProxyErrorListener.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ProxyErrorListener.cs rename to runtime/CSharp/src/ProxyErrorListener.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ProxyParserErrorListener.cs b/runtime/CSharp/src/ProxyParserErrorListener.cs similarity index 91% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ProxyParserErrorListener.cs rename to runtime/CSharp/src/ProxyParserErrorListener.cs index b673abe624..495ac599bf 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/ProxyParserErrorListener.cs +++ b/runtime/CSharp/src/ProxyParserErrorListener.cs @@ -30,7 +30,7 @@ public virtual void ReportAmbiguity(Parser recognizer, DFA dfa, int startIndex, } } - public virtual void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, SimulatorState conflictState) + public virtual void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) { foreach (IAntlrErrorListener listener in Delegates) { @@ -39,11 +39,11 @@ public virtual void ReportAttemptingFullContext(Parser recognizer, DFA dfa, int continue; } IParserErrorListener parserErrorListener = (IParserErrorListener)listener; - parserErrorListener.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, conflictState); + parserErrorListener.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs); } } - public virtual void ReportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, SimulatorState acceptState) + public virtual void ReportContextSensitivity(Parser recognizer, DFA dfa, int 
startIndex, int stopIndex, int prediction, ATNConfigSet configs) { foreach (IAntlrErrorListener listener in Delegates) { @@ -52,7 +52,7 @@ public virtual void ReportContextSensitivity(Parser recognizer, DFA dfa, int sta continue; } IParserErrorListener parserErrorListener = (IParserErrorListener)listener; - parserErrorListener.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, acceptState); + parserErrorListener.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs); } } } diff --git a/runtime/CSharp/src/README.md b/runtime/CSharp/src/README.md new file mode 100644 index 0000000000..ea0c25268c --- /dev/null +++ b/runtime/CSharp/src/README.md @@ -0,0 +1,77 @@ +# C# target for ANTLR 4 + +### Note to historical users + +Versions of ANTLR 4.4.x and before managed the C# +target as part of a [separate tool provided by Sam Harwell](https://github.com/tunnelvisionlabs/antlr4cs/releases/tag/v4.6.6). +As of 4.5, we our releasing a (mono-compatible) C# target together +with the main tool. + +The current version is written using netstandard2.0 and netstandard2.1, it's possible +to use it on different platforms (Windows, MacOS X, Linux, and other). + +Releasing the runtime with the tool ensures that you can get the exact same behavior across many languages: Java, C#, Python, JavaScript, Go, Swift and C++. + +## Getting Started + +### Step 1: Install Java + +The C# target for ANTLR 4 requires Java for *generating* C# code (but the applications compiled from this C# code will not require Java to be installed). +You can install *any* of the following versions of Java to use this target. + +If you already have one of the following installed, you should check to make sure the installation is up-to-date. 
+ +* Java 8 runtime environment (x86 or x64) +* Java 8 development kit (x86 or x64, provided that the JRE option is also installed during the development kit installation) +* Java 7 runtime environment (x86 or x64) +* Java 7 development kit (x86 or x64, provided that the JRE option is also installed during the development kit installation) + +### Step 2: Download the tool + +You need to download the ANTLR tool from the ANTLR web site. +This is a Java archive (*.jar) used to generate the C# code from an ANTLR grammar. + + +### Step 3: Add or create a grammar file (*.g4) in your project + +To avoid confusing your IDE, we suggest setting the build action to None for this file. +See the docs and the book to learn about writing lexer and parser grammars. + + +### Step 4: Generate the C# code + +This can be done either from the cmd line, or by adding a custom pre-build command in your project. +At minimal, the cmd line should look as follows: ``java -jar antlr4-4.13.2.jar -Dlanguage=CSharp grammar.g4`` +This will generate the files, which you can then integrate in your project. +This is just a quick start. The tool has many useful options to control generation, please refer to its documentation. + +### Step 5: Add a reference to the ANTLR runtime in your project + +The Antlr 4 standard runtime for C# is now available from NuGet. +We trust that you know how to do add NuGet references to your project :-). +The package id is [Antlr4.Runtime.Standard](https://www.nuget.org/packages/Antlr4.Runtime.Standard/). We do not support other packages. + +Use the GUI or the following command in the Package Manager Console: + +``` +Install-Package Antlr4.Runtime.Standard +``` + +### Step 6: You're done! + +Of course, the generated code is not going to meet your requirement by magic. +There are 3 ways to use the generated code: + - by generating a parse tree, and traversing it using a listener. This is the most common method. 
+ - by generating a parse tree, and traversing it using a visitor. This requires the `-visitor` option, and is a bit more work. + - by providing code within your grammar, which gets executed when your input files are parsed. +While the latter works, it is no longer the recommended approach, because it is not portable, and harder to maintain. More importantly, it breaks the parsing when your code breaks. + +See the web site for examples of using the generated code. + +To learn more about ANTLR 4, read [the book](http://a.co/2n4rJlb). + +### Visual Studio integration + +If you require tighter Visual Studio integration, you can use the tools from [Tunnel Vision Labs](http://tunnelvisionlabs.com/). +(please note however that they use a different tool and runtime) + diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RecognitionException.cs b/runtime/CSharp/src/RecognitionException.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RecognitionException.cs rename to runtime/CSharp/src/RecognitionException.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Recognizer.cs b/runtime/CSharp/src/Recognizer.cs similarity index 97% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Recognizer.cs rename to runtime/CSharp/src/Recognizer.cs index 150ead5a62..3abbabccc1 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Recognizer.cs +++ b/runtime/CSharp/src/Recognizer.cs @@ -4,12 +4,10 @@ */ using System; using System.Collections.Generic; +using System.Runtime.CompilerServices; using Antlr4.Runtime.Atn; using Antlr4.Runtime.Misc; - -#if NET40PLUS -using System.Runtime.CompilerServices; -#endif +using Antlr4.Runtime.Sharpen; namespace Antlr4.Runtime { @@ -18,17 +16,13 @@ public abstract class Recognizer : IRecognizer { public const int Eof = -1; -#if NET40PLUS private static readonly ConditionalWeakTable> tokenTypeMapCache = new ConditionalWeakTable>(); private static readonly ConditionalWeakTable> ruleIndexMapCache = new 
ConditionalWeakTable>(); -#endif [NotNull] private IAntlrErrorListener[] _listeners = { -#if !PORTABLE ConsoleErrorListener.Instance -#endif }; private ATNInterpreter _interp; @@ -74,11 +68,7 @@ public virtual IDictionary TokenTypeMap { get { -#if NET40PLUS return tokenTypeMapCache.GetValue(Vocabulary, CreateTokenTypeMap); -#else - return CreateTokenTypeMap(Vocabulary); -#endif } } @@ -117,11 +107,8 @@ public virtual IDictionary RuleIndexMap { throw new NotSupportedException("The current recognizer does not provide a list of rule names."); } -#if NET40PLUS + return ruleIndexMapCache.GetValue(ruleNames, Utils.ToMap); -#else - return Utils.ToMap(ruleNames); -#endif } } @@ -145,7 +132,7 @@ public virtual int GetTokenType(string tokenName) ///

    For interpreters, we don't know their serialized ATN despite having /// created the interpreter from it.

    /// - public virtual string SerializedAtn + public virtual int[] SerializedAtn { [return: NotNull] get @@ -300,7 +287,7 @@ public virtual void RemoveErrorListener(IAntlrErrorListener listener) public virtual void RemoveErrorListeners() { - _listeners = new IAntlrErrorListener[0]; + _listeners = Collections.EmptyList>(); } [NotNull] diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RuleContext.cs b/runtime/CSharp/src/RuleContext.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RuleContext.cs rename to runtime/CSharp/src/RuleContext.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RuleDependencyAttribute.cs b/runtime/CSharp/src/RuleDependencyAttribute.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RuleDependencyAttribute.cs rename to runtime/CSharp/src/RuleDependencyAttribute.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RuleVersionAttribute.cs b/runtime/CSharp/src/RuleVersionAttribute.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/RuleVersionAttribute.cs rename to runtime/CSharp/src/RuleVersionAttribute.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Arrays.cs b/runtime/CSharp/src/Sharpen/Arrays.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Arrays.cs rename to runtime/CSharp/src/Sharpen/Arrays.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/AtomicReference.cs b/runtime/CSharp/src/Sharpen/AtomicReference.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/AtomicReference.cs rename to runtime/CSharp/src/Sharpen/AtomicReference.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/BitSet.cs b/runtime/CSharp/src/Sharpen/BitSet.cs similarity index 99% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/BitSet.cs rename to runtime/CSharp/src/Sharpen/BitSet.cs index 
76751b41f4..59e59a9319 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/BitSet.cs +++ b/runtime/CSharp/src/Sharpen/BitSet.cs @@ -9,7 +9,7 @@ namespace Antlr4.Runtime.Sharpen public class BitSet { - private static readonly ulong[] EmptyBits = new ulong[0]; + private static readonly ulong[] EmptyBits = Collections.EmptyList(); private const int BitsPerElement = 8 * sizeof(ulong); private ulong[] _data = EmptyBits; diff --git a/runtime/CSharp/src/Sharpen/Collections.cs b/runtime/CSharp/src/Sharpen/Collections.cs new file mode 100644 index 0000000000..b9df0ba56c --- /dev/null +++ b/runtime/CSharp/src/Sharpen/Collections.cs @@ -0,0 +1,51 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +namespace Antlr4.Runtime.Sharpen +{ + using System.Collections.Generic; + using System.Collections.ObjectModel; + + internal static class Collections + { + /// + /// Available in .NET as Array.Empty but not to the net45 target. + /// See: https://learn.microsoft.com/dotnet/api/system.array.empty. + /// + public static T[] EmptyList() + { + return EmptyListImpl.Instance; + } + + public static ReadOnlyDictionary EmptyMap() + { + return EmptyMapImpl.Instance; + } + + public static ReadOnlyCollection SingletonList(T item) + { + return new ReadOnlyCollection(new T[] { item }); + } + + public static ReadOnlyDictionary SingletonMap(TKey key, TValue value) + { + return new ReadOnlyDictionary(new Dictionary { { key, value } }); + } + + private static class EmptyListImpl + { +#pragma warning disable CA1825 + // Provides a solution for CA1825. 
+ public static readonly T[] Instance = new T[0]; +#pragma warning restore CA1825 + } + + private static class EmptyMapImpl + { + public static readonly ReadOnlyDictionary Instance = + new ReadOnlyDictionary(new Dictionary()); + } + } + +} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/DictionaryExtensions.cs b/runtime/CSharp/src/Sharpen/DictionaryExtensions.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/DictionaryExtensions.cs rename to runtime/CSharp/src/Sharpen/DictionaryExtensions.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/ListExtensions.cs b/runtime/CSharp/src/Sharpen/ListExtensions.cs similarity index 94% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/ListExtensions.cs rename to runtime/CSharp/src/Sharpen/ListExtensions.cs index 999a97f71f..e57af31df9 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/ListExtensions.cs +++ b/runtime/CSharp/src/Sharpen/ListExtensions.cs @@ -5,7 +5,6 @@ namespace Antlr4.Runtime.Sharpen { using System.Collections.Generic; - using Antlr4.Runtime.Misc; internal static class ListExtensions { @@ -16,8 +15,5 @@ public static T Set(this IList list, int index, T value) list[index] = value; return previous; } - - - - } + } } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Runtime.cs b/runtime/CSharp/src/Sharpen/Runtime.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/Runtime.cs rename to runtime/CSharp/src/Sharpen/Runtime.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/SequenceEqualityComparer.cs b/runtime/CSharp/src/Sharpen/SequenceEqualityComparer.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Sharpen/SequenceEqualityComparer.cs rename to runtime/CSharp/src/Sharpen/SequenceEqualityComparer.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/TokenStreamRewriter.cs 
b/runtime/CSharp/src/TokenStreamRewriter.cs similarity index 99% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/TokenStreamRewriter.cs rename to runtime/CSharp/src/TokenStreamRewriter.cs index 8ba429c605..62b22ba366 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/TokenStreamRewriter.cs +++ b/runtime/CSharp/src/TokenStreamRewriter.cs @@ -621,9 +621,7 @@ public virtual string GetText(string programName, Interval interval) // kill first delete rop.index = Math.Min(prevRop.index, rop.index); rop.lastIndex = Math.Max(prevRop.lastIndex, rop.lastIndex); -#if !PORTABLE System.Console.Out.WriteLine("new rop " + rop); -#endif } else { diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/TokenTypes.cs b/runtime/CSharp/src/TokenTypes.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/TokenTypes.cs rename to runtime/CSharp/src/TokenTypes.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/AbstractParseTreeVisitor.cs b/runtime/CSharp/src/Tree/AbstractParseTreeVisitor.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/AbstractParseTreeVisitor.cs rename to runtime/CSharp/src/Tree/AbstractParseTreeVisitor.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ErrorNodeImpl.cs b/runtime/CSharp/src/Tree/ErrorNodeImpl.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ErrorNodeImpl.cs rename to runtime/CSharp/src/Tree/ErrorNodeImpl.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IErrorNode.cs b/runtime/CSharp/src/Tree/IErrorNode.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IErrorNode.cs rename to runtime/CSharp/src/Tree/IErrorNode.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IParseTree.cs b/runtime/CSharp/src/Tree/IParseTree.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IParseTree.cs rename to 
runtime/CSharp/src/Tree/IParseTree.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IParseTreeListener.cs b/runtime/CSharp/src/Tree/IParseTreeListener.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IParseTreeListener.cs rename to runtime/CSharp/src/Tree/IParseTreeListener.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IParseTreeVisitor.cs b/runtime/CSharp/src/Tree/IParseTreeVisitor.cs similarity index 97% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IParseTreeVisitor.cs rename to runtime/CSharp/src/Tree/IParseTreeVisitor.cs index 3b3188183c..4ad93c6da4 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IParseTreeVisitor.cs +++ b/runtime/CSharp/src/Tree/IParseTreeVisitor.cs @@ -16,11 +16,7 @@ namespace Antlr4.Runtime.Tree /// . /// /// Sam Harwell -#if COMPACT - public interface IParseTreeVisitor -#else public interface IParseTreeVisitor -#endif { /// Visit a parse tree, and return a user-defined result of the operation. /// Visit a parse tree, and return a user-defined result of the operation. 
diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IRuleNode.cs b/runtime/CSharp/src/Tree/IRuleNode.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/IRuleNode.cs rename to runtime/CSharp/src/Tree/IRuleNode.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ISyntaxTree.cs b/runtime/CSharp/src/Tree/ISyntaxTree.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ISyntaxTree.cs rename to runtime/CSharp/src/Tree/ISyntaxTree.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ITerminalNode.cs b/runtime/CSharp/src/Tree/ITerminalNode.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ITerminalNode.cs rename to runtime/CSharp/src/Tree/ITerminalNode.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ITree.cs b/runtime/CSharp/src/Tree/ITree.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ITree.cs rename to runtime/CSharp/src/Tree/ITree.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeProperty.cs b/runtime/CSharp/src/Tree/ParseTreeProperty.cs similarity index 96% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeProperty.cs rename to runtime/CSharp/src/Tree/ParseTreeProperty.cs index 40b2e421a5..3b12cb99e8 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeProperty.cs +++ b/runtime/CSharp/src/Tree/ParseTreeProperty.cs @@ -2,11 +2,7 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ -#if NET40PLUS using System.Collections.Concurrent; -#else -using Antlr4.Runtime.Sharpen; -#endif namespace Antlr4.Runtime.Tree { diff --git a/runtime/CSharp/src/Tree/ParseTreeWalker.cs b/runtime/CSharp/src/Tree/ParseTreeWalker.cs new file mode 100644 index 0000000000..5f91e20f67 --- /dev/null +++ b/runtime/CSharp/src/Tree/ParseTreeWalker.cs @@ -0,0 +1,75 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +namespace Antlr4.Runtime.Tree +{ + public class ParseTreeWalker + { + public static readonly ParseTreeWalker Default = new ParseTreeWalker(); + + /// + /// Performs a walk on the given parse tree starting at the root and going down recursively + /// with depth-first search. On each node, + /// is called before + /// recursively walking down into child nodes, then + /// + /// is called after the recursive call to wind up. + /// + /// The listener used by the walker to process grammar rules + /// The parse tree to be walked on + public virtual void Walk(IParseTreeListener listener, IParseTree t) + { + if (t is IErrorNode) + { + listener.VisitErrorNode((IErrorNode)t); + return; + } + else + { + if (t is ITerminalNode) + { + listener.VisitTerminal((ITerminalNode)t); + return; + } + } + IRuleNode r = (IRuleNode)t; + EnterRule(listener, r); + int n = r.ChildCount; + for (int i = 0; i < n; i++) + { + Walk(listener, r.GetChild(i)); + } + ExitRule(listener, r); + } + + /// + /// Enters a grammar rule by first triggering the generic event + /// + /// then by triggering the event specific to the given parse tree node + /// + /// The listener responding to the trigger events + /// The grammar rule containing the rule context + protected internal virtual void EnterRule(IParseTreeListener listener, IRuleNode r) + { + ParserRuleContext ctx = (ParserRuleContext)r.RuleContext; + listener.EnterEveryRule(ctx); + 
ctx.EnterRule(listener); + } + + /// + /// Exits a grammar rule by first triggering the event specific to the given parse tree node + /// then by triggering the generic event + /// + /// + /// The listener responding to the trigger events + /// The grammar rule containing the rule context + protected internal virtual void ExitRule(IParseTreeListener listener, IRuleNode r) + { + ParserRuleContext ctx = (ParserRuleContext)r.RuleContext; + ctx.ExitRule(listener); + listener.ExitEveryRule(ctx); + } + } +} diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/Chunk.cs b/runtime/CSharp/src/Tree/Pattern/Chunk.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/Chunk.cs rename to runtime/CSharp/src/Tree/Pattern/Chunk.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/ParseTreeMatch.cs b/runtime/CSharp/src/Tree/Pattern/ParseTreeMatch.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/ParseTreeMatch.cs rename to runtime/CSharp/src/Tree/Pattern/ParseTreeMatch.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/ParseTreePattern.cs b/runtime/CSharp/src/Tree/Pattern/ParseTreePattern.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/ParseTreePattern.cs rename to runtime/CSharp/src/Tree/Pattern/ParseTreePattern.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/ParseTreePatternMatcher.cs b/runtime/CSharp/src/Tree/Pattern/ParseTreePatternMatcher.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/ParseTreePatternMatcher.cs rename to runtime/CSharp/src/Tree/Pattern/ParseTreePatternMatcher.cs index 6deab10811..d2cb6e9665 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/ParseTreePatternMatcher.cs +++ b/runtime/CSharp/src/Tree/Pattern/ParseTreePatternMatcher.cs @@ -301,7 +301,11 @@ public virtual 
ParseTreePattern Compile(string pattern, int patternRuleIndex) IList tokenList = Tokenize(pattern); ListTokenSource tokenSrc = new ListTokenSource(tokenList); CommonTokenStream tokens = new CommonTokenStream(tokenSrc); - ParserInterpreter parserInterp = new ParserInterpreter(parser.GrammarFileName, parser.Vocabulary, Arrays.AsList(parser.RuleNames), parser.GetATNWithBypassAlts(), tokens); + ParserInterpreter parserInterp = new ParserInterpreter(parser.GrammarFileName, + parser.Vocabulary, + Arrays.AsList(parser.RuleNames), + parser.GetATNWithBypassAlts(), + tokens); IParseTree tree = null; try { diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/RuleTagToken.cs b/runtime/CSharp/src/Tree/Pattern/RuleTagToken.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/RuleTagToken.cs rename to runtime/CSharp/src/Tree/Pattern/RuleTagToken.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/TagChunk.cs b/runtime/CSharp/src/Tree/Pattern/TagChunk.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/TagChunk.cs rename to runtime/CSharp/src/Tree/Pattern/TagChunk.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/TextChunk.cs b/runtime/CSharp/src/Tree/Pattern/TextChunk.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/TextChunk.cs rename to runtime/CSharp/src/Tree/Pattern/TextChunk.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/TokenTagToken.cs b/runtime/CSharp/src/Tree/Pattern/TokenTagToken.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Pattern/TokenTagToken.cs rename to runtime/CSharp/src/Tree/Pattern/TokenTagToken.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/TerminalNodeImpl.cs b/runtime/CSharp/src/Tree/TerminalNodeImpl.cs similarity index 100% rename from 
runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/TerminalNodeImpl.cs rename to runtime/CSharp/src/Tree/TerminalNodeImpl.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Trees.cs b/runtime/CSharp/src/Tree/Trees.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Trees.cs rename to runtime/CSharp/src/Tree/Trees.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPath.cs b/runtime/CSharp/src/Tree/Xpath/XPath.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPath.cs rename to runtime/CSharp/src/Tree/Xpath/XPath.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathElement.cs b/runtime/CSharp/src/Tree/Xpath/XPathElement.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathElement.cs rename to runtime/CSharp/src/Tree/Xpath/XPathElement.cs diff --git a/runtime/CSharp/src/Tree/Xpath/XPathLexer.cs b/runtime/CSharp/src/Tree/Xpath/XPathLexer.cs new file mode 100644 index 0000000000..274106d0a3 --- /dev/null +++ b/runtime/CSharp/src/Tree/Xpath/XPathLexer.cs @@ -0,0 +1,135 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by a tool. +// ANTLR Version: 4.13.1 +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// +//------------------------------------------------------------------------------ + +// Generated from XPathLexer.g4 by ANTLR 4.13.1 + +// Unreachable code detected +#pragma warning disable 0162 +// The variable '...' is assigned but its value is never used +#pragma warning disable 0219 +// Missing XML comment for publicly visible type or member '...' 
+#pragma warning disable 1591 +// Ambiguous reference in cref attribute +#pragma warning disable 419 + +using System; +using System.IO; +using System.Text; +using Antlr4.Runtime; +using Antlr4.Runtime.Atn; +using Antlr4.Runtime.Misc; +using DFA = Antlr4.Runtime.Dfa.DFA; + +[System.CodeDom.Compiler.GeneratedCode("ANTLR", "4.13.1")] +[System.CLSCompliant(false)] +public partial class XPathLexer : Lexer { + protected static DFA[] decisionToDFA; + protected static PredictionContextCache sharedContextCache = new PredictionContextCache(); + public const int + TokenRef=1, RuleRef=2, Anywhere=3, Root=4, Wildcard=5, Bang=6, ID=7, String=8; + public static string[] channelNames = { + "DEFAULT_TOKEN_CHANNEL", "HIDDEN" + }; + + public static string[] modeNames = { + "DEFAULT_MODE" + }; + + public static readonly string[] ruleNames = { + "Anywhere", "Root", "Wildcard", "Bang", "ID", "NameChar", "NameStartChar", + "String" + }; + + + public XPathLexer(ICharStream input) + : this(input, Console.Out, Console.Error) { } + + public XPathLexer(ICharStream input, TextWriter output, TextWriter errorOutput) + : base(input, output, errorOutput) + { + Interpreter = new LexerATNSimulator(this, _ATN, decisionToDFA, sharedContextCache); + } + + private static readonly string[] _LiteralNames = { + null, null, null, "'//'", "'/'", "'*'", "'!'" + }; + private static readonly string[] _SymbolicNames = { + null, "TokenRef", "RuleRef", "Anywhere", "Root", "Wildcard", "Bang", "ID", + "String" + }; + public static readonly IVocabulary DefaultVocabulary = new Vocabulary(_LiteralNames, _SymbolicNames); + + [NotNull] + public override IVocabulary Vocabulary + { + get + { + return DefaultVocabulary; + } + } + + public override string GrammarFileName { get { return "XPathLexer.g4"; } } + + public override string[] RuleNames { get { return ruleNames; } } + + public override string[] ChannelNames { get { return channelNames; } } + + public override string[] ModeNames { get { return modeNames; } } + + 
public override int[] SerializedAtn { get { return _serializedATN; } } + + static XPathLexer() { + decisionToDFA = new DFA[_ATN.NumberOfDecisions]; + for (int i = 0; i < _ATN.NumberOfDecisions; i++) { + decisionToDFA[i] = new DFA(_ATN.GetDecisionState(i), i); + } + } + public override void Action(RuleContext _localctx, int ruleIndex, int actionIndex) { + switch (ruleIndex) { + case 4 : ID_action(_localctx, actionIndex); break; + } + } + private void ID_action(RuleContext _localctx, int actionIndex) { + switch (actionIndex) { + case 0: + String text = Text; + if ( Char.IsUpper(text[0]) ) + Type = TokenRef; + else + Type = RuleRef; + break; + } + } + + private static int[] _serializedATN = { + 4,0,8,50,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6,7,6, + 2,7,7,7,1,0,1,0,1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,5,4,29,8,4,10,4,12, + 4,32,9,4,1,4,1,4,1,5,1,5,3,5,38,8,5,1,6,1,6,1,7,1,7,5,7,44,8,7,10,7,12, + 7,47,9,7,1,7,1,7,1,45,0,8,1,3,3,4,5,5,7,6,9,7,11,0,13,0,15,8,1,0,2,5,0, + 48,57,95,95,183,183,768,879,8255,8256,13,0,65,90,97,122,192,214,216,246, + 248,767,880,893,895,8191,8204,8205,8304,8591,11264,12271,12289,55295,63744, + 64975,65008,65533,50,0,1,1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1,0,0,0,0, + 9,1,0,0,0,0,15,1,0,0,0,1,17,1,0,0,0,3,20,1,0,0,0,5,22,1,0,0,0,7,24,1,0, + 0,0,9,26,1,0,0,0,11,37,1,0,0,0,13,39,1,0,0,0,15,41,1,0,0,0,17,18,5,47, + 0,0,18,19,5,47,0,0,19,2,1,0,0,0,20,21,5,47,0,0,21,4,1,0,0,0,22,23,5,42, + 0,0,23,6,1,0,0,0,24,25,5,33,0,0,25,8,1,0,0,0,26,30,3,13,6,0,27,29,3,11, + 5,0,28,27,1,0,0,0,29,32,1,0,0,0,30,28,1,0,0,0,30,31,1,0,0,0,31,33,1,0, + 0,0,32,30,1,0,0,0,33,34,6,4,0,0,34,10,1,0,0,0,35,38,3,13,6,0,36,38,7,0, + 0,0,37,35,1,0,0,0,37,36,1,0,0,0,38,12,1,0,0,0,39,40,7,1,0,0,40,14,1,0, + 0,0,41,45,5,39,0,0,42,44,9,0,0,0,43,42,1,0,0,0,44,47,1,0,0,0,45,46,1,0, + 0,0,45,43,1,0,0,0,46,48,1,0,0,0,47,45,1,0,0,0,48,49,5,39,0,0,49,16,1,0, + 0,0,4,0,30,37,45,1,1,4,0 + }; + + public static readonly ATN _ATN = + new 
ATNDeserializer().Deserialize(_serializedATN); + + +} diff --git a/runtime/CSharp/src/Tree/Xpath/XPathLexer.g4 b/runtime/CSharp/src/Tree/Xpath/XPathLexer.g4 new file mode 100644 index 0000000000..0dff1f2b02 --- /dev/null +++ b/runtime/CSharp/src/Tree/Xpath/XPathLexer.g4 @@ -0,0 +1,65 @@ +lexer grammar XPathLexer; + +tokens { TokenRef, RuleRef } + +/* +path : separator? word (separator word)* EOF ; + +separator + : '/' '!' + | '//' '!' + | '/' + | '//' + ; + +word: TokenRef + | RuleRef + | String + | '*' + ; +*/ + +Anywhere : '//' ; +Root : '/' ; +Wildcard : '*' ; +Bang : '!' ; + +ID : NameStartChar NameChar* + { + String text = Text; + if ( Char.IsUpper(text[0]) ) + Type = TokenRef; + else + Type = RuleRef; + } + ; + +fragment +NameChar : NameStartChar + | '0'..'9' + | '_' + | '\u00B7' + | '\u0300'..'\u036F' + | '\u203F'..'\u2040' + ; + +fragment +NameStartChar + : 'A'..'Z' | 'a'..'z' + | '\u00C0'..'\u00D6' + | '\u00D8'..'\u00F6' + | '\u00F8'..'\u02FF' + | '\u0370'..'\u037D' + | '\u037F'..'\u1FFF' + | '\u200C'..'\u200D' + | '\u2070'..'\u218F' + | '\u2C00'..'\u2FEF' + | '\u3001'..'\uD7FF' + | '\uF900'..'\uFDCF' + | '\uFDF0'..'\uFFFD' + ; // ignores | ['\u10000-'\uEFFFF] ; + +String : '\'' .*? 
'\'' ; + +//Ws : [ \t\r\n]+ -> skip ; + diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.tokens b/runtime/CSharp/src/Tree/Xpath/XPathLexer.tokens similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.tokens rename to runtime/CSharp/src/Tree/Xpath/XPathLexer.tokens diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexerErrorListener.cs b/runtime/CSharp/src/Tree/Xpath/XPathLexerErrorListener.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexerErrorListener.cs rename to runtime/CSharp/src/Tree/Xpath/XPathLexerErrorListener.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathRuleAnywhereElement.cs b/runtime/CSharp/src/Tree/Xpath/XPathRuleAnywhereElement.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathRuleAnywhereElement.cs rename to runtime/CSharp/src/Tree/Xpath/XPathRuleAnywhereElement.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathRuleElement.cs b/runtime/CSharp/src/Tree/Xpath/XPathRuleElement.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathRuleElement.cs rename to runtime/CSharp/src/Tree/Xpath/XPathRuleElement.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathTokenAnywhereElement.cs b/runtime/CSharp/src/Tree/Xpath/XPathTokenAnywhereElement.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathTokenAnywhereElement.cs rename to runtime/CSharp/src/Tree/Xpath/XPathTokenAnywhereElement.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathTokenElement.cs b/runtime/CSharp/src/Tree/Xpath/XPathTokenElement.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathTokenElement.cs rename to runtime/CSharp/src/Tree/Xpath/XPathTokenElement.cs 
diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathWildcardAnywhereElement.cs b/runtime/CSharp/src/Tree/Xpath/XPathWildcardAnywhereElement.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathWildcardAnywhereElement.cs rename to runtime/CSharp/src/Tree/Xpath/XPathWildcardAnywhereElement.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathWildcardElement.cs b/runtime/CSharp/src/Tree/Xpath/XPathWildcardElement.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathWildcardElement.cs rename to runtime/CSharp/src/Tree/Xpath/XPathWildcardElement.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/UnbufferedCharStream.cs b/runtime/CSharp/src/UnbufferedCharStream.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/UnbufferedCharStream.cs rename to runtime/CSharp/src/UnbufferedCharStream.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/UnbufferedTokenStream.cs b/runtime/CSharp/src/UnbufferedTokenStream.cs similarity index 100% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/UnbufferedTokenStream.cs rename to runtime/CSharp/src/UnbufferedTokenStream.cs diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Vocabulary.cs b/runtime/CSharp/src/Vocabulary.cs similarity index 98% rename from runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Vocabulary.cs rename to runtime/CSharp/src/Vocabulary.cs index 9b12932d2c..ee18daae90 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Vocabulary.cs +++ b/runtime/CSharp/src/Vocabulary.cs @@ -16,7 +16,7 @@ namespace Antlr4.Runtime /// Sam Harwell public class Vocabulary : IVocabulary { - private static readonly string[] EmptyNames = new string[0]; + private static readonly string[] EmptyNames = Collections.EmptyList(); /// /// Gets an empty diff --git a/runtime/CSharp/tests/issue-2693/ErrorListener.cs 
b/runtime/CSharp/tests/issue-2693/ErrorListener.cs new file mode 100644 index 0000000000..1d610a3b56 --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/ErrorListener.cs @@ -0,0 +1,18 @@ +using Antlr4.Runtime; +using Antlr4.Runtime.Misc; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; + +public class ErrorListener : ConsoleErrorListener +{ + public bool had_error; + + public override void SyntaxError(TextWriter output, IRecognizer recognizer, S offendingSymbol, int line, + int col, string msg, RecognitionException e) + { + had_error = true; + base.SyntaxError(output, recognizer, offendingSymbol, line, col, msg, e); + } +} diff --git a/runtime/CSharp/tests/issue-2693/Program.cs b/runtime/CSharp/tests/issue-2693/Program.cs new file mode 100644 index 0000000000..a7b3efdb04 --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/Program.cs @@ -0,0 +1,90 @@ +using Antlr4.Runtime; +using System; +using System.Linq; +using System.Text; + +public class Program +{ + static void Main(string[] args) + { + bool show_tree = false; + bool show_tokens = false; + string file_name = null; + string input = null; + for (int i = 0; i < args.Length; ++i) + { + if (args[i].Equals("-tokens")) + { + show_tokens = true; + continue; + } + else if (args[i].Equals("-tree")) + { + show_tree = true; + continue; + } + else if (args[i].Equals("-input")) + input = args[i]; + else if (args[i].Equals("-file")) + file_name = args[++i]; + } + ICharStream str = null; + if (input == null && file_name == null) + { + StringBuilder sb = new StringBuilder(); + int ch; + while ((ch = System.Console.Read()) != -1) + { + sb.Append((char)ch); + } + input = sb.ToString(); + str = CharStreams.fromString(input); + } + else if (input != null) + { + str = CharStreams.fromString(input); + } + else if (file_name != null) + { + str = CharStreams.fromPath(file_name); + } + var lexer = new asm8080Lexer(str); + if (show_tokens) + { + StringBuilder new_s = new StringBuilder(); + for (int 
i = 0; ; ++i) + { + var ro_token = lexer.NextToken(); + var token = (CommonToken)ro_token; + token.TokenIndex = i; + new_s.AppendLine(token.ToString()); + if (token.Type == Antlr4.Runtime.TokenConstants.EOF) + break; + } + System.Console.Error.WriteLine(new_s.ToString()); + } + lexer.Reset(); + var tokens = new CommonTokenStream(lexer); + var parser = new asm8080Parser(tokens); + var listener_lexer = new ErrorListener(); + var listener_parser = new ErrorListener(); + lexer.AddErrorListener(listener_lexer); + parser.AddErrorListener(listener_parser); + parser.Profile = true; + var tree = parser.prog(); + if (listener_lexer.had_error || listener_parser.had_error) + { + System.Console.Error.WriteLine("parse failed."); + } + else + { + System.Console.Error.WriteLine("parse succeeded."); + } + if (show_tree) + { + System.Console.Error.WriteLine(tree.ToStringTree()); + } + System.Console.Out.WriteLine(String.Join(", ", parser.ParseInfo.getDecisionInfo().Select(d => d.ToString()))); + System.Environment.Exit(listener_lexer.had_error || listener_parser.had_error ? 
1 : 0); + } +} diff --git a/runtime/CSharp/tests/issue-2693/Test.csproj b/runtime/CSharp/tests/issue-2693/Test.csproj new file mode 100644 index 0000000000..33b85beef5 --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/Test.csproj @@ -0,0 +1,27 @@ + + + + net5.0 + Exe + + + + + + + + ../../../../tool/target/antlr4-*-SNAPSHOT-complete.jar + + + + + + + + + PackageReference + + + 1701;1702;3021 + + diff --git a/runtime/CSharp/tests/issue-2693/Test.sln b/runtime/CSharp/tests/issue-2693/Test.sln new file mode 100644 index 0000000000..4768052b0f --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/Test.sln @@ -0,0 +1,31 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.31019.35 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Test", "Test.csproj", "{FD11E8CC-1631-4FF3-9B44-F10084562311}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Antlr4", "..\..\src\Antlr4.csproj", "{A60B5000-4473-4D00-85C4-C3A4B469F608}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {FD11E8CC-1631-4FF3-9B44-F10084562311}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {FD11E8CC-1631-4FF3-9B44-F10084562311}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FD11E8CC-1631-4FF3-9B44-F10084562311}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FD11E8CC-1631-4FF3-9B44-F10084562311}.Release|Any CPU.Build.0 = Release|Any CPU + {A60B5000-4473-4D00-85C4-C3A4B469F608}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A60B5000-4473-4D00-85C4-C3A4B469F608}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A60B5000-4473-4D00-85C4-C3A4B469F608}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A60B5000-4473-4D00-85C4-C3A4B469F608}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) 
= preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {2FFB66F7-2552-4F2B-B97E-77B5F8743ED4} + EndGlobalSection +EndGlobal diff --git a/runtime/CSharp/tests/issue-2693/TreeOutput.cs b/runtime/CSharp/tests/issue-2693/TreeOutput.cs new file mode 100644 index 0000000000..2975049461 --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/TreeOutput.cs @@ -0,0 +1,108 @@ + +// Template generated code from Antlr4BuildTasks.dotnet-antlr v 1.3 + +using Antlr4.Runtime; +using Antlr4.Runtime.Misc; +using Antlr4.Runtime.Tree; +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; + +public class TreeOutput +{ + private static int changed = 0; + private static bool first_time = true; + + public static StringBuilder OutputTree(IParseTree tree, Lexer lexer, Parser parser, CommonTokenStream stream) + { + changed = 0; + first_time = true; + var sb = new StringBuilder(); + ParenthesizedAST(tree, sb, lexer, parser, stream); + return sb; + } + + private static void ParenthesizedAST(IParseTree tree, StringBuilder sb, Lexer lexer, Parser parser, CommonTokenStream stream, int level = 0) + { + if (tree as TerminalNodeImpl != null) + { + TerminalNodeImpl tok = tree as TerminalNodeImpl; + Interval interval = tok.SourceInterval; + IList inter = null; + if (tok.Symbol.TokenIndex >= 0) + inter = stream?.GetHiddenTokensToLeft(tok.Symbol.TokenIndex); + if (inter != null) + foreach (var t in inter) + { + var ty = tok.Symbol.Type; + var name = lexer.Vocabulary.GetSymbolicName(ty); + StartLine(sb, level); + sb.AppendLine("(" + name + " text = " + PerformEscapes(t.Text) + " " + lexer.ChannelNames[t.Channel]); + } + { + var ty = tok.Symbol.Type; + var name = lexer.Vocabulary.GetSymbolicName(ty); + StartLine(sb, level); + sb.AppendLine("( " + name + " i =" + tree.SourceInterval.a + + " txt =" + PerformEscapes(tree.GetText()) + + " tt =" + tok.Symbol.Type + + " " + 
lexer.ChannelNames[tok.Symbol.Channel]); + } + } + else + { + var x = tree as RuleContext; + var ri = x.RuleIndex; + var name = parser.RuleNames[ri]; + StartLine(sb, level); + sb.Append("( " + name); + sb.AppendLine(); + } + for (int i = 0; i= 0) + { + if (!first_time) + { + for (int j = 0; j < level; ++j) sb.Append(" "); + for (int k = 0; k < 1 + changed - level; ++k) sb.Append(") "); + sb.AppendLine(); + } + changed = 0; + first_time = false; + } + changed = level; + for (int j = 0; j < level; ++j) sb.Append(" "); + } + + private static string ToLiteral(string input) + { + using (var writer = new StringWriter()) + { + var literal = input; + literal = literal.Replace("\\", "\\\\"); + return literal; + } + } + + public static string PerformEscapes(string s) + { + StringBuilder new_s = new StringBuilder(); + new_s.Append(ToLiteral(s)); + return new_s.ToString(); + } +} diff --git a/runtime/CSharp/tests/issue-2693/asm8080.g4 b/runtime/CSharp/tests/issue-2693/asm8080.g4 new file mode 100644 index 0000000000..6f22bd08dc --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/asm8080.g4 @@ -0,0 +1,288 @@ +/* +BSD License + +Copyright (c) 2018, Tom Everett +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of Tom Everett nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +/* +* http://fms.komkon.org/comp/CPUs/8080.txt +*/ + +grammar asm8080; + +prog + : (line? EOL) + + ; + +line + : lbl? (instruction | directive)? comment? + ; + +instruction + : opcode expressionlist? + ; + +opcode + : OPCODE + ; + +register_ + : REGISTER + ; + +directive + : argument? assemblerdirective expressionlist + ; + +assemblerdirective + : ASSEMBLER_DIRECTIVE + ; + +lbl + : label ':'? 
+ ; + +expressionlist + : expression (',' expression)* + ; + +label + : name + ; + +expression + : multiplyingExpression (('+' | '-') multiplyingExpression)* + ; + +multiplyingExpression + : argument (('*' | '/') argument)* + ; + +argument + : number + | register_ + | dollar + | name + | string + | ('(' expression ')') + ; + +dollar + : '$' + ; + +string + : STRING + ; + +name + : NAME + ; + +number + : NUMBER + ; + +comment + : COMMENT + ; + + +ASSEMBLER_DIRECTIVE + : (O R G) | (E N D) | (E Q U) | (D B) | (D W) | (D S) | (I F) | (E N D I F) | (S E T) + ; + + +REGISTER + : 'A' | 'B' | 'C' | 'D' | 'E' | 'H' | 'L' | 'PC' | 'SP' + ; + + +OPCODE + : (M O V) | (M V I) | (L D A) | (S T A) | (L D A X) | (S T A X) | (L H L D) | (S H L D) | (L X I) | (P U S H) | (P O P) | (X T H L) | (S P H L) | (P C H L) | (X C H G) | (A D D) | (S U B) | (I N R) | (D C R) | (C M P) | (A N A) | (O R A) | (X R A) | (A D I) | (S U I) | (C P I) | (A N I) | (O R I) | (X R I) | (D A A) | (A D C) | (A C I) | (S B B) | (S B I) | (D A D) | (I N X) | (D C X) | (J M P) | (C A L L) | (R E T) | (R A L) | (R A R) | (R L C) | (R R C) | (I N) | (O U T) | (C M C) | (S T C) | (C M A) | (H L T) | (N O P) | (D I) | (E I) | (R S T) | (J N Z) | (J Z) | (J N C) | (J C) | (J P O) | (J P E) | (J P) | (J M) | (C N Z) | (C Z) | (C N C) | (C C) | (C P O) | (C P E) | (C P) | (C M) | (R N Z) | (R Z) | (R N C) | (R C) | (R P O) | (R P E) | (R P) | (R M) + ; + + +fragment A + : ('a' | 'A') + ; + + +fragment B + : ('b' | 'B') + ; + + +fragment C + : ('c' | 'C') + ; + + +fragment D + : ('d' | 'D') + ; + + +fragment E + : ('e' | 'E') + ; + + +fragment F + : ('f' | 'F') + ; + + +fragment G + : ('g' | 'G') + ; + + +fragment H + : ('h' | 'H') + ; + + +fragment I + : ('i' | 'I') + ; + + +fragment J + : ('j' | 'J') + ; + + +fragment K + : ('k' | 'K') + ; + + +fragment L + : ('l' | 'L') + ; + + +fragment M + : ('m' | 'M') + ; + + +fragment N + : ('n' | 'N') + ; + + +fragment O + : ('o' | 'O') + ; + + +fragment P + : ('p' | 'P') + 
; + + +fragment Q + : ('q' | 'Q') + ; + + +fragment R + : ('r' | 'R') + ; + + +fragment S + : ('s' | 'S') + ; + + +fragment T + : ('t' | 'T') + ; + + +fragment U + : ('u' | 'U') + ; + + +fragment V + : ('v' | 'V') + ; + + +fragment W + : ('w' | 'W') + ; + + +fragment X + : ('x' | 'X') + ; + + +fragment Y + : ('y' | 'Y') + ; + + +fragment Z + : ('z' | 'Z') + ; + + +NAME + : [a-zA-Z] [a-zA-Z0-9."]* + ; + + +NUMBER + : '$'? [0-9a-fA-F] + ('H' | 'h')? + ; + + +COMMENT + : ';' ~ [\r\n]* -> skip + ; + + +STRING + : '\u0027' ~'\u0027'* '\u0027' + ; + + +EOL + : [\r\n] + + ; + + +WS + : [ \t] -> skip + ; diff --git a/runtime/CSharp/tests/issue-2693/cpm22.asm b/runtime/CSharp/tests/issue-2693/cpm22.asm new file mode 100644 index 0000000000..28cfe03f0c --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/cpm22.asm @@ -0,0 +1,3739 @@ + +;************************************************************** +;* +;* C P / M version 2 . 2 +;* +;* Reconstructed from memory image on February 27, 1981 +;* +;* by Clark A. Calkins +;* +;************************************************************** +; +; Set memory limit here. This is the amount of contigeous +; ram starting from 0000. CP/M will reside at the end of this space. +; +MEM EQU 62 ;for a 62k system (TS802 TEST - WORKS OK). +; +IOBYTE EQU 3 ;i/o definition byte. +TDRIVE EQU 4 ;current drive name and user number. +ENTRY EQU 5 ;entry point for the cp/m bdos. +TFCB EQU 5CH ;default file control block. +TBUFF EQU 80H ;i/o buffer and command line storage. +TBASE EQU 100H ;transiant program storage area. +; +; Set control character equates. 
+; +CNTRLC EQU 3 ;control-c +CNTRLE EQU 05H ;control-e +BS EQU 08H ;backspace +TAB EQU 09H ;tab +LF EQU 0AH ;line feed +FF EQU 0CH ;form feed +CR EQU 0DH ;carriage return +CNTRLP EQU 10H ;control-p +CNTRLR EQU 12H ;control-r +CNTRLS EQU 13H ;control-s +CNTRLU EQU 15H ;control-u +CNTRLX EQU 18H ;control-x +CNTRLZ EQU 1AH ;control-z (end-of-file mark) +DEL EQU 7FH ;rubout +; +; Set origin for CP/M +; + ORG (MEM-7)*1024 +; +CBASE JMP COMMAND ;execute command processor (ccp). + JMP CLEARBUF ;entry to empty input buffer before starting ccp. + +; +; Standard cp/m ccp input buffer. Format is (max length), +; (actual length), (char #1), (char #2), (char #3), etc. +; +INBUFF DB 127 ;length of input buffer. + DB 0 ;current length of contents. + DB 'Copyright' + DB ' 1979 (c) by Digital Research ' + DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +INPOINT DW INBUFF+2;input line pointer +NAMEPNT DW 0 ;input line pointer used for error message. Points to +; ;start of name in error. +; +; Routine to print (A) on the console. All registers used. +; +PRINT MOV E,A ;setup bdos call. + MVI C,2 + JMP ENTRY +; +; Routine to print (A) on the console and to save (BC). +; +PRINTB PUSH B + CALL PRINT + POP B + RET +; +; Routine to send a carriage return, line feed combination +; to the console. +; +CRLF MVI A,CR + CALL PRINTB + MVI A,LF + JMP PRINTB +; +; Routine to send one space to the console and save (BC). +; +SPACE MVI A,' ' + JMP PRINTB +; +; Routine to print character string pointed to be (BC) on the +; console. It must terminate with a null byte. +; +PLINE PUSH B + CALL CRLF + POP H +PLINE2 MOV A,M + ORA A + RZ + INX H + PUSH H + CALL PRINT + POP H + JMP PLINE2 +; +; Routine to reset the disk system. +; +RESDSK MVI C,13 + JMP ENTRY +; +; Routine to select disk (A). 
+; +DSKSEL MOV E,A + MVI C,14 + JMP ENTRY +; +; Routine to call bdos and save the return code. The zero +; flag is set on a return of 0ffh. +; +ENTRY1 CALL ENTRY + STA RTNCODE ;save return code. + INR A ;set zero if 0ffh returned. + RET +; +; Routine to open a file. (DE) must point to the FCB. +; +OPEN MVI C,15 + JMP ENTRY1 +; +; Routine to open file at (FCB). +; +OPENFCB XRA A ;clear the record number byte at fcb+32 + STA FCB+32 + LXI D,FCB + JMP OPEN +; +; Routine to close a file. (DE) points to FCB. +; +CLOSE MVI C,16 + JMP ENTRY1 +; +; Routine to search for the first file with ambigueous name +; (DE). +; +SRCHFST MVI C,17 + JMP ENTRY1 +; +; Search for the next ambigeous file name. +; +SRCHNXT MVI C,18 + JMP ENTRY1 +; +; Search for file at (FCB). +; +SRCHFCB LXI D,FCB + JMP SRCHFST +; +; Routine to delete a file pointed to by (DE). +; +DELETE MVI C,19 + JMP ENTRY +; +; Routine to call the bdos and set the zero flag if a zero +; status is returned. +; +ENTRY2 CALL ENTRY + ORA A ;set zero flag if appropriate. + RET +; +; Routine to read the next record from a sequential file. +; (DE) points to the FCB. +; +RDREC MVI C,20 + JMP ENTRY2 +; +; Routine to read file at (FCB). +; +READFCB LXI D,FCB + JMP RDREC +; +; Routine to write the next record of a sequential file. +; (DE) points to the FCB. +; +WRTREC MVI C,21 + JMP ENTRY2 +; +; Routine to create the file pointed to by (DE). +; +CREATE MVI C,22 + JMP ENTRY1 +; +; Routine to rename the file pointed to by (DE). Note that +; the new name starts at (DE+16). +; +RENAM MVI C,23 + JMP ENTRY +; +; Get the current user code. +; +GETUSR MVI E,0FFH +; +; Routne to get or set the current user code. +; If (E) is FF then this is a GET, else it is a SET. +; +GETSETUC:MVI C,32 + JMP ENTRY +; +; Routine to set the current drive byte at (TDRIVE). +; +SETCDRV CALL GETUSR ;get user number + ADD A ;and shift into the upper 4 bits. + ADD A + ADD A + ADD A + LXI H,CDRIVE;now add in the current drive number. 
+ ORA M + STA TDRIVE ;and save. + RET +; +; Move currently active drive down to (TDRIVE). +; +MOVECD LDA CDRIVE + STA TDRIVE + RET +; +; Routine to convert (A) into upper case ascii. Only letters +; are affected. +; +UPPER CPI 'a' ;check for letters in the range of 'a' to 'z'. + RC + CPI '{' + RNC + ANI 5FH ;convert it if found. + RET +; +; Routine to get a line of input. We must check to see if the +; user is in (BATCH) mode. If so, then read the input from file +; ($$$.SUB). At the end, reset to console input. +; +GETINP LDA BATCH ;if =0, then use console input. + ORA A + JZ GETINP1 +; +; Use the submit file ($$$.sub) which is prepared by a +; SUBMIT run. It must be on drive (A) and it will be deleted +; if and error occures (like eof). +; + LDA CDRIVE ;select drive 0 if need be. + ORA A + MVI A,0 ;always use drive A for submit. + CNZ DSKSEL ;select it if required. + LXI D,BATCHFCB + CALL OPEN ;look for it. + JZ GETINP1 ;if not there, use normal input. + LDA BATCHFCB+15;get last record number+1. + DCR A + STA BATCHFCB+32 + LXI D,BATCHFCB + CALL RDREC ;read last record. + JNZ GETINP1 ;quit on end of file. +; +; Move this record into input buffer. +; + LXI D,INBUFF+1 + LXI H,TBUFF ;data was read into buffer here. + MVI B,128 ;all 128 characters may be used. + CALL HL2DE ;(HL) to (DE), (B) bytes. + LXI H,BATCHFCB+14 + MVI M,0 ;zero out the 's2' byte. + INX H ;and decrement the record count. + DCR M + LXI D,BATCHFCB;close the batch file now. + CALL CLOSE + JZ GETINP1 ;quit on an error. + LDA CDRIVE ;re-select previous drive if need be. + ORA A + CNZ DSKSEL ;don't do needless selects. +; +; Print line just read on console. +; + LXI H,INBUFF+2 + CALL PLINE2 + CALL CHKCON ;check console, quit on a key. + JZ GETINP2 ;jump if no key is pressed. +; +; Terminate the submit job on any keyboard input. Delete this +; file such that it is not re-started and jump to normal keyboard +; input section. +; + CALL DELBATCH;delete the batch file. 
+ JMP CMMND1 ;and restart command input. +; +; Get here for normal keyboard input. Delete the submit file +; incase there was one. +; +GETINP1 CALL DELBATCH;delete file ($$$.sub). + CALL SETCDRV ;reset active disk. + MVI C,10 ;get line from console device. + LXI D,INBUFF + CALL ENTRY + CALL MOVECD ;reset current drive (again). +; +; Convert input line to upper case. +; +GETINP2 LXI H,INBUFF+1 + MOV B,M ;(B)=character counter. +GETINP3 INX H + MOV A,B ;end of the line? + ORA A + JZ GETINP4 + MOV A,M ;convert to upper case. + CALL UPPER + MOV M,A + DCR B ;adjust character count. + JMP GETINP3 +GETINP4 MOV M,A ;add trailing null. + LXI H,INBUFF+2 + SHLD INPOINT ;reset input line pointer. + RET +; +; Routine to check the console for a key pressed. The zero +; flag is set is none, else the character is returned in (A). +; +CHKCON MVI C,11 ;check console. + CALL ENTRY + ORA A + RZ ;return if nothing. + MVI C,1 ;else get character. + CALL ENTRY + ORA A ;clear zero flag and return. + RET +; +; Routine to get the currently active drive number. +; +GETDSK MVI C,25 + JMP ENTRY +; +; Set the stabdard dma address. +; +STDDMA LXI D,TBUFF +; +; Routine to set the dma address to (DE). +; +DMASET MVI C,26 + JMP ENTRY +; +; Delete the batch file created by SUBMIT. +; +DELBATCH:LXI H,BATCH ;is batch active? + MOV A,M + ORA A + RZ + MVI M,0 ;yes, de-activate it. + XRA A + CALL DSKSEL ;select drive 0 for sure. + LXI D,BATCHFCB;and delete this file. + CALL DELETE + LDA CDRIVE ;reset current drive. + JMP DSKSEL +; +; Check to two strings at (PATTRN1) and (PATTRN2). They must be +; the same or we halt.... +; +VERIFY LXI D,PATTRN1;these are the serial number bytes. + LXI H,PATTRN2;ditto, but how could they be different? + MVI B,6 ;6 bytes each. +VERIFY1 LDAX D + CMP M + JNZ HALT ;jump to halt routine. + INX D + INX H + DCR B + JNZ VERIFY1 + RET +; +; Print back file name with a '?' to indicate a syntax error. +; +SYNERR CALL CRLF ;end current line. 
+ LHLD NAMEPNT ;this points to name in error. +SYNERR1 MOV A,M ;print it until a space or null is found. + CPI ' ' + JZ SYNERR2 + ORA A + JZ SYNERR2 + PUSH H + CALL PRINT + POP H + INX H + JMP SYNERR1 +SYNERR2 MVI A,'?' ;add trailing '?'. + CALL PRINT + CALL CRLF + CALL DELBATCH;delete any batch file. + JMP CMMND1 ;and restart from console input. +; +; Check character at (DE) for legal command input. Note that the +; zero flag is set if the character is a delimiter. +; +CHECK LDAX D + ORA A + RZ + CPI ' ' ;control characters are not legal here. + JC SYNERR + RZ ;check for valid delimiter. + CPI '=' + RZ + CPI '_' + RZ + CPI '.' + RZ + CPI ':' + RZ + CPI ';' + RZ + CPI '<' + RZ + CPI '>' + RZ + RET +; +; Get the next non-blank character from (DE). +; +NONBLANK:LDAX D + ORA A ;string ends with a null. + RZ + CPI ' ' + RNZ + INX D + JMP NONBLANK +; +; Add (HL)=(HL)+(A) +; +ADDHL ADD L + MOV L,A + RNC ;take care of any carry. + INR H + RET +; +; Convert the first name in (FCB). +; +CONVFST MVI A,0 +; +; Format a file name (convert * to '?', etc.). On return, +; (A)=0 is an unambigeous name was specified. Enter with (A) equal to +; the position within the fcb for the name (either 0 or 16). +; +CONVERT LXI H,FCB + CALL ADDHL + PUSH H + PUSH H + XRA A + STA CHGDRV ;initialize drive change flag. + LHLD INPOINT ;set (HL) as pointer into input line. + XCHG + CALL NONBLANK;get next non-blank character. + XCHG + SHLD NAMEPNT ;save pointer here for any error message. + XCHG + POP H + LDAX D ;get first character. + ORA A + JZ CONVRT1 + SBI 'A'-1 ;might be a drive name, convert to binary. + MOV B,A ;and save. + INX D ;check next character for a ':'. + LDAX D + CPI ':' + JZ CONVRT2 + DCX D ;nope, move pointer back to the start of the line. +CONVRT1 LDA CDRIVE + MOV M,A + JMP CONVRT3 +CONVRT2 MOV A,B + STA CHGDRV ;set change in drives flag. + MOV M,B + INX D +; +; Convert the basic file name. 
+; +CONVRT3 MVI B,08H +CONVRT4 CALL CHECK + JZ CONVRT8 + INX H + CPI '*' ;note that an '*' will fill the remaining + JNZ CONVRT5 ;field with '?'. + MVI M,'?' + JMP CONVRT6 +CONVRT5 MOV M,A + INX D +CONVRT6 DCR B + JNZ CONVRT4 +CONVRT7 CALL CHECK ;get next delimiter. + JZ GETEXT + INX D + JMP CONVRT7 +CONVRT8 INX H ;blank fill the file name. + MVI M,' ' + DCR B + JNZ CONVRT8 +; +; Get the extension and convert it. +; +GETEXT MVI B,03H + CPI '.' + JNZ GETEXT5 + INX D +GETEXT1 CALL CHECK + JZ GETEXT5 + INX H + CPI '*' + JNZ GETEXT2 + MVI M,'?' + JMP GETEXT3 +GETEXT2 MOV M,A + INX D +GETEXT3 DCR B + JNZ GETEXT1 +GETEXT4 CALL CHECK + JZ GETEXT6 + INX D + JMP GETEXT4 +GETEXT5 INX H + MVI M,' ' + DCR B + JNZ GETEXT5 +GETEXT6 MVI B,3 +GETEXT7 INX H + MVI M,0 + DCR B + JNZ GETEXT7 + XCHG + SHLD INPOINT ;save input line pointer. + POP H +; +; Check to see if this is an ambigeous file name specification. +; Set the (A) register to non zero if it is. +; + LXI B,11 ;set name length. +GETEXT8 INX H + MOV A,M + CPI '?' ;any question marks? + JNZ GETEXT9 + INR B ;count them. +GETEXT9 DCR C + JNZ GETEXT8 + MOV A,B + ORA A + RET +; +; CP/M command table. Note commands can be either 3 or 4 characters long. +; +NUMCMDS EQU 6 ;number of commands +CMDTBL DB 'DIR ' + DB 'ERA ' + DB 'TYPE' + DB 'SAVE' + DB 'REN ' + DB 'USER' +; +; The following six bytes must agree with those at (PATTRN2) +; or cp/m will HALT. Why? +; +PATTRN1 DB 0,22,0,0,0,0;(* serial number bytes *). +; +; Search the command table for a match with what has just +; been entered. If a match is found, then we jump to the +; proper section. Else jump to (UNKNOWN). +; On return, the (C) register is set to the command number +; that matched (or NUMCMDS+1 if no match). +; +SEARCH LXI H,CMDTBL + MVI C,0 +SEARCH1 MOV A,C + CPI NUMCMDS ;this commands exists. + RNC + LXI D,FCB+1 ;check this one. + MVI B,4 ;max command length. +SEARCH2 LDAX D + CMP M + JNZ SEARCH3 ;not a match. 
+ INX D + INX H + DCR B + JNZ SEARCH2 + LDAX D ;allow a 3 character command to match. + CPI ' ' + JNZ SEARCH4 + MOV A,C ;set return register for this command. + RET +SEARCH3 INX H + DCR B + JNZ SEARCH3 +SEARCH4 INR C + JMP SEARCH1 +; +; Set the input buffer to empty and then start the command +; processor (ccp). +; +CLEARBUF:XRA A + STA INBUFF+1;second byte is actual length. +; +;************************************************************** +;* +;* +;* C C P - C o n s o l e C o m m a n d P r o c e s s o r +;* +;************************************************************** +;* +COMMAND LXI SP,CCPSTACK;setup stack area. + PUSH B ;note that (C) should be equal to: + MOV A,C ;(uuuudddd) where 'uuuu' is the user number + RAR ;and 'dddd' is the drive number. + RAR + RAR + RAR + ANI 0FH ;isolate the user number. + MOV E,A + CALL GETSETUC;and set it. + CALL RESDSK ;reset the disk system. + STA BATCH ;clear batch mode flag. + POP B + MOV A,C + ANI 0FH ;isolate the drive number. + STA CDRIVE ;and save. + CALL DSKSEL ;...and select. + LDA INBUFF+1 + ORA A ;anything in input buffer already? + JNZ CMMND2 ;yes, we just process it. +; +; Entry point to get a command line from the console. +; +CMMND1 LXI SP,CCPSTACK;set stack straight. + CALL CRLF ;start a new line on the screen. + CALL GETDSK ;get current drive. + ADI 'a' + CALL PRINT ;print current drive. + MVI A,'>' + CALL PRINT ;and add prompt. + CALL GETINP ;get line from user. +; +; Process command line here. +; +CMMND2 LXI D,TBUFF + CALL DMASET ;set standard dma address. + CALL GETDSK + STA CDRIVE ;set current drive. + CALL CONVFST ;convert name typed in. + CNZ SYNERR ;wild cards are not allowed. + LDA CHGDRV ;if a change in drives was indicated, + ORA A ;then treat this as an unknown command + JNZ UNKNOWN ;which gets executed. + CALL SEARCH ;else search command table for a match. +; +; Note that an unknown command returns +; with (A) pointing to the last address +; in our table which is (UNKNOWN). 
+; + LXI H,CMDADR;now, look thru our address table for command (A). + MOV E,A ;set (DE) to command number. + MVI D,0 + DAD D + DAD D ;(HL)=(CMDADR)+2*(command number). + MOV A,M ;now pick out this address. + INX H + MOV H,M + MOV L,A + PCHL ;now execute it. +; +; CP/M command address table. +; +CMDADR DW DIRECT,ERASE,TYPE,SAVE + DW RENAME,USER,UNKNOWN +; +; Halt the system. Reason for this is unknown at present. +; +HALT LXI H,76F3H ;'DI HLT' instructions. + SHLD CBASE + LXI H,CBASE + PCHL +; +; Read error while TYPEing a file. +; +RDERROR LXI B,RDERR + JMP PLINE +RDERR DB 'Read error',0 +; +; Required file was not located. +; +NONE LXI B,NOFILE + JMP PLINE +NOFILE DB 'No file',0 +; +; Decode a command of the form 'A>filename number{ filename}. +; Note that a drive specifier is not allowed on the first file +; name. On return, the number is in register (A). Any error +; causes 'filename?' to be printed and the command is aborted. +; +DECODE CALL CONVFST ;convert filename. + LDA CHGDRV ;do not allow a drive to be specified. + ORA A + JNZ SYNERR + LXI H,FCB+1 ;convert number now. + LXI B,11 ;(B)=sum register, (C)=max digit count. +DECODE1 MOV A,M + CPI ' ' ;a space terminates the numeral. + JZ DECODE3 + INX H + SUI '0' ;make binary from ascii. + CPI 10 ;legal digit? + JNC SYNERR + MOV D,A ;yes, save it in (D). + MOV A,B ;compute (B)=(B)*10 and check for overflow. + ANI 0E0H + JNZ SYNERR + MOV A,B + RLC + RLC + RLC ;(A)=(B)*8 + ADD B ;.......*9 + JC SYNERR + ADD B ;.......*10 + JC SYNERR + ADD D ;add in new digit now. +DECODE2 JC SYNERR + MOV B,A ;and save result. + DCR C ;only look at 11 digits. + JNZ DECODE1 + RET +DECODE3 MOV A,M ;spaces must follow (why?). + CPI ' ' + JNZ SYNERR + INX H +DECODE4 DCR C + JNZ DECODE3 + MOV A,B ;set (A)=the numeric value entered. + RET +; +; Move 3 bytes from (HL) to (DE). Note that there is only +; one reference to this at (A2D5h). +; +MOVE3 MVI B,3 +; +; Move (B) bytes from (HL) to (DE). 
+; +HL2DE MOV A,M + STAX D + INX H + INX D + DCR B + JNZ HL2DE + RET +; +; Compute (HL)=(TBUFF)+(A)+(C) and get the byte that's here. +; +EXTRACT LXI H,TBUFF + ADD C + CALL ADDHL + MOV A,M + RET +; +; Check drive specified. If it means a change, then the new +; drive will be selected. In any case, the drive byte of the +; fcb will be set to null (means use current drive). +; +DSELECT XRA A ;null out first byte of fcb. + STA FCB + LDA CHGDRV ;a drive change indicated? + ORA A + RZ + DCR A ;yes, is it the same as the current drive? + LXI H,CDRIVE + CMP M + RZ + JMP DSKSEL ;no. Select it then. +; +; Check the drive selection and reset it to the previous +; drive if it was changed for the preceeding command. +; +RESETDR LDA CHGDRV ;drive change indicated? + ORA A + RZ + DCR A ;yes, was it a different drive? + LXI H,CDRIVE + CMP M + RZ + LDA CDRIVE ;yes, re-select our old drive. + JMP DSKSEL +; +;************************************************************** +;* +;* D I R E C T O R Y C O M M A N D +;* +;************************************************************** +; +DIRECT CALL CONVFST ;convert file name. + CALL DSELECT ;select indicated drive. + LXI H,FCB+1 ;was any file indicated? + MOV A,M + CPI ' ' + JNZ DIRECT2 + MVI B,11 ;no. Fill field with '?' - same as *.*. +DIRECT1 MVI M,'?' + INX H + DCR B + JNZ DIRECT1 +DIRECT2 MVI E,0 ;set initial cursor position. + PUSH D + CALL SRCHFCB ;get first file name. + CZ NONE ;none found at all? +DIRECT3 JZ DIRECT9 ;terminate if no more names. + LDA RTNCODE ;get file's position in segment (0-3). + RRC + RRC + RRC + ANI 60H ;(A)=position*32 + MOV C,A + MVI A,10 + CALL EXTRACT ;extract the tenth entry in fcb. + RAL ;check system file status bit. + JC DIRECT8 ;we don't list them. + POP D + MOV A,E ;bump name count. + INR E + PUSH D + ANI 03H ;at end of line? + PUSH PSW + JNZ DIRECT4 + CALL CRLF ;yes, end this line and start another. + PUSH B + CALL GETDSK ;start line with ('A:'). 
+ POP B + ADI 'A' + CALL PRINTB + MVI A,':' + CALL PRINTB + JMP DIRECT5 +DIRECT4 CALL SPACE ;add seperator between file names. + MVI A,':' + CALL PRINTB +DIRECT5 CALL SPACE + MVI B,1 ;'extract' each file name character at a time. +DIRECT6 MOV A,B + CALL EXTRACT + ANI 7FH ;strip bit 7 (status bit). + CPI ' ' ;are we at the end of the name? + JNZ DRECT65 + POP PSW ;yes, don't print spaces at the end of a line. + PUSH PSW + CPI 3 + JNZ DRECT63 + MVI A,9 ;first check for no extension. + CALL EXTRACT + ANI 7FH + CPI ' ' + JZ DIRECT7 ;don't print spaces. +DRECT63 MVI A,' ' ;else print them. +DRECT65 CALL PRINTB + INR B ;bump to next character psoition. + MOV A,B + CPI 12 ;end of the name? + JNC DIRECT7 + CPI 9 ;nope, starting extension? + JNZ DIRECT6 + CALL SPACE ;yes, add seperating space. + JMP DIRECT6 +DIRECT7 POP PSW ;get the next file name. +DIRECT8 CALL CHKCON ;first check console, quit on anything. + JNZ DIRECT9 + CALL SRCHNXT ;get next name. + JMP DIRECT3 ;and continue with our list. +DIRECT9 POP D ;restore the stack and return to command level. + JMP GETBACK +; +;************************************************************** +;* +;* E R A S E C O M M A N D +;* +;************************************************************** +; +ERASE CALL CONVFST ;convert file name. + CPI 11 ;was '*.*' entered? + JNZ ERASE1 + LXI B,YESNO ;yes, ask for confirmation. + CALL PLINE + CALL GETINP + LXI H,INBUFF+1 + DCR M ;must be exactly 'y'. + JNZ CMMND1 + INX H + MOV A,M + CPI 'Y' + JNZ CMMND1 + INX H + SHLD INPOINT ;save input line pointer. +ERASE1 CALL DSELECT ;select desired disk. + LXI D,FCB + CALL DELETE ;delete the file. + INR A + CZ NONE ;not there? + JMP GETBACK ;return to command level now. +YESNO DB 'All (y/n)?',0 +; +;************************************************************** +;* +;* T Y P E C O M M A N D +;* +;************************************************************** +; +TYPE CALL CONVFST ;convert file name. + JNZ SYNERR ;wild cards not allowed. 
+ CALL DSELECT ;select indicated drive. + CALL OPENFCB ;open the file. + JZ TYPE5 ;not there? + CALL CRLF ;ok, start a new line on the screen. + LXI H,NBYTES;initialize byte counter. + MVI M,0FFH ;set to read first sector. +TYPE1 LXI H,NBYTES +TYPE2 MOV A,M ;have we written the entire sector? + CPI 128 + JC TYPE3 + PUSH H ;yes, read in the next one. + CALL READFCB + POP H + JNZ TYPE4 ;end or error? + XRA A ;ok, clear byte counter. + MOV M,A +TYPE3 INR M ;count this byte. + LXI H,TBUFF ;and get the (A)th one from the buffer (TBUFF). + CALL ADDHL + MOV A,M + CPI CNTRLZ ;end of file mark? + JZ GETBACK + CALL PRINT ;no, print it. + CALL CHKCON ;check console, quit if anything ready. + JNZ GETBACK + JMP TYPE1 +; +; Get here on an end of file or read error. +; +TYPE4 DCR A ;read error? + JZ GETBACK + CALL RDERROR ;yes, print message. +TYPE5 CALL RESETDR ;and reset proper drive + JMP SYNERR ;now print file name with problem. +; +;************************************************************** +;* +;* S A V E C O M M A N D +;* +;************************************************************** +; +SAVE CALL DECODE ;get numeric number that follows SAVE. + PUSH PSW ;save number of pages to write. + CALL CONVFST ;convert file name. + JNZ SYNERR ;wild cards not allowed. + CALL DSELECT ;select specified drive. + LXI D,FCB ;now delete this file. + PUSH D + CALL DELETE + POP D + CALL CREATE ;and create it again. + JZ SAVE3 ;can't create? + XRA A ;clear record number byte. + STA FCB+32 + POP PSW ;convert pages to sectors. + MOV L,A + MVI H,0 + DAD H ;(HL)=number of sectors to write. + LXI D,TBASE ;and we start from here. +SAVE1 MOV A,H ;done yet? + ORA L + JZ SAVE2 + DCX H ;nope, count this and compute the start + PUSH H ;of the next 128 byte sector. + LXI H,128 + DAD D + PUSH H ;save it and set the transfer address. + CALL DMASET + LXI D,FCB ;write out this sector now. + CALL WRTREC + POP D ;reset (DE) to the start of the last sector. + POP H ;restore sector count. 
+ JNZ SAVE3 ;write error? + JMP SAVE1 +; +; Get here after writing all of the file. +; +SAVE2 LXI D,FCB ;now close the file. + CALL CLOSE + INR A ;did it close ok? + JNZ SAVE4 +; +; Print out error message (no space). +; +SAVE3 LXI B,NOSPACE + CALL PLINE +SAVE4 CALL STDDMA ;reset the standard dma address. + JMP GETBACK +NOSPACE DB 'No space',0 +; +;************************************************************** +;* +;* R E N A M E C O M M A N D +;* +;************************************************************** +; +RENAME CALL CONVFST ;convert first file name. + JNZ SYNERR ;wild cards not allowed. + LDA CHGDRV ;remember any change in drives specified. + PUSH PSW + CALL DSELECT ;and select this drive. + CALL SRCHFCB ;is this file present? + JNZ RENAME6 ;yes, print error message. + LXI H,FCB ;yes, move this name into second slot. + LXI D,FCB+16 + MVI B,16 + CALL HL2DE + LHLD INPOINT ;get input pointer. + XCHG + CALL NONBLANK;get next non blank character. + CPI '=' ;only allow an '=' or '_' seperator. + JZ RENAME1 + CPI '_' + JNZ RENAME5 +RENAME1 XCHG + INX H ;ok, skip seperator. + SHLD INPOINT ;save input line pointer. + CALL CONVFST ;convert this second file name now. + JNZ RENAME5 ;again, no wild cards. + POP PSW ;if a drive was specified, then it + MOV B,A ;must be the same as before. + LXI H,CHGDRV + MOV A,M + ORA A + JZ RENAME2 + CMP B + MOV M,B + JNZ RENAME5 ;they were different, error. +RENAME2 MOV M,B; reset as per the first file specification. + XRA A + STA FCB ;clear the drive byte of the fcb. +RENAME3 CALL SRCHFCB ;and go look for second file. + JZ RENAME4 ;doesn't exist? + LXI D,FCB + CALL RENAM ;ok, rename the file. + JMP GETBACK +; +; Process rename errors here. +; +RENAME4 CALL NONE ;file not there. + JMP GETBACK +RENAME5 CALL RESETDR ;bad command format. + JMP SYNERR +RENAME6 LXI B,EXISTS;destination file already exists. 
+ CALL PLINE + JMP GETBACK +EXISTS DB 'File exists',0 +; +;************************************************************** +;* +;* U S E R C O M M A N D +;* +;************************************************************** +; +USER CALL DECODE ;get numeric value following command. + CPI 16 ;legal user number? + JNC SYNERR + MOV E,A ;yes but is there anything else? + LDA FCB+1 + CPI ' ' + JZ SYNERR ;yes, that is not allowed. + CALL GETSETUC;ok, set user code. + JMP GETBACK1 +; +;************************************************************** +;* +;* T R A N S I A N T P R O G R A M C O M M A N D +;* +;************************************************************** +; +UNKNOWN CALL VERIFY ;check for valid system (why?). + LDA FCB+1 ;anything to execute? + CPI ' ' + JNZ UNKWN1 + LDA CHGDRV ;nope, only a drive change? + ORA A + JZ GETBACK1;neither??? + DCR A + STA CDRIVE ;ok, store new drive. + CALL MOVECD ;set (TDRIVE) also. + CALL DSKSEL ;and select this drive. + JMP GETBACK1;then return. +; +; Here a file name was typed. Prepare to execute it. +; +UNKWN1 LXI D,FCB+9 ;an extension specified? + LDAX D + CPI ' ' + JNZ SYNERR ;yes, not allowed. +UNKWN2 PUSH D + CALL DSELECT ;select specified drive. + POP D + LXI H,COMFILE ;set the extension to 'COM'. + CALL MOVE3 + CALL OPENFCB ;and open this file. + JZ UNKWN9 ;not present? +; +; Load in the program. +; + LXI H,TBASE ;store the program starting here. +UNKWN3 PUSH H + XCHG + CALL DMASET ;set transfer address. + LXI D,FCB ;and read the next record. + CALL RDREC + JNZ UNKWN4 ;end of file or read error? + POP H ;nope, bump pointer for next sector. + LXI D,128 + DAD D + LXI D,CBASE ;enough room for the whole file? + MOV A,L + SUB E + MOV A,H + SBB D + JNC UNKWN0 ;no, it can't fit. + JMP UNKWN3 +; +; Get here after finished reading. +; +UNKWN4 POP H + DCR A ;normal end of file? + JNZ UNKWN0 + CALL RESETDR ;yes, reset previous drive. + CALL CONVFST ;convert the first file name that follows + LXI H,CHGDRV;command name. 
+ PUSH H
+ MOV A,M ;set drive code in default fcb.
+ STA FCB
+ MVI A,16 ;put second name 16 bytes later.
+ CALL CONVERT ;convert second file name.
+ POP H
+ MOV A,M ;and set the drive for this second file.
+ STA FCB+16
+ XRA A ;clear record byte in fcb.
+ STA FCB+32
+ LXI D,TFCB ;move it into place at(005Ch).
+ LXI H,FCB
+ MVI B,33
+ CALL HL2DE
+ LXI H,INBUFF+2;now move the remainder of the input
+UNKWN5 MOV A,M ;line down to (0080h). Look for a non blank.
+ ORA A ;or a null.
+ JZ UNKWN6
+ CPI ' '
+ JZ UNKWN6
+ INX H
+ JMP UNKWN5
+;
+; Do the line move now. It ends in a null byte.
+;
+UNKWN6 MVI B,0 ;keep a character count.
+ LXI D,TBUFF+1;data gets put here.
+UNKWN7 MOV A,M ;move it now.
+ STAX D
+ ORA A
+ JZ UNKWN8
+ INR B
+ INX H
+ INX D
+ JMP UNKWN7
+UNKWN8 MOV A,B ;now store the character count.
+ STA TBUFF
+ CALL CRLF ;clean up the screen.
+ CALL STDDMA ;set standard transfer address.
+ CALL SETCDRV ;reset current drive.
+ CALL TBASE ;and execute the program.
+;
+; Transient programs return here (or reboot).
+;
+ LXI SP,BATCH ;set stack first off.
+ CALL MOVECD ;move current drive into place (TDRIVE).
+ CALL DSKSEL ;and reselect it.
+ JMP CMMND1 ;back to command mode.
+;
+; Get here if some error occurred.
+;
+UNKWN9 CALL RESETDR ;improper format.
+ JMP SYNERR
+UNKWN0 LXI B,BADLOAD;read error or won't fit.
+ CALL PLINE
+ JMP GETBACK
+BADLOAD DB 'Bad load',0
+COMFILE DB 'COM' ;command file extension.
+;
+; Get here to return to command level. We will reset the
+; previous active drive and then either return to command
+; level directly or print error message and then return.
+;
+GETBACK CALL RESETDR ;reset previous drive.
+GETBACK1:CALL CONVFST ;convert first name in (FCB).
+ LDA FCB+1 ;if this was just a drive change request,
+ SUI ' ' ;make sure it was valid.
+ LXI H,CHGDRV
+ ORA M
+ JNZ SYNERR
+ JMP CMMND1 ;ok, return to command level.
+;
+; ccp stack area.
+;
+ DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+CCPSTACK:EQU $ ;end of ccp stack area. 
+; +; Batch (or SUBMIT) processing information storage. +; +BATCH DB 0 ;batch mode flag (0=not active). +BATCHFCB:DB 0,'$$$ SUB',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +; +; File control block setup by the CCP. +; +FCB DB 0,' ',0,0,0,0,0,' ',0,0,0,0,0 +RTNCODE DB 0 ;status returned from bdos call. +CDRIVE DB 0 ;currently active drive. +CHGDRV DB 0 ;change in drives flag (0=no change). +NBYTES DW 0 ;byte counter used by TYPE. +; +; Room for expansion? +; + DB 0,0,0,0,0,0,0,0,0,0,0,0,0 +; +; Note that the following six bytes must match those at +; (PATTRN1) or cp/m will HALT. Why? +; +PATTRN2 DB 0,22,0,0,0,0;(* serial number bytes *). +; +;************************************************************** +;* +;* B D O S E N T R Y +;* +;************************************************************** +; +FBASE JMP FBASE1 +; +; Bdos error table. +; +BADSCTR DW ERROR1 ;bad sector on read or write. +BADSLCT DW ERROR2 ;bad disk select. +RODISK DW ERROR3 ;disk is read only. +ROFILE DW ERROR4 ;file is read only. +; +; Entry into bdos. (DE) or (E) are the parameters passed. The +; function number desired is in register (C). +; +FBASE1 XCHG ;save the (DE) parameters. + SHLD PARAMS + XCHG + MOV A,E ;and save register (E) in particular. + STA EPARAM + LXI H,0 + SHLD STATUS ;clear return status. + DAD SP + SHLD USRSTACK;save users stack pointer. + LXI SP,STKAREA;and set our own. + XRA A ;clear auto select storage space. + STA AUTOFLAG + STA AUTO + LXI H,GOBACK;set return address. + PUSH H + MOV A,C ;get function number. + CPI NFUNCTS ;valid function number? + RNC + MOV C,E ;keep single register function here. + LXI H,FUNCTNS;now look thru the function table. + MOV E,A + MVI D,0 ;(DE)=function number. + DAD D + DAD D ;(HL)=(start of table)+2*(function number). + MOV E,M + INX H + MOV D,M ;now (DE)=address for this function. + LHLD PARAMS ;retrieve parameters. + XCHG ;now (DE) has the original parameters. + PCHL ;execute desired function. +; +; BDOS function jump table. 
+; +NFUNCTS EQU 41 ;number of functions in followin table. +; +FUNCTNS DW WBOOT,GETCON,OUTCON,GETRDR,PUNCH,LIST,DIRCIO,GETIOB + DW SETIOB,PRTSTR,RDBUFF,GETCSTS,GETVER,RSTDSK,SETDSK,OPENFIL + DW CLOSEFIL,GETFST,GETNXT,DELFILE,READSEQ,WRTSEQ,FCREATE + DW RENFILE,GETLOG,GETCRNT,PUTDMA,GETALOC,WRTPRTD,GETROV,SETATTR + DW GETPARM,GETUSER,RDRANDOM,WTRANDOM,FILESIZE,SETRAN,LOGOFF,RTN + DW RTN,WTSPECL +; +; Bdos error message section. +; +ERROR1 LXI H,BADSEC ;bad sector message. + CALL PRTERR ;print it and get a 1 char responce. + CPI CNTRLC ;re-boot request (control-c)? + JZ 0 ;yes. + RET ;no, return to retry i/o function. +; +ERROR2 LXI H,BADSEL ;bad drive selected. + JMP ERROR5 +; +ERROR3 LXI H,DISKRO ;disk is read only. + JMP ERROR5 +; +ERROR4 LXI H,FILERO ;file is read only. +; +ERROR5 CALL PRTERR + JMP 0 ;always reboot on these errors. +; +BDOSERR DB 'Bdos Err On ' +BDOSDRV DB ' : $' +BADSEC DB 'Bad Sector$' +BADSEL DB 'Select$' +FILERO DB 'File ' +DISKRO DB 'R/O$' +; +; Print bdos error message. +; +PRTERR PUSH H ;save second message pointer. + CALL OUTCRLF ;send (cr)(lf). + LDA ACTIVE ;get active drive. + ADI 'A' ;make ascii. + STA BDOSDRV ;and put in message. + LXI B,BDOSERR;and print it. + CALL PRTMESG + POP B ;print second message line now. + CALL PRTMESG +; +; Get an input character. We will check our 1 character +; buffer first. This may be set by the console status routine. +; +GETCHAR LXI H,CHARBUF;check character buffer. + MOV A,M ;anything present already? + MVI M,0 ;...either case clear it. + ORA A + RNZ ;yes, use it. + JMP CONIN ;nope, go get a character responce. +; +; Input and echo a character. +; +GETECHO CALL GETCHAR ;input a character. + CALL CHKCHAR ;carriage control? + RC ;no, a regular control char so don't echo. + PUSH PSW ;ok, save character now. + MOV C,A + CALL OUTCON ;and echo it. + POP PSW ;get character and return. + RET +; +; Check character in (A). 
Set the zero flag on a carriage +; control character and the carry flag on any other control +; character. +; +CHKCHAR CPI CR ;check for carriage return, line feed, backspace, + RZ ;or a tab. + CPI LF + RZ + CPI TAB + RZ + CPI BS + RZ + CPI ' ' ;other control char? Set carry flag. + RET +; +; Check the console during output. Halt on a control-s, then +; reboot on a control-c. If anything else is ready, clear the +; zero flag and return (the calling routine may want to do +; something). +; +CKCONSOL:LDA CHARBUF ;check buffer. + ORA A ;if anything, just return without checking. + JNZ CKCON2 + CALL CONST ;nothing in buffer. Check console. + ANI 01H ;look at bit 0. + RZ ;return if nothing. + CALL CONIN ;ok, get it. + CPI CNTRLS ;if not control-s, return with zero cleared. + JNZ CKCON1 + CALL CONIN ;halt processing until another char + CPI CNTRLC ;is typed. Control-c? + JZ 0 ;yes, reboot now. + XRA A ;no, just pretend nothing was ever ready. + RET +CKCON1 STA CHARBUF ;save character in buffer for later processing. +CKCON2 MVI A,1 ;set (A) to non zero to mean something is ready. + RET +; +; Output (C) to the screen. If the printer flip-flop flag +; is set, we will send character to printer also. The console +; will be checked in the process. +; +OUTCHAR LDA OUTFLAG ;check output flag. + ORA A ;anything and we won't generate output. + JNZ OUTCHR1 + PUSH B + CALL CKCONSOL;check console (we don't care whats there). + POP B + PUSH B + CALL CONOUT ;output (C) to the screen. + POP B + PUSH B + LDA PRTFLAG ;check printer flip-flop flag. + ORA A + CNZ LIST ;print it also if non-zero. + POP B +OUTCHR1 MOV A,C ;update cursors position. + LXI H,CURPOS + CPI DEL ;rubouts don't do anything here. + RZ + INR M ;bump line pointer. + CPI ' ' ;and return if a normal character. + RNC + DCR M ;restore and check for the start of the line. + MOV A,M + ORA A + RZ ;ingnore control characters at the start of the line. + MOV A,C + CPI BS ;is it a backspace? 
+ JNZ OUTCHR2
+ DCR M ;yes, backup pointer.
+ RET
+OUTCHR2 CPI LF ;is it a line feed?
+ RNZ ;ignore anything else.
+ MVI M,0 ;reset pointer to start of line.
+ RET
+;
+; Output (A) to the screen. If it is a control character
+; (other than carriage control), use ^x format.
+;
+SHOWIT MOV A,C
+ CALL CHKCHAR ;check character.
+ JNC OUTCON ;not a control, use normal output.
+ PUSH PSW
+ MVI C,'^' ;for a control character, precede it with '^'.
+ CALL OUTCHAR
+ POP PSW
+ ORI '@' ;and then use the letter equivalent.
+ MOV C,A
+;
+; Function to output (C) to the console device and expand tabs
+; if necessary.
+;
+OUTCON MOV A,C
+ CPI TAB ;is it a tab?
+ JNZ OUTCHAR ;use regular output.
+OUTCON1 MVI C,' ' ;yes it is, use spaces instead.
+ CALL OUTCHAR
+ LDA CURPOS ;go until the cursor is at a multiple of 8
+
+ ANI 07H ;position.
+ JNZ OUTCON1
+ RET
+;
+; Echo a backspace character. Erase the previous character
+; on the screen.
+;
+BACKUP CALL BACKUP1 ;backup the screen 1 place.
+ MVI C,' ' ;then blank that character.
+ CALL CONOUT
+BACKUP1 MVI C,BS ;then back space once more.
+ JMP CONOUT
+;
+; Signal a deleted line. Print a '#' at the end and start
+; over.
+;
+NEWLINE MVI C,'#'
+ CALL OUTCHAR ;print this.
+ CALL OUTCRLF ;start new line.
+NEWLN1 LDA CURPOS ;move the cursor to the starting position.
+ LXI H,STARTING
+ CMP M
+ RNC ;there yet?
+ MVI C,' '
+ CALL OUTCHAR ;nope, keep going.
+ JMP NEWLN1
+;
+; Output a (cr) (lf) to the console device (screen).
+;
+OUTCRLF MVI C,CR
+ CALL OUTCHAR
+ MVI C,LF
+ JMP OUTCHAR
+;
+; Print message pointed to by (BC). It will end with a '$'.
+;
+PRTMESG LDAX B ;check for terminating character.
+ CPI '$'
+ RZ
+ INX B
+ PUSH B ;otherwise, bump pointer and print it.
+ MOV C,A
+ CALL OUTCON
+ POP B
+ JMP PRTMESG
+;
+; Function to execute a buffered read.
+;
+RDBUFF LDA CURPOS ;use present location as starting one.
+ STA STARTING
+ LHLD PARAMS ;get the maximum buffer space.
+ MOV C,M
+ INX H ;point to first available space. 
+ PUSH H ;and save. + MVI B,0 ;keep a character count. +RDBUF1 PUSH B + PUSH H +RDBUF2 CALL GETCHAR ;get the next input character. + ANI 7FH ;strip bit 7. + POP H ;reset registers. + POP B + CPI CR ;en of the line? + JZ RDBUF17 + CPI LF + JZ RDBUF17 + CPI BS ;how about a backspace? + JNZ RDBUF3 + MOV A,B ;yes, but ignore at the beginning of the line. + ORA A + JZ RDBUF1 + DCR B ;ok, update counter. + LDA CURPOS ;if we backspace to the start of the line, + STA OUTFLAG ;treat as a cancel (control-x). + JMP RDBUF10 +RDBUF3 CPI DEL ;user typed a rubout? + JNZ RDBUF4 + MOV A,B ;ignore at the start of the line. + ORA A + JZ RDBUF1 + MOV A,M ;ok, echo the prevoius character. + DCR B ;and reset pointers (counters). + DCX H + JMP RDBUF15 +RDBUF4 CPI CNTRLE ;physical end of line? + JNZ RDBUF5 + PUSH B ;yes, do it. + PUSH H + CALL OUTCRLF + XRA A ;and update starting position. + STA STARTING + JMP RDBUF2 +RDBUF5 CPI CNTRLP ;control-p? + JNZ RDBUF6 + PUSH H ;yes, flip the print flag filp-flop byte. + LXI H,PRTFLAG + MVI A,1 ;PRTFLAG=1-PRTFLAG + SUB M + MOV M,A + POP H + JMP RDBUF1 +RDBUF6 CPI CNTRLX ;control-x (cancel)? + JNZ RDBUF8 + POP H +RDBUF7 LDA STARTING;yes, backup the cursor to here. + LXI H,CURPOS + CMP M + JNC RDBUFF ;done yet? + DCR M ;no, decrement pointer and output back up one space. + CALL BACKUP + JMP RDBUF7 +RDBUF8 CPI CNTRLU ;cntrol-u (cancel line)? + JNZ RDBUF9 + CALL NEWLINE ;start a new line. + POP H + JMP RDBUFF +RDBUF9 CPI CNTRLR ;control-r? + JNZ RDBUF14 +RDBUF10 PUSH B ;yes, start a new line and retype the old one. + CALL NEWLINE + POP B + POP H + PUSH H + PUSH B +RDBUF11 MOV A,B ;done whole line yet? + ORA A + JZ RDBUF12 + INX H ;nope, get next character. + MOV C,M + DCR B ;count it. + PUSH B + PUSH H + CALL SHOWIT ;and display it. + POP H + POP B + JMP RDBUF11 +RDBUF12 PUSH H ;done with line. If we were displaying + LDA OUTFLAG ;then update cursor position. 
+ ORA A + JZ RDBUF2 + LXI H,CURPOS;because this line is shorter, we must + SUB M ;back up the cursor (not the screen however) + STA OUTFLAG ;some number of positions. +RDBUF13 CALL BACKUP ;note that as long as (OUTFLAG) is non + LXI H,OUTFLAG;zero, the screen will not be changed. + DCR M + JNZ RDBUF13 + JMP RDBUF2 ;now just get the next character. +; +; Just a normal character, put this in our buffer and echo. +; +RDBUF14 INX H + MOV M,A ;store character. + INR B ;and count it. +RDBUF15 PUSH B + PUSH H + MOV C,A ;echo it now. + CALL SHOWIT + POP H + POP B + MOV A,M ;was it an abort request? + CPI CNTRLC ;control-c abort? + MOV A,B + JNZ RDBUF16 + CPI 1 ;only if at start of line. + JZ 0 +RDBUF16 CMP C ;nope, have we filled the buffer? + JC RDBUF1 +RDBUF17 POP H ;yes end the line and return. + MOV M,B + MVI C,CR + JMP OUTCHAR ;output (cr) and return. +; +; Function to get a character from the console device. +; +GETCON CALL GETECHO ;get and echo. + JMP SETSTAT ;save status and return. +; +; Function to get a character from the tape reader device. +; +GETRDR CALL READER ;get a character from reader, set status and return. + JMP SETSTAT +; +; Function to perform direct console i/o. If (C) contains (FF) +; then this is an input request. If (C) contains (FE) then +; this is a status request. Otherwise we are to output (C). +; +DIRCIO MOV A,C ;test for (FF). + INR A + JZ DIRC1 + INR A ;test for (FE). + JZ CONST + JMP CONOUT ;just output (C). +DIRC1 CALL CONST ;this is an input request. + ORA A + JZ GOBACK1 ;not ready? Just return (directly). + CALL CONIN ;yes, get character. + JMP SETSTAT ;set status and return. +; +; Function to return the i/o byte. +; +GETIOB LDA IOBYTE + JMP SETSTAT +; +; Function to set the i/o byte. +; +SETIOB LXI H,IOBYTE + MOV M,C + RET +; +; Function to print the character string pointed to by (DE) +; on the console device. The string ends with a '$'. +; +PRTSTR XCHG + MOV C,L + MOV B,H ;now (BC) points to it. 
+ JMP PRTMESG
+;
+; Function to interrogate the console device.
+;
+GETCSTS CALL CKCONSOL
+;
+; Get here to set the status and return to the cleanup
+; section. Then back to the user.
+;
+SETSTAT STA STATUS
+RTN RET
+;
+; Set the status to 1 (read or write error code).
+;
+IOERR1 MVI A,1
+ JMP SETSTAT
+;
+OUTFLAG DB 0 ;output flag (non zero means no output).
+STARTING:DB 2 ;starting position for cursor.
+CURPOS DB 0 ;cursor position (0=start of line).
+PRTFLAG DB 0 ;printer flag (control-p toggle). List if non zero.
+CHARBUF DB 0 ;single input character buffer.
+;
+; Stack area for BDOS calls.
+;
+USRSTACK:DW 0 ;save users stack pointer here.
+;
+ DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+STKAREA EQU $ ;end of stack area.
+;
+USERNO DB 0 ;current user number.
+ACTIVE DB 0 ;currently active drive.
+PARAMS DW 0 ;save (DE) parameters here on entry.
+STATUS DW 0 ;status returned from bdos function.
+;
+; Select error occurred, jump to error routine.
+;
+SLCTERR LXI H,BADSLCT
+;
+; Jump to (HL) indirectly.
+;
+JUMPHL MOV E,M
+ INX H
+ MOV D,M ;now (DE) contains the desired address.
+ XCHG
+ PCHL
+;
+; Block move. (DE) to (HL), (C) bytes total.
+;
+DE2HL INR C ;is count down to zero?
+DE2HL1 DCR C
+ RZ ;yes, we are done.
+ LDAX D ;no, move one more byte.
+ MOV M,A
+ INX D
+ INX H
+ JMP DE2HL1 ;and repeat.
+;
+; Select the desired drive.
+;
+SELECT LDA ACTIVE ;get active disk.
+ MOV C,A
+ CALL SELDSK ;select it.
+ MOV A,H ;valid drive?
+ ORA L ;valid drive?
+ RZ ;return if not.
+;
+; Here, the BIOS returned the address of the parameter block
+; in (HL). We will extract the necessary pointers and save them.
+;
+ MOV E,M ;yes, get address of translation table into (DE).
+ INX H
+ MOV D,M
+ INX H
+ SHLD SCRATCH1 ;save pointers to scratch areas.
+ INX H
+ INX H
+ SHLD SCRATCH2 ;ditto.
+ INX H
+ INX H
+ SHLD SCRATCH3 ;ditto.
+ INX H
+ INX H
+ XCHG ;now save the translation table address. 
+ SHLD XLATE + LXI H,DIRBUF ;put the next 8 bytes here. + MVI C,8 ;they consist of the directory buffer + CALL DE2HL ;pointer, parameter block pointer, + LHLD DISKPB ;check and allocation vectors. + XCHG + LXI H,SECTORS ;move parameter block into our ram. + MVI C,15 ;it is 15 bytes long. + CALL DE2HL + LHLD DSKSIZE ;check disk size. + MOV A,H ;more than 256 blocks on this? + LXI H,BIGDISK + MVI M,0FFH ;set to samll. + ORA A + JZ SELECT1 + MVI M,0 ;wrong, set to large. +SELECT1 MVI A,0FFH ;clear the zero flag. + ORA A + RET +; +; Routine to home the disk track head and clear pointers. +; +HOMEDRV CALL HOME ;home the head. + XRA A + LHLD SCRATCH2;set our track pointer also. + MOV M,A + INX H + MOV M,A + LHLD SCRATCH3;and our sector pointer. + MOV M,A + INX H + MOV M,A + RET +; +; Do the actual disk read and check the error return status. +; +DOREAD CALL READ + JMP IORET +; +; Do the actual disk write and handle any bios error. +; +DOWRITE CALL WRITE +IORET ORA A + RZ ;return unless an error occured. + LXI H,BADSCTR;bad read/write on this sector. + JMP JUMPHL +; +; Routine to select the track and sector that the desired +; block number falls in. +; +TRKSEC LHLD FILEPOS ;get position of last accessed file + MVI C,2 ;in directory and compute sector #. + CALL SHIFTR ;sector #=file-position/4. + SHLD BLKNMBR ;save this as the block number of interest. + SHLD CKSUMTBL;what's it doing here too? +; +; if the sector number has already been set (BLKNMBR), enter +; at this point. +; +TRKSEC1 LXI H,BLKNMBR + MOV C,M ;move sector number into (BC). + INX H + MOV B,M + LHLD SCRATCH3;get current sector number and + MOV E,M ;move this into (DE). + INX H + MOV D,M + LHLD SCRATCH2;get current track number. + MOV A,M ;and this into (HL). + INX H + MOV H,M + MOV L,A +TRKSEC2 MOV A,C ;is desired sector before current one? + SUB E + MOV A,B + SBB D + JNC TRKSEC3 + PUSH H ;yes, decrement sectors by one track. + LHLD SECTORS ;get sectors per track. 
+ MOV A,E + SUB L + MOV E,A + MOV A,D + SBB H + MOV D,A ;now we have backed up one full track. + POP H + DCX H ;adjust track counter. + JMP TRKSEC2 +TRKSEC3 PUSH H ;desired sector is after current one. + LHLD SECTORS ;get sectors per track. + DAD D ;bump sector pointer to next track. + JC TRKSEC4 + MOV A,C ;is desired sector now before current one? + SUB L + MOV A,B + SBB H + JC TRKSEC4 + XCHG ;not yes, increment track counter + POP H ;and continue until it is. + INX H + JMP TRKSEC3 +; +; here we have determined the track number that contains the +; desired sector. +; +TRKSEC4 POP H ;get track number (HL). + PUSH B + PUSH D + PUSH H + XCHG + LHLD OFFSET ;adjust for first track offset. + DAD D + MOV B,H + MOV C,L + CALL SETTRK ;select this track. + POP D ;reset current track pointer. + LHLD SCRATCH2 + MOV M,E + INX H + MOV M,D + POP D + LHLD SCRATCH3;reset the first sector on this track. + MOV M,E + INX H + MOV M,D + POP B + MOV A,C ;now subtract the desired one. + SUB E ;to make it relative (1-# sectors/track). + MOV C,A + MOV A,B + SBB D + MOV B,A + LHLD XLATE ;translate this sector according to this table. + XCHG + CALL SECTRN ;let the bios translate it. + MOV C,L + MOV B,H + JMP SETSEC ;and select it. +; +; Compute block number from record number (SAVNREC) and +; extent number (SAVEXT). +; +GETBLOCK:LXI H,BLKSHFT;get logical to physical conversion. + MOV C,M ;note that this is base 2 log of ratio. + LDA SAVNREC ;get record number. +GETBLK1 ORA A ;compute (A)=(A)/2^BLKSHFT. + RAR + DCR C + JNZ GETBLK1 + MOV B,A ;save result in (B). + MVI A,8 + SUB M + MOV C,A ;compute (C)=8-BLKSHFT. + LDA SAVEXT +GETBLK2 DCR C ;compute (A)=SAVEXT*2^(8-BLKSHFT). + JZ GETBLK3 + ORA A + RAL + JMP GETBLK2 +GETBLK3 ADD B + RET +; +; Routine to extract the (BC) block byte from the fcb pointed +; to by (PARAMS). If this is a big-disk, then these are 16 bit +; block numbers, else they are 8 bit numbers. +; Number is returned in (HL). +; +EXTBLK LHLD PARAMS ;get fcb address. 
+ LXI D,16 ;block numbers start 16 bytes into fcb. + DAD D + DAD B + LDA BIGDISK ;are we using a big-disk? + ORA A + JZ EXTBLK1 + MOV L,M ;no, extract an 8 bit number from the fcb. + MVI H,0 + RET +EXTBLK1 DAD B ;yes, extract a 16 bit number. + MOV E,M + INX H + MOV D,M + XCHG ;return in (HL). + RET +; +; Compute block number. +; +COMBLK CALL GETBLOCK + MOV C,A + MVI B,0 + CALL EXTBLK + SHLD BLKNMBR + RET +; +; Check for a zero block number (unused). +; +CHKBLK LHLD BLKNMBR + MOV A,L ;is it zero? + ORA H + RET +; +; Adjust physical block (BLKNMBR) and convert to logical +; sector (LOGSECT). This is the starting sector of this block. +; The actual sector of interest is then added to this and the +; resulting sector number is stored back in (BLKNMBR). This +; will still have to be adjusted for the track number. +; +LOGICAL LDA BLKSHFT ;get log2(physical/logical sectors). + LHLD BLKNMBR ;get physical sector desired. +LOGICL1 DAD H ;compute logical sector number. + DCR A ;note logical sectors are 128 bytes long. + JNZ LOGICL1 + SHLD LOGSECT ;save logical sector. + LDA BLKMASK ;get block mask. + MOV C,A + LDA SAVNREC ;get next sector to access. + ANA C ;extract the relative position within physical block. + ORA L ;and add it too logical sector. + MOV L,A + SHLD BLKNMBR ;and store. + RET +; +; Set (HL) to point to extent byte in fcb. +; +SETEXT LHLD PARAMS + LXI D,12 ;it is the twelth byte. + DAD D + RET +; +; Set (HL) to point to record count byte in fcb and (DE) to +; next record number byte. +; +SETHLDE LHLD PARAMS + LXI D,15 ;record count byte (#15). + DAD D + XCHG + LXI H,17 ;next record number (#32). + DAD D + RET +; +; Save current file data from fcb. +; +STRDATA CALL SETHLDE + MOV A,M ;get and store record count byte. + STA SAVNREC + XCHG + MOV A,M ;get and store next record number byte. + STA SAVNXT + CALL SETEXT ;point to extent byte. + LDA EXTMASK ;get extent mask. + ANA M + STA SAVEXT ;and save extent here. + RET +; +; Set the next record to access. 
If (MODE) is set to 2, then +; the last record byte (SAVNREC) has the correct number to access. +; For sequential access, (MODE) will be equal to 1. +; +SETNREC CALL SETHLDE + LDA MODE ;get sequential flag (=1). + CPI 2 ;a 2 indicates that no adder is needed. + JNZ STNREC1 + XRA A ;clear adder (random access?). +STNREC1 MOV C,A + LDA SAVNREC ;get last record number. + ADD C ;increment record count. + MOV M,A ;and set fcb's next record byte. + XCHG + LDA SAVNXT ;get next record byte from storage. + MOV M,A ;and put this into fcb as number of records used. + RET +; +; Shift (HL) right (C) bits. +; +SHIFTR INR C +SHIFTR1 DCR C + RZ + MOV A,H + ORA A + RAR + MOV H,A + MOV A,L + RAR + MOV L,A + JMP SHIFTR1 +; +; Compute the check-sum for the directory buffer. Return +; integer sum in (A). +; +CHECKSUM:MVI C,128 ;length of buffer. + LHLD DIRBUF ;get its location. + XRA A ;clear summation byte. +CHKSUM1 ADD M ;and compute sum ignoring carries. + INX H + DCR C + JNZ CHKSUM1 + RET +; +; Shift (HL) left (C) bits. +; +SHIFTL INR C +SHIFTL1 DCR C + RZ + DAD H ;shift left 1 bit. + JMP SHIFTL1 +; +; Routine to set a bit in a 16 bit value contained in (BC). +; The bit set depends on the current drive selection. +; +SETBIT PUSH B ;save 16 bit word. + LDA ACTIVE ;get active drive. + MOV C,A + LXI H,1 + CALL SHIFTL ;shift bit 0 into place. + POP B ;now 'or' this with the original word. + MOV A,C + ORA L + MOV L,A ;low byte done, do high byte. + MOV A,B + ORA H + MOV H,A + RET +; +; Extract the write protect status bit for the current drive. +; The result is returned in (A), bit 0. +; +GETWPRT LHLD WRTPRT ;get status bytes. + LDA ACTIVE ;which drive is current? + MOV C,A + CALL SHIFTR ;shift status such that bit 0 is the + MOV A,L ;one of interest for this drive. + ANI 01H ;and isolate it. + RET +; +; Function to write protect the current disk. +; +WRTPRTD LXI H,WRTPRT;point to status word. + MOV C,M ;set (BC) equal to the status. 
+ INX H + MOV B,M + CALL SETBIT ;and set this bit according to current drive. + SHLD WRTPRT ;then save. + LHLD DIRSIZE ;now save directory size limit. + INX H ;remember the last one. + XCHG + LHLD SCRATCH1;and store it here. + MOV M,E ;put low byte. + INX H + MOV M,D ;then high byte. + RET +; +; Check for a read only file. +; +CHKROFL CALL FCB2HL ;set (HL) to file entry in directory buffer. +CKROF1 LXI D,9 ;look at bit 7 of the ninth byte. + DAD D + MOV A,M + RAL + RNC ;return if ok. + LXI H,ROFILE;else, print error message and terminate. + JMP JUMPHL +; +; Check the write protect status of the active disk. +; +CHKWPRT CALL GETWPRT + RZ ;return if ok. + LXI H,RODISK;else print message and terminate. + JMP JUMPHL +; +; Routine to set (HL) pointing to the proper entry in the +; directory buffer. +; +FCB2HL LHLD DIRBUF ;get address of buffer. + LDA FCBPOS ;relative position of file. +; +; Routine to add (A) to (HL). +; +ADDA2HL ADD L + MOV L,A + RNC + INR H ;take care of any carry. + RET +; +; Routine to get the 's2' byte from the fcb supplied in +; the initial parameter specification. +; +GETS2 LHLD PARAMS ;get address of fcb. + LXI D,14 ;relative position of 's2'. + DAD D + MOV A,M ;extract this byte. + RET +; +; Clear the 's2' byte in the fcb. +; +CLEARS2 CALL GETS2 ;this sets (HL) pointing to it. + MVI M,0 ;now clear it. + RET +; +; Set bit 7 in the 's2' byte of the fcb. +; +SETS2B7 CALL GETS2 ;get the byte. + ORI 80H ;and set bit 7. + MOV M,A ;then store. + RET +; +; Compare (FILEPOS) with (SCRATCH1) and set flags based on +; the difference. This checks to see if there are more file +; names in the directory. We are at (FILEPOS) and there are +; (SCRATCH1) of them to check. +; +MOREFLS LHLD FILEPOS ;we are here. + XCHG + LHLD SCRATCH1;and don't go past here. + MOV A,E ;compute difference but don't keep. + SUB M + INX H + MOV A,D + SBB M ;set carry if no more names. + RET +; +; Call this routine to prevent (SCRATCH1) from being greater +; than (FILEPOS). 
+; +CHKNMBR CALL MOREFLS ;SCRATCH1 too big? + RC + INX D ;yes, reset it to (FILEPOS). + MOV M,D + DCX H + MOV M,E + RET +; +; Compute (HL)=(DE)-(HL) +; +SUBHL MOV A,E ;compute difference. + SUB L + MOV L,A ;store low byte. + MOV A,D + SBB H + MOV H,A ;and then high byte. + RET +; +; Set the directory checksum byte. +; +SETDIR MVI C,0FFH +; +; Routine to set or compare the directory checksum byte. If +; (C)=0ffh, then this will set the checksum byte. Else the byte +; will be checked. If the check fails (the disk has been changed), +; then this disk will be write protected. +; +CHECKDIR:LHLD CKSUMTBL + XCHG + LHLD ALLOC1 + CALL SUBHL + RNC ;ok if (CKSUMTBL) > (ALLOC1), so return. + PUSH B + CALL CHECKSUM;else compute checksum. + LHLD CHKVECT ;get address of checksum table. + XCHG + LHLD CKSUMTBL + DAD D ;set (HL) to point to byte for this drive. + POP B + INR C ;set or check ? + JZ CHKDIR1 + CMP M ;check them. + RZ ;return if they are the same. + CALL MOREFLS ;not the same, do we care? + RNC + CALL WRTPRTD ;yes, mark this as write protected. + RET +CHKDIR1 MOV M,A ;just set the byte. + RET +; +; Do a write to the directory of the current disk. +; +DIRWRITE:CALL SETDIR ;set checksum byte. + CALL DIRDMA ;set directory dma address. + MVI C,1 ;tell the bios to actually write. + CALL DOWRITE ;then do the write. + JMP DEFDMA +; +; Read from the directory. +; +DIRREAD CALL DIRDMA ;set the directory dma address. + CALL DOREAD ;and read it. +; +; Routine to set the dma address to the users choice. +; +DEFDMA LXI H,USERDMA;reset the default dma address and return. + JMP DIRDMA1 +; +; Routine to set the dma address for directory work. +; +DIRDMA LXI H,DIRBUF +; +; Set the dma address. On entry, (HL) points to +; word containing the desired dma address. +; +DIRDMA1 MOV C,M + INX H + MOV B,M ;setup (BC) and go to the bios to set it. + JMP SETDMA +; +; Move the directory buffer into user's dma space. 
+; +MOVEDIR LHLD DIRBUF ;buffer is located here, and + XCHG + LHLD USERDMA; put it here. + MVI C,128 ;this is its length. + JMP DE2HL ;move it now and return. +; +; Check (FILEPOS) and set the zero flag if it equals 0ffffh. +; +CKFILPOS:LXI H,FILEPOS + MOV A,M + INX H + CMP M ;are both bytes the same? + RNZ + INR A ;yes, but are they each 0ffh? + RET +; +; Set location (FILEPOS) to 0ffffh. +; +STFILPOS:LXI H,0FFFFH + SHLD FILEPOS + RET +; +; Move on to the next file position within the current +; directory buffer. If no more exist, set pointer to 0ffffh +; and the calling routine will check for this. Enter with (C) +; equal to 0ffh to cause the checksum byte to be set, else we +; will check this disk and set write protect if checksums are +; not the same (applies only if another directory sector must +; be read). +; +NXENTRY LHLD DIRSIZE ;get directory entry size limit. + XCHG + LHLD FILEPOS ;get current count. + INX H ;go on to the next one. + SHLD FILEPOS + CALL SUBHL ;(HL)=(DIRSIZE)-(FILEPOS) + JNC NXENT1 ;is there more room left? + JMP STFILPOS;no. Set this flag and return. +NXENT1 LDA FILEPOS ;get file position within directory. + ANI 03H ;only look within this sector (only 4 entries fit). + MVI B,5 ;convert to relative position (32 bytes each). +NXENT2 ADD A ;note that this is not efficient code. + DCR B ;5 'ADD A's would be better. + JNZ NXENT2 + STA FCBPOS ;save it as position of fcb. + ORA A + RNZ ;return if we are within buffer. + PUSH B + CALL TRKSEC ;we need the next directory sector. + CALL DIRREAD + POP B + JMP CHECKDIR +; +; Routine to to get a bit from the disk space allocation +; map. It is returned in (A), bit position 0. On entry to here, +; set (BC) to the block number on the disk to check. +; On return, (D) will contain the original bit position for +; this block number and (HL) will point to the address for it. +; +CKBITMAP:MOV A,C ;determine bit number of interest. + ANI 07H ;compute (D)=(E)=(C and 7)+1. 
+ INR A + MOV E,A ;save particular bit number. + MOV D,A +; +; compute (BC)=(BC)/8. +; + MOV A,C + RRC ;now shift right 3 bits. + RRC + RRC + ANI 1FH ;and clear bits 7,6,5. + MOV C,A + MOV A,B + ADD A ;now shift (B) into bits 7,6,5. + ADD A + ADD A + ADD A + ADD A + ORA C ;and add in (C). + MOV C,A ;ok, (C) ha been completed. + MOV A,B ;is there a better way of doing this? + RRC + RRC + RRC + ANI 1FH + MOV B,A ;and now (B) is completed. +; +; use this as an offset into the disk space allocation +; table. +; + LHLD ALOCVECT + DAD B + MOV A,M ;now get correct byte. +CKBMAP1 RLC ;get correct bit into position 0. + DCR E + JNZ CKBMAP1 + RET +; +; Set or clear the bit map such that block number (BC) will be marked +; as used. On entry, if (E)=0 then this bit will be cleared, if it equals +; 1 then it will be set (don't use anyother values). +; +STBITMAP:PUSH D + CALL CKBITMAP;get the byte of interest. + ANI 0FEH ;clear the affected bit. + POP B + ORA C ;and now set it acording to (C). +; +; entry to restore the original bit position and then store +; in table. (A) contains the value, (D) contains the bit +; position (1-8), and (HL) points to the address within the +; space allocation table for this byte. +; +STBMAP1 RRC ;restore original bit position. + DCR D + JNZ STBMAP1 + MOV M,A ;and stor byte in table. + RET +; +; Set/clear space used bits in allocation map for this file. +; On entry, (C)=1 to set the map and (C)=0 to clear it. +; +SETFILE CALL FCB2HL ;get address of fcb + LXI D,16 + DAD D ;get to block number bytes. + PUSH B + MVI C,17 ;check all 17 bytes (max) of table. +SETFL1 POP D + DCR C ;done all bytes yet? + RZ + PUSH D + LDA BIGDISK ;check disk size for 16 bit block numbers. + ORA A + JZ SETFL2 + PUSH B ;only 8 bit numbers. set (BC) to this one. + PUSH H + MOV C,M ;get low byte from table, always + MVI B,0 ;set high byte to zero. + JMP SETFL3 +SETFL2 DCR C ;for 16 bit block numbers, adjust counter. + PUSH B + MOV C,M ;now get both the low and high bytes. 
+ INX H + MOV B,M + PUSH H +SETFL3 MOV A,C ;block used? + ORA B + JZ SETFL4 + LHLD DSKSIZE ;is this block number within the + MOV A,L ;space on the disk? + SUB C + MOV A,H + SBB B + CNC STBITMAP;yes, set the proper bit. +SETFL4 POP H ;point to next block number in fcb. + INX H + POP B + JMP SETFL1 +; +; Construct the space used allocation bit map for the active +; drive. If a file name starts with '$' and it is under the +; current user number, then (STATUS) is set to minus 1. Otherwise +; it is not set at all. +; +BITMAP LHLD DSKSIZE ;compute size of allocation table. + MVI C,3 + CALL SHIFTR ;(HL)=(HL)/8. + INX H ;at lease 1 byte. + MOV B,H + MOV C,L ;set (BC) to the allocation table length. +; +; Initialize the bitmap for this drive. Right now, the first +; two bytes are specified by the disk parameter block. However +; a patch could be entered here if it were necessary to setup +; this table in a special mannor. For example, the bios could +; determine locations of 'bad blocks' and set them as already +; 'used' in the map. +; + LHLD ALOCVECT;now zero out the table now. +BITMAP1 MVI M,0 + INX H + DCX B + MOV A,B + ORA C + JNZ BITMAP1 + LHLD ALLOC0 ;get initial space used by directory. + XCHG + LHLD ALOCVECT;and put this into map. + MOV M,E + INX H + MOV M,D +; +; End of initialization portion. +; + CALL HOMEDRV ;now home the drive. + LHLD SCRATCH1 + MVI M,3 ;force next directory request to read + INX H ;in a sector. + MVI M,0 + CALL STFILPOS;clear initial file position also. +BITMAP2 MVI C,0FFH ;read next file name in directory + CALL NXENTRY ;and set checksum byte. + CALL CKFILPOS;is there another file? + RZ + CALL FCB2HL ;yes, get its address. + MVI A,0E5H + CMP M ;empty file entry? + JZ BITMAP2 + LDA USERNO ;no, correct user number? + CMP M + JNZ BITMAP3 + INX H + MOV A,M ;yes, does name start with a '$'? + SUI '$' + JNZ BITMAP3 + DCR A ;yes, set atatus to minus one. + STA STATUS +BITMAP3 MVI C,1 ;now set this file's space as used in bit map. 
+ CALL SETFILE + CALL CHKNMBR ;keep (SCRATCH1) in bounds. + JMP BITMAP2 +; +; Set the status (STATUS) and return. +; +STSTATUS:LDA FNDSTAT + JMP SETSTAT +; +; Check extents in (A) and (C). Set the zero flag if they +; are the same. The number of 16k chunks of disk space that +; the directory extent covers is expressad is (EXTMASK+1). +; No registers are modified. +; +SAMEXT PUSH B + PUSH PSW + LDA EXTMASK ;get extent mask and use it to + CMA ;to compare both extent numbers. + MOV B,A ;save resulting mask here. + MOV A,C ;mask first extent and save in (C). + ANA B + MOV C,A + POP PSW ;now mask second extent and compare + ANA B ;with the first one. + SUB C + ANI 1FH ;(* only check buts 0-4 *) + POP B ;the zero flag is set if they are the same. + RET ;restore (BC) and return. +; +; Search for the first occurence of a file name. On entry, +; register (C) should contain the number of bytes of the fcb +; that must match. +; +FINDFST MVI A,0FFH + STA FNDSTAT + LXI H,COUNTER;save character count. + MOV M,C + LHLD PARAMS ;get filename to match. + SHLD SAVEFCB ;and save. + CALL STFILPOS;clear initial file position (set to 0ffffh). + CALL HOMEDRV ;home the drive. +; +; Entry to locate the next occurence of a filename within the +; directory. The disk is not expected to have been changed. If +; it was, then it will be write protected. +; +FINDNXT MVI C,0 ;write protect the disk if changed. + CALL NXENTRY ;get next filename entry in directory. + CALL CKFILPOS;is file position = 0ffffh? + JZ FNDNXT6 ;yes, exit now then. + LHLD SAVEFCB ;set (DE) pointing to filename to match. + XCHG + LDAX D + CPI 0E5H ;empty directory entry? + JZ FNDNXT1 ;(* are we trying to reserect erased entries? *) + PUSH D + CALL MOREFLS ;more files in directory? + POP D + JNC FNDNXT6 ;no more. Exit now. +FNDNXT1 CALL FCB2HL ;get address of this fcb in directory. + LDA COUNTER ;get number of bytes (characters) to check. + MOV C,A + MVI B,0 ;initialize byte position counter. 
+FNDNXT2 MOV A,C ;are we done with the compare? + ORA A + JZ FNDNXT5 + LDAX D ;no, check next byte. + CPI '?' ;don't care about this character? + JZ FNDNXT4 + MOV A,B ;get bytes position in fcb. + CPI 13 ;don't care about the thirteenth byte either. + JZ FNDNXT4 + CPI 12 ;extent byte? + LDAX D + JZ FNDNXT3 + SUB M ;otherwise compare characters. + ANI 7FH + JNZ FINDNXT ;not the same, check next entry. + JMP FNDNXT4 ;so far so good, keep checking. +FNDNXT3 PUSH B ;check the extent byte here. + MOV C,M + CALL SAMEXT + POP B + JNZ FINDNXT ;not the same, look some more. +; +; So far the names compare. Bump pointers to the next byte +; and continue until all (C) characters have been checked. +; +FNDNXT4 INX D ;bump pointers. + INX H + INR B + DCR C ;adjust character counter. + JMP FNDNXT2 +FNDNXT5 LDA FILEPOS ;return the position of this entry. + ANI 03H + STA STATUS + LXI H,FNDSTAT + MOV A,M + RAL + RNC + XRA A + MOV M,A + RET +; +; Filename was not found. Set appropriate status. +; +FNDNXT6 CALL STFILPOS;set (FILEPOS) to 0ffffh. + MVI A,0FFH ;say not located. + JMP SETSTAT +; +; Erase files from the directory. Only the first byte of the +; fcb will be affected. It is set to (E5). +; +ERAFILE CALL CHKWPRT ;is disk write protected? + MVI C,12 ;only compare file names. + CALL FINDFST ;get first file name. +ERAFIL1 CALL CKFILPOS;any found? + RZ ;nope, we must be done. + CALL CHKROFL ;is file read only? + CALL FCB2HL ;nope, get address of fcb and + MVI M,0E5H ;set first byte to 'empty'. + MVI C,0 ;clear the space from the bit map. + CALL SETFILE + CALL DIRWRITE;now write the directory sector back out. + CALL FINDNXT ;find the next file name. + JMP ERAFIL1 ;and repeat process. +; +; Look through the space allocation map (bit map) for the +; next available block. Start searching at block number (BC-1). +; The search procedure is to look for an empty block that is +; before the starting block. If not empty, look at a later +; block number. 
In this way, we return the closest empty block +; on either side of the 'target' block number. This will speed +; access on random devices. For serial devices, this should be +; changed to look in the forward direction first and then start +; at the front and search some more. +; +; On return, (DE)= block number that is empty and (HL) =0 +; if no empry block was found. +; +FNDSPACE:MOV D,B ;set (DE) as the block that is checked. + MOV E,C +; +; Look before target block. Registers (BC) are used as the lower +; pointer and (DE) as the upper pointer. +; +FNDSPA1 MOV A,C ;is block 0 specified? + ORA B + JZ FNDSPA2 + DCX B ;nope, check previous block. + PUSH D + PUSH B + CALL CKBITMAP + RAR ;is this block empty? + JNC FNDSPA3 ;yes. use this. +; +; Note that the above logic gets the first block that it finds +; that is empty. Thus a file could be written 'backward' making +; it very slow to access. This could be changed to look for the +; first empty block and then continue until the start of this +; empty space is located and then used that starting block. +; This should help speed up access to some files especially on +; a well used disk with lots of fairly small 'holes'. +; + POP B ;nope, check some more. + POP D +; +; Now look after target block. +; +FNDSPA2 LHLD DSKSIZE ;is block (DE) within disk limits? + MOV A,E + SUB L + MOV A,D + SBB H + JNC FNDSPA4 + INX D ;yes, move on to next one. + PUSH B + PUSH D + MOV B,D + MOV C,E + CALL CKBITMAP;check it. + RAR ;empty? + JNC FNDSPA3 + POP D ;nope, continue searching. + POP B + JMP FNDSPA1 +; +; Empty block found. Set it as used and return with (HL) +; pointing to it (true?). +; +FNDSPA3 RAL ;reset byte. + INR A ;and set bit 0. + CALL STBMAP1 ;update bit map. + POP H ;set return registers. + POP D + RET +; +; Free block was not found. If (BC) is not zero, then we have +; not checked all of the disk space. +; +FNDSPA4 MOV A,C + ORA B + JNZ FNDSPA1 + LXI H,0 ;set 'not found' status. 
+ RET +; +; Move a complete fcb entry into the directory and write it. +; +FCBSET MVI C,0 + MVI E,32 ;length of each entry. +; +; Move (E) bytes from the fcb pointed to by (PARAMS) into +; fcb in directory starting at relative byte (C). This updated +; directory buffer is then written to the disk. +; +UPDATE PUSH D + MVI B,0 ;set (BC) to relative byte position. + LHLD PARAMS ;get address of fcb. + DAD B ;compute starting byte. + XCHG + CALL FCB2HL ;get address of fcb to update in directory. + POP B ;set (C) to number of bytes to change. + CALL DE2HL +UPDATE1 CALL TRKSEC ;determine the track and sector affected. + JMP DIRWRITE ;then write this sector out. +; +; Routine to change the name of all files on the disk with a +; specified name. The fcb contains the current name as the +; first 12 characters and the new name 16 bytes into the fcb. +; +CHGNAMES:CALL CHKWPRT ;check for a write protected disk. + MVI C,12 ;match first 12 bytes of fcb only. + CALL FINDFST ;get first name. + LHLD PARAMS ;get address of fcb. + MOV A,M ;get user number. + LXI D,16 ;move over to desired name. + DAD D + MOV M,A ;keep same user number. +CHGNAM1 CALL CKFILPOS;any matching file found? + RZ ;no, we must be done. + CALL CHKROFL ;check for read only file. + MVI C,16 ;start 16 bytes into fcb. + MVI E,12 ;and update the first 12 bytes of directory. + CALL UPDATE + CALL FINDNXT ;get te next file name. + JMP CHGNAM1 ;and continue. +; +; Update a files attributes. The procedure is to search for +; every file with the same name as shown in fcb (ignoring bit 7) +; and then to update it (which includes bit 7). No other changes +; are made. +; +SAVEATTR:MVI C,12 ;match first 12 bytes. + CALL FINDFST ;look for first filename. +SAVATR1 CALL CKFILPOS;was one found? + RZ ;nope, we must be done. + MVI C,0 ;yes, update the first 12 bytes now. + MVI E,12 + CALL UPDATE ;update filename and write directory. + CALL FINDNXT ;and get the next file. + JMP SAVATR1 ;then continue until done. 
+; +; Open a file (name specified in fcb). +; +OPENIT MVI C,15 ;compare the first 15 bytes. + CALL FINDFST ;get the first one in directory. + CALL CKFILPOS;any at all? + RZ +OPENIT1 CALL SETEXT ;point to extent byte within users fcb. + MOV A,M ;and get it. + PUSH PSW ;save it and address. + PUSH H + CALL FCB2HL ;point to fcb in directory. + XCHG + LHLD PARAMS ;this is the users copy. + MVI C,32 ;move it into users space. + PUSH D + CALL DE2HL + CALL SETS2B7 ;set bit 7 in 's2' byte (unmodified). + POP D ;now get the extent byte from this fcb. + LXI H,12 + DAD D + MOV C,M ;into (C). + LXI H,15 ;now get the record count byte into (B). + DAD D + MOV B,M + POP H ;keep the same extent as the user had originally. + POP PSW + MOV M,A + MOV A,C ;is it the same as in the directory fcb? + CMP M + MOV A,B ;if yes, then use the same record count. + JZ OPENIT2 + MVI A,0 ;if the user specified an extent greater than + JC OPENIT2 ;the one in the directory, then set record count to 0. + MVI A,128 ;otherwise set to maximum. +OPENIT2 LHLD PARAMS ;set record count in users fcb to (A). + LXI D,15 + DAD D ;compute relative position. + MOV M,A ;and set the record count. + RET +; +; Move two bytes from (DE) to (HL) if (and only if) (HL) +; point to a zero value (16 bit). +; Return with zero flag set it (DE) was moved. Registers (DE) +; and (HL) are not changed. However (A) is. +; +MOVEWORD:MOV A,M ;check for a zero word. + INX H + ORA M ;both bytes zero? + DCX H + RNZ ;nope, just return. + LDAX D ;yes, move two bytes from (DE) into + MOV M,A ;this zero space. + INX D + INX H + LDAX D + MOV M,A + DCX D ;don't disturb these registers. + DCX H + RET +; +; Get here to close a file specified by (fcb). +; +CLOSEIT XRA A ;clear status and file position bytes. + STA STATUS + STA FILEPOS + STA FILEPOS+1 + CALL GETWPRT ;get write protect bit for this drive. + RNZ ;just return if it is set. + CALL GETS2 ;else get the 's2' byte. + ANI 80H ;and look at bit 7 (file unmodified?). 
+ RNZ ;just return if set. + MVI C,15 ;else look up this file in directory. + CALL FINDFST + CALL CKFILPOS;was it found? + RZ ;just return if not. + LXI B,16 ;set (HL) pointing to records used section. + CALL FCB2HL + DAD B + XCHG + LHLD PARAMS ;do the same for users specified fcb. + DAD B + MVI C,16 ;this many bytes are present in this extent. +CLOSEIT1:LDA BIGDISK ;8 or 16 bit record numbers? + ORA A + JZ CLOSEIT4 + MOV A,M ;just 8 bit. Get one from users fcb. + ORA A + LDAX D ;now get one from directory fcb. + JNZ CLOSEIT2 + MOV M,A ;users byte was zero. Update from directory. +CLOSEIT2:ORA A + JNZ CLOSEIT3 + MOV A,M ;directories byte was zero, update from users fcb. + STAX D +CLOSEIT3:CMP M ;if neither one of these bytes were zero, + JNZ CLOSEIT7 ;then close error if they are not the same. + JMP CLOSEIT5 ;ok so far, get to next byte in fcbs. +CLOSEIT4:CALL MOVEWORD;update users fcb if it is zero. + XCHG + CALL MOVEWORD;update directories fcb if it is zero. + XCHG + LDAX D ;if these two values are no different, + CMP M ;then a close error occured. + JNZ CLOSEIT7 + INX D ;check second byte. + INX H + LDAX D + CMP M + JNZ CLOSEIT7 + DCR C ;remember 16 bit values. +CLOSEIT5:INX D ;bump to next item in table. + INX H + DCR C ;there are 16 entries only. + JNZ CLOSEIT1;continue if more to do. + LXI B,0FFECH;backup 20 places (extent byte). + DAD B + XCHG + DAD B + LDAX D + CMP M ;directory's extent already greater than the + JC CLOSEIT6 ;users extent? + MOV M,A ;no, update directory extent. + LXI B,3 ;and update the record count byte in + DAD B ;directories fcb. + XCHG + DAD B + MOV A,M ;get from user. + STAX D ;and put in directory. +CLOSEIT6:MVI A,0FFH ;set 'was open and is now closed' byte. + STA CLOSEFLG + JMP UPDATE1 ;update the directory now. +CLOSEIT7:LXI H,STATUS;set return status and then return. + DCR M + RET +; +; Routine to get the next empty space in the directory. It +; will then be cleared for use. 
+; +GETEMPTY:CALL CHKWPRT ;make sure disk is not write protected. + LHLD PARAMS ;save current parameters (fcb). + PUSH H + LXI H,EMPTYFCB;use special one for empty space. + SHLD PARAMS + MVI C,1 ;search for first empty spot in directory. + CALL FINDFST ;(* only check first byte *) + CALL CKFILPOS;none? + POP H + SHLD PARAMS ;restore original fcb address. + RZ ;return if no more space. + XCHG + LXI H,15 ;point to number of records for this file. + DAD D + MVI C,17 ;and clear all of this space. + XRA A +GETMT1 MOV M,A + INX H + DCR C + JNZ GETMT1 + LXI H,13 ;clear the 's1' byte also. + DAD D + MOV M,A + CALL CHKNMBR ;keep (SCRATCH1) within bounds. + CALL FCBSET ;write out this fcb entry to directory. + JMP SETS2B7 ;set 's2' byte bit 7 (unmodified at present). +; +; Routine to close the current extent and open the next one +; for reading. +; +GETNEXT XRA A + STA CLOSEFLG;clear close flag. + CALL CLOSEIT ;close this extent. + CALL CKFILPOS + RZ ;not there??? + LHLD PARAMS ;get extent byte. + LXI B,12 + DAD B + MOV A,M ;and increment it. + INR A + ANI 1FH ;keep within range 0-31. + MOV M,A + JZ GTNEXT1 ;overflow? + MOV B,A ;mask extent byte. + LDA EXTMASK + ANA B + LXI H,CLOSEFLG;check close flag (0ffh is ok). + ANA M + JZ GTNEXT2 ;if zero, we must read in next extent. + JMP GTNEXT3 ;else, it is already in memory. +GTNEXT1 LXI B,2 ;Point to the 's2' byte. + DAD B + INR M ;and bump it. + MOV A,M ;too many extents? + ANI 0FH + JZ GTNEXT5 ;yes, set error code. +; +; Get here to open the next extent. +; +GTNEXT2 MVI C,15 ;set to check first 15 bytes of fcb. + CALL FINDFST ;find the first one. + CALL CKFILPOS;none available? + JNZ GTNEXT3 + LDA RDWRTFLG;no extent present. Can we open an empty one? + INR A ;0ffh means reading (so not possible). + JZ GTNEXT5 ;or an error. + CALL GETEMPTY;we are writing, get an empty entry. + CALL CKFILPOS;none? + JZ GTNEXT5 ;error if true. + JMP GTNEXT4 ;else we are almost done. +GTNEXT3 CALL OPENIT1 ;open this extent. 
+GTNEXT4 CALL STRDATA ;move in updated data (rec #, extent #, etc.) + XRA A ;clear status and return. + JMP SETSTAT +; +; Error in extending the file. Too many extents were needed +; or not enough space on the disk. +; +GTNEXT5 CALL IOERR1 ;set error code, clear bit 7 of 's2' + JMP SETS2B7 ;so this is not written on a close. +; +; Read a sequential file. +; +RDSEQ MVI A,1 ;set sequential access mode. + STA MODE +RDSEQ1 MVI A,0FFH ;don't allow reading unwritten space. + STA RDWRTFLG + CALL STRDATA ;put rec# and ext# into fcb. + LDA SAVNREC ;get next record to read. + LXI H,SAVNXT;get number of records in extent. + CMP M ;within this extent? + JC RDSEQ2 + CPI 128 ;no. Is this extent fully used? + JNZ RDSEQ3 ;no. End-of-file. + CALL GETNEXT ;yes, open the next one. + XRA A ;reset next record to read. + STA SAVNREC + LDA STATUS ;check on open, successful? + ORA A + JNZ RDSEQ3 ;no, error. +RDSEQ2 CALL COMBLK ;ok. compute block number to read. + CALL CHKBLK ;check it. Within bounds? + JZ RDSEQ3 ;no, error. + CALL LOGICAL ;convert (BLKNMBR) to logical sector (128 byte). + CALL TRKSEC1 ;set the track and sector for this block #. + CALL DOREAD ;and read it. + JMP SETNREC ;and set the next record to be accessed. +; +; Read error occured. Set status and return. +; +RDSEQ3 JMP IOERR1 +; +; Write the next sequential record. +; +WTSEQ MVI A,1 ;set sequential access mode. + STA MODE +WTSEQ1 MVI A,0 ;allow an addition empty extent to be opened. + STA RDWRTFLG + CALL CHKWPRT ;check write protect status. + LHLD PARAMS + CALL CKROF1 ;check for read only file, (HL) already set to fcb. + CALL STRDATA ;put updated data into fcb. + LDA SAVNREC ;get record number to write. + CPI 128 ;within range? + JNC IOERR1 ;no, error(?). + CALL COMBLK ;compute block number. + CALL CHKBLK ;check number. + MVI C,0 ;is there one to write to? + JNZ WTSEQ6 ;yes, go do it. + CALL GETBLOCK;get next block number within fcb to use. + STA RELBLOCK;and save. 
+ LXI B,0 ;start looking for space from the start + ORA A ;if none allocated as yet. + JZ WTSEQ2 + MOV C,A ;extract previous block number from fcb + DCX B ;so we can be closest to it. + CALL EXTBLK + MOV B,H + MOV C,L +WTSEQ2 CALL FNDSPACE;find the next empty block nearest number (BC). + MOV A,L ;check for a zero number. + ORA H + JNZ WTSEQ3 + MVI A,2 ;no more space? + JMP SETSTAT +WTSEQ3 SHLD BLKNMBR ;save block number to access. + XCHG ;put block number into (DE). + LHLD PARAMS ;now we must update the fcb for this + LXI B,16 ;newly allocated block. + DAD B + LDA BIGDISK ;8 or 16 bit block numbers? + ORA A + LDA RELBLOCK ;(* update this entry *) + JZ WTSEQ4 ;zero means 16 bit ones. + CALL ADDA2HL ;(HL)=(HL)+(A) + MOV M,E ;store new block number. + JMP WTSEQ5 +WTSEQ4 MOV C,A ;compute spot in this 16 bit table. + MVI B,0 + DAD B + DAD B + MOV M,E ;stuff block number (DE) there. + INX H + MOV M,D +WTSEQ5 MVI C,2 ;set (C) to indicate writing to un-used disk space. +WTSEQ6 LDA STATUS ;are we ok so far? + ORA A + RNZ + PUSH B ;yes, save write flag for bios (register C). + CALL LOGICAL ;convert (BLKNMBR) over to loical sectors. + LDA MODE ;get access mode flag (1=sequential, + DCR A ;0=random, 2=special?). + DCR A + JNZ WTSEQ9 +; +; Special random i/o from function #40. Maybe for M/PM, but the +; current block, if it has not been written to, will be zeroed +; out and then written (reason?). +; + POP B + PUSH B + MOV A,C ;get write status flag (2=writing unused space). + DCR A + DCR A + JNZ WTSEQ9 + PUSH H + LHLD DIRBUF ;zero out the directory buffer. + MOV D,A ;note that (A) is zero here. +WTSEQ7 MOV M,A + INX H + INR D ;do 128 bytes. + JP WTSEQ7 + CALL DIRDMA ;tell the bios the dma address for directory access. + LHLD LOGSECT ;get sector that starts current block. + MVI C,2 ;set 'writing to unused space' flag. +WTSEQ8 SHLD BLKNMBR ;save sector to write. + PUSH B + CALL TRKSEC1 ;determine its track and sector numbers. 
+ POP B + CALL DOWRITE ;now write out 128 bytes of zeros. + LHLD BLKNMBR ;get sector number. + MVI C,0 ;set normal write flag. + LDA BLKMASK ;determine if we have written the entire + MOV B,A ;physical block. + ANA L + CMP B + INX H ;prepare for the next one. + JNZ WTSEQ8 ;continue until (BLKMASK+1) sectors written. + POP H ;reset next sector number. + SHLD BLKNMBR + CALL DEFDMA ;and reset dma address. +; +; Normal disk write. Set the desired track and sector then +; do the actual write. +; +WTSEQ9 CALL TRKSEC1 ;determine track and sector for this write. + POP B ;get write status flag. + PUSH B + CALL DOWRITE ;and write this out. + POP B + LDA SAVNREC ;get number of records in file. + LXI H,SAVNXT;get last record written. + CMP M + JC WTSEQ10 + MOV M,A ;we have to update record count. + INR M + MVI C,2 +; +;* This area has been patched to correct disk update problem +;* when using blocking and de-blocking in the BIOS. +; +WTSEQ10 NOP ;was 'dcr c' + NOP ;was 'dcr c' + LXI H,0 ;was 'jnz wtseq99' +; +; * End of patch. +; + PUSH PSW + CALL GETS2 ;set 'extent written to' flag. + ANI 7FH ;(* clear bit 7 *) + MOV M,A + POP PSW ;get record count for this extent. +WTSEQ99 CPI 127 ;is it full? + JNZ WTSEQ12 + LDA MODE ;yes, are we in sequential mode? + CPI 1 + JNZ WTSEQ12 + CALL SETNREC ;yes, set next record number. + CALL GETNEXT ;and get next empty space in directory. + LXI H,STATUS;ok? + MOV A,M + ORA A + JNZ WTSEQ11 + DCR A ;yes, set record count to -1. + STA SAVNREC +WTSEQ11 MVI M,0 ;clear status. +WTSEQ12 JMP SETNREC ;set next record to access. +; +; For random i/o, set the fcb for the desired record number +; based on the 'r0,r1,r2' bytes. 
These bytes in the fcb are +; used as follows: +; +; fcb+35 fcb+34 fcb+33 +; | 'r-2' | 'r-1' | 'r-0' | +; |7 0 | 7 0 | 7 0| +; |0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0 | 0 0 0 0 0 0 0 0| +; | overflow | | extra | extent | record # | +; | ______________| |_extent|__number___|_____________| +; also 's2' +; +; On entry, register (C) contains 0ffh if this is a read +; and thus we can not access unwritten disk space. Otherwise, +; another extent will be opened (for writing) if required. +; +POSITION:XRA A ;set random i/o flag. + STA MODE +; +; Special entry (function #40). M/PM ? +; +POSITN1 PUSH B ;save read/write flag. + LHLD PARAMS ;get address of fcb. + XCHG + LXI H,33 ;now get byte 'r0'. + DAD D + MOV A,M + ANI 7FH ;keep bits 0-6 for the record number to access. + PUSH PSW + MOV A,M ;now get bit 7 of 'r0' and bits 0-3 of 'r1'. + RAL + INX H + MOV A,M + RAL + ANI 1FH ;and save this in bits 0-4 of (C). + MOV C,A ;this is the extent byte. + MOV A,M ;now get the extra extent byte. + RAR + RAR + RAR + RAR + ANI 0FH + MOV B,A ;and save it in (B). + POP PSW ;get record number back to (A). + INX H ;check overflow byte 'r2'. + MOV L,M + INR L + DCR L + MVI L,6 ;prepare for error. + JNZ POSITN5 ;out of disk space error. + LXI H,32 ;store record number into fcb. + DAD D + MOV M,A + LXI H,12 ;and now check the extent byte. + DAD D + MOV A,C + SUB M ;same extent as before? + JNZ POSITN2 + LXI H,14 ;yes, check extra extent byte 's2' also. + DAD D + MOV A,B + SUB M + ANI 7FH + JZ POSITN3;same, we are almost done then. +; +; Get here when another extent is required. +; +POSITN2 PUSH B + PUSH D + CALL CLOSEIT ;close current extent. + POP D + POP B + MVI L,3 ;prepare for error. + LDA STATUS + INR A + JZ POSITN4 ;close error. + LXI H,12 ;put desired extent into fcb now. + DAD D + MOV M,C + LXI H,14 ;and store extra extent byte 's2'. + DAD D + MOV M,B + CALL OPENIT ;try and get this extent. + LDA STATUS ;was it there? + INR A + JNZ POSITN3 + POP B ;no. can we create a new one (writing?). 
+ PUSH B + MVI L,4 ;prepare for error. + INR C + JZ POSITN4 ;nope, reading unwritten space error. + CALL GETEMPTY;yes we can, try to find space. + MVI L,5 ;prepare for error. + LDA STATUS + INR A + JZ POSITN4 ;out of space? +; +; Normal return location. Clear error code and return. +; +POSITN3 POP B ;restore stack. + XRA A ;and clear error code byte. + JMP SETSTAT +; +; Error. Set the 's2' byte to indicate this (why?). +; +POSITN4 PUSH H + CALL GETS2 + MVI M,0C0H + POP H +; +; Return with error code (presently in L). +; +POSITN5 POP B + MOV A,L ;get error code. + STA STATUS + JMP SETS2B7 +; +; Read a random record. +; +READRAN MVI C,0FFH ;set 'read' status. + CALL POSITION;position the file to proper record. + CZ RDSEQ1 ;and read it as usual (if no errors). + RET +; +; Write to a random record. +; +WRITERAN:MVI C,0 ;set 'writing' flag. + CALL POSITION;position the file to proper record. + CZ WTSEQ1 ;and write as usual (if no errors). + RET +; +; Compute the random record number. Enter with (HL) pointing +; to a fcb an (DE) contains a relative location of a record +; number. On exit, (C) contains the 'r0' byte, (B) the 'r1' +; byte, and (A) the 'r2' byte. +; +; On return, the zero flag is set if the record is within +; bounds. Otherwise, an overflow occured. +; +COMPRAND:XCHG ;save fcb pointer in (DE). + DAD D ;compute relative position of record #. + MOV C,M ;get record number into (BC). + MVI B,0 + LXI H,12 ;now get extent. + DAD D + MOV A,M ;compute (BC)=(record #)+(extent)*128. + RRC ;move lower bit into bit 7. + ANI 80H ;and ignore all other bits. + ADD C ;add to our record number. + MOV C,A + MVI A,0 ;take care of any carry. + ADC B + MOV B,A + MOV A,M ;now get the upper bits of extent into + RRC ;bit positions 0-3. + ANI 0FH ;and ignore all others. + ADD B ;add this in to 'r1' byte. + MOV B,A + LXI H,14 ;get the 's2' byte (extra extent). + DAD D + MOV A,M + ADD A ;and shift it left 4 bits (bits 4-7). 
+ ADD A + ADD A + ADD A + PUSH PSW ;save carry flag (bit 0 of flag byte). + ADD B ;now add extra extent into 'r1'. + MOV B,A + PUSH PSW ;and save carry (overflow byte 'r2'). + POP H ;bit 0 of (L) is the overflow indicator. + MOV A,L + POP H ;and same for first carry flag. + ORA L ;either one of these set? + ANI 01H ;only check the carry flags. + RET +; +; Routine to setup the fcb (bytes 'r0', 'r1', 'r2') to +; reflect the last record used for a random (or other) file. +; This reads the directory and looks at all extents computing +; the largerst record number for each and keeping the maximum +; value only. Then 'r0', 'r1', and 'r2' will reflect this +; maximum record number. This is used to compute the space used +; by a random file. +; +RANSIZE MVI C,12 ;look thru directory for first entry with + CALL FINDFST ;this name. + LHLD PARAMS ;zero out the 'r0, r1, r2' bytes. + LXI D,33 + DAD D + PUSH H + MOV M,D ;note that (D)=0. + INX H + MOV M,D + INX H + MOV M,D +RANSIZ1 CALL CKFILPOS;is there an extent to process? + JZ RANSIZ3 ;no, we are done. + CALL FCB2HL ;set (HL) pointing to proper fcb in dir. + LXI D,15 ;point to last record in extent. + CALL COMPRAND;and compute random parameters. + POP H + PUSH H ;now check these values against those + MOV E,A ;already in fcb. + MOV A,C ;the carry flag will be set if those + SUB M ;in the fcb represent a larger size than + INX H ;this extent does. + MOV A,B + SBB M + INX H + MOV A,E + SBB M + JC RANSIZ2 + MOV M,E ;we found a larger (in size) extent. + DCX H ;stuff these values into fcb. + MOV M,B + DCX H + MOV M,C +RANSIZ2 CALL FINDNXT ;now get the next extent. + JMP RANSIZ1 ;continue til all done. +RANSIZ3 POP H ;we are done, restore the stack and + RET ;return. +; +; Function to return the random record position of a given +; file which has been read in sequential mode up to now. +; +SETRAN LHLD PARAMS ;point to fcb. + LXI D,32 ;and to last used record. + CALL COMPRAND;compute random position. 
+ LXI H,33 ;now stuff these values into fcb. + DAD D + MOV M,C ;move 'r0'. + INX H + MOV M,B ;and 'r1'. + INX H + MOV M,A ;and lastly 'r2'. + RET +; +; This routine select the drive specified in (ACTIVE) and +; update the login vector and bitmap table if this drive was +; not already active. +; +LOGINDRV:LHLD LOGIN ;get the login vector. + LDA ACTIVE ;get the default drive. + MOV C,A + CALL SHIFTR ;position active bit for this drive + PUSH H ;into bit 0. + XCHG + CALL SELECT ;select this drive. + POP H + CZ SLCTERR ;valid drive? + MOV A,L ;is this a newly activated drive? + RAR + RC + LHLD LOGIN ;yes, update the login vector. + MOV C,L + MOV B,H + CALL SETBIT + SHLD LOGIN ;and save. + JMP BITMAP ;now update the bitmap. +; +; Function to set the active disk number. +; +SETDSK LDA EPARAM ;get parameter passed and see if this + LXI H,ACTIVE;represents a change in drives. + CMP M + RZ + MOV M,A ;yes it does, log it in. + JMP LOGINDRV +; +; This is the 'auto disk select' routine. The firsst byte +; of the fcb is examined for a drive specification. If non +; zero then the drive will be selected and loged in. +; +AUTOSEL MVI A,0FFH ;say 'auto-select activated'. + STA AUTO + LHLD PARAMS ;get drive specified. + MOV A,M + ANI 1FH ;look at lower 5 bits. + DCR A ;adjust for (1=A, 2=B) etc. + STA EPARAM ;and save for the select routine. + CPI 1EH ;check for 'no change' condition. + JNC AUTOSL1 ;yes, don't change. + LDA ACTIVE ;we must change, save currently active + STA OLDDRV ;drive. + MOV A,M ;and save first byte of fcb also. + STA AUTOFLAG;this must be non-zero. + ANI 0E0H ;whats this for (bits 6,7 are used for + MOV M,A ;something)? + CALL SETDSK ;select and log in this drive. +AUTOSL1 LDA USERNO ;move user number into fcb. + LHLD PARAMS ;(* upper half of first byte *) + ORA M + MOV M,A + RET ;and return (all done). +; +; Function to return the current cp/m version number. +; +GETVER MVI A,022h ;version 2.2 + JMP SETSTAT +; +; Function to reset the disk system. 
+; +RSTDSK LXI H,0 ;clear write protect status and log + SHLD WRTPRT ;in vector. + SHLD LOGIN + XRA A ;select drive 'A'. + STA ACTIVE + LXI H,TBUFF ;setup default dma address. + SHLD USERDMA + CALL DEFDMA + JMP LOGINDRV;now log in drive 'A'. +; +; Function to open a specified file. +; +OPENFIL CALL CLEARS2 ;clear 's2' byte. + CALL AUTOSEL ;select proper disk. + JMP OPENIT ;and open the file. +; +; Function to close a specified file. +; +CLOSEFIL:CALL AUTOSEL ;select proper disk. + JMP CLOSEIT ;and close the file. +; +; Function to return the first occurence of a specified file +; name. If the first byte of the fcb is '?' then the name will +; not be checked (get the first entry no matter what). +; +GETFST MVI C,0 ;prepare for special search. + XCHG + MOV A,M ;is first byte a '?'? + CPI '?' + JZ GETFST1 ;yes, just get very first entry (zero length match). + CALL SETEXT ;get the extension byte from fcb. + MOV A,M ;is it '?'? if yes, then we want + CPI '?' ;an entry with a specific 's2' byte. + CNZ CLEARS2 ;otherwise, look for a zero 's2' byte. + CALL AUTOSEL ;select proper drive. + MVI C,15 ;compare bytes 0-14 in fcb (12&13 excluded). +GETFST1 CALL FINDFST ;find an entry and then move it into + JMP MOVEDIR ;the users dma space. +; +; Function to return the next occurence of a file name. +; +GETNXT LHLD SAVEFCB ;restore pointers. note that no + SHLD PARAMS ;other dbos calls are allowed. + CALL AUTOSEL ;no error will be returned, but the + CALL FINDNXT ;results will be wrong. + JMP MOVEDIR +; +; Function to delete a file by name. +; +DELFILE CALL AUTOSEL ;select proper drive. + CALL ERAFILE ;erase the file. + JMP STSTATUS;set status and return. +; +; Function to execute a sequential read of the specified +; record number. +; +READSEQ CALL AUTOSEL ;select proper drive then read. + JMP RDSEQ +; +; Function to write the net sequential record. +; +WRTSEQ CALL AUTOSEL ;select proper drive then write. + JMP WTSEQ +; +; Create a file function. 
+; +FCREATE CALL CLEARS2 ;clear the 's2' byte on all creates. + CALL AUTOSEL ;select proper drive and get the next + JMP GETEMPTY;empty directory space. +; +; Function to rename a file. +; +RENFILE CALL AUTOSEL ;select proper drive and then switch + CALL CHGNAMES;file names. + JMP STSTATUS +; +; Function to return the login vector. +; +GETLOG LHLD LOGIN + JMP GETPRM1 +; +; Function to return the current disk assignment. +; +GETCRNT LDA ACTIVE + JMP SETSTAT +; +; Function to set the dma address. +; +PUTDMA XCHG + SHLD USERDMA ;save in our space and then get to + JMP DEFDMA ;the bios with this also. +; +; Function to return the allocation vector. +; +GETALOC LHLD ALOCVECT + JMP GETPRM1 +; +; Function to return the read-only status vector. +; +GETROV LHLD WRTPRT + JMP GETPRM1 +; +; Function to set the file attributes (read-only, system). +; +SETATTR CALL AUTOSEL ;select proper drive then save attributes. + CALL SAVEATTR + JMP STSTATUS +; +; Function to return the address of the disk parameter block +; for the current drive. +; +GETPARM LHLD DISKPB +GETPRM1 SHLD STATUS + RET +; +; Function to get or set the user number. If (E) was (FF) +; then this is a request to return the current user number. +; Else set the user number from (E). +; +GETUSER LDA EPARAM ;get parameter. + CPI 0FFH ;get user number? + JNZ SETUSER + LDA USERNO ;yes, just do it. + JMP SETSTAT +SETUSER ANI 1FH ;no, we should set it instead. keep low + STA USERNO ;bits (0-4) only. + RET +; +; Function to read a random record from a file. +; +RDRANDOM:CALL AUTOSEL ;select proper drive and read. + JMP READRAN +; +; Function to compute the file size for random files. +; +WTRANDOM:CALL AUTOSEL ;select proper drive and write. + JMP WRITERAN +; +; Function to compute the size of a random file. +; +FILESIZE:CALL AUTOSEL ;select proper drive and check file length + JMP RANSIZE +; +; Function #37. This allows a program to log off any drives. 
+; On entry, set (DE) to contain a word with bits set for those +; drives that are to be logged off. The log-in vector and the +; write protect vector will be updated. This must be a M/PM +; special function. +; +LOGOFF LHLD PARAMS ;get drives to log off. + MOV A,L ;for each bit that is set, we want + CMA ;to clear that bit in (LOGIN) + MOV E,A ;and (WRTPRT). + MOV A,H + CMA + LHLD LOGIN ;reset the login vector. + ANA H + MOV D,A + MOV A,L + ANA E + MOV E,A + LHLD WRTPRT + XCHG + SHLD LOGIN ;and save. + MOV A,L ;now do the write protect vector. + ANA E + MOV L,A + MOV A,H + ANA D + MOV H,A + SHLD WRTPRT ;and save. all done. + RET +; +; Get here to return to the user. +; +GOBACK LDA AUTO ;was auto select activated? + ORA A + JZ GOBACK1 + LHLD PARAMS ;yes, but was a change made? + MVI M,0 ;(* reset first byte of fcb *) + LDA AUTOFLAG + ORA A + JZ GOBACK1 + MOV M,A ;yes, reset first byte properly. + LDA OLDDRV ;and get the old drive and select it. + STA EPARAM + CALL SETDSK +GOBACK1 LHLD USRSTACK;reset the users stack pointer. + SPHL + LHLD STATUS ;get return status. + MOV A,L ;force version 1.4 compatability. + MOV B,H + RET ;and go back to user. +; +; Function #40. This is a special entry to do random i/o. +; For the case where we are writing to unused disk space, this +; space will be zeroed out first. This must be a M/PM special +; purpose function, because why would any normal program even +; care about the previous contents of a sector about to be +; written over. +; +WTSPECL CALL AUTOSEL ;select proper drive. + MVI A,2 ;use special write mode. + STA MODE + MVI C,0 ;set write indicator. + CALL POSITN1 ;position the file. + CZ WTSEQ1 ;and write (if no errors). + RET +; +;************************************************************** +;* +;* BDOS data storage pool. +;* +;************************************************************** +; +EMPTYFCB:DB 0E5H ;empty directory segment indicator. +WRTPRT DW 0 ;write protect status for all 16 drives. 
+LOGIN DW 0 ;drive active word (1 bit per drive). +USERDMA DW 080H ;user's dma address (defaults to 80h). +; +; Scratch areas from parameter block. +; +SCRATCH1:DW 0 ;relative position within dir segment for file (0-3). +SCRATCH2:DW 0 ;last selected track number. +SCRATCH3:DW 0 ;last selected sector number. +; +; Disk storage areas from parameter block. +; +DIRBUF DW 0 ;address of directory buffer to use. +DISKPB DW 0 ;contains address of disk parameter block. +CHKVECT DW 0 ;address of check vector. +ALOCVECT:DW 0 ;address of allocation vector (bit map). +; +; Parameter block returned from the bios. +; +SECTORS DW 0 ;sectors per track from bios. +BLKSHFT DB 0 ;block shift. +BLKMASK DB 0 ;block mask. +EXTMASK DB 0 ;extent mask. +DSKSIZE DW 0 ;disk size from bios (number of blocks-1). +DIRSIZE DW 0 ;directory size. +ALLOC0 DW 0 ;storage for first bytes of bit map (dir space used). +ALLOC1 DW 0 +OFFSET DW 0 ;first usable track number. +XLATE DW 0 ;sector translation table address. +; +; +CLOSEFLG:DB 0 ;close flag (=0ffh is extent written ok). +RDWRTFLG:DB 0 ;read/write flag (0ffh=read, 0=write). +FNDSTAT DB 0 ;filename found status (0=found first entry). +MODE DB 0 ;I/o mode select (0=random, 1=sequential, 2=special random). +EPARAM DB 0 ;storage for register (E) on entry to bdos. +RELBLOCK:DB 0 ;relative position within fcb of block number written. +COUNTER DB 0 ;byte counter for directory name searches. +SAVEFCB DW 0,0 ;save space for address of fcb (for directory searches). +BIGDISK DB 0 ;if =0 then disk is > 256 blocks long. +AUTO DB 0 ;if non-zero, then auto select activated. +OLDDRV DB 0 ;on auto select, storage for previous drive. +AUTOFLAG:DB 0 ;if non-zero, then auto select changed drives. +SAVNXT DB 0 ;storage for next record number to access. +SAVEXT DB 0 ;storage for extent number of file. +SAVNREC DW 0 ;storage for number of records in file. +BLKNMBR DW 0 ;block number (physical sector) used within a file or logical sector. 
+LOGSECT DW 0 ;starting logical (128 byte) sector of block (physical sector). +FCBPOS DB 0 ;relative position within buffer for fcb of file of interest. +FILEPOS DW 0 ;files position within directory (0 to max entries -1). +; +; Disk directory buffer checksum bytes. One for each of the +; 16 possible drives. +; +CKSUMTBL:DB 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +; +; Extra space ? +; + DB 0,0,0,0 +; +;************************************************************** +;* +;* B I O S J U M P T A B L E +;* +;************************************************************** +; +BOOT JMP 0 ;NOTE WE USE FAKE DESTINATIONS +WBOOT JMP 0 +CONST JMP 0 +CONIN JMP 0 +CONOUT JMP 0 +LIST JMP 0 +PUNCH JMP 0 +READER JMP 0 +HOME JMP 0 +SELDSK JMP 0 +SETTRK JMP 0 +SETSEC JMP 0 +SETDMA JMP 0 +READ JMP 0 +WRITE JMP 0 +PRSTAT JMP 0 +SECTRN JMP 0 +; +;* +;****************** E N D O F C P / M ***************** +;* + diff --git a/runtime/CSharp/tests/issue-2693/test.sh b/runtime/CSharp/tests/issue-2693/test.sh new file mode 100644 index 0000000000..8efb0a1e0b --- /dev/null +++ b/runtime/CSharp/tests/issue-2693/test.sh @@ -0,0 +1,11 @@ +#!/usr/bin/bash + +dotnet restore +dotnet build +dotnet run -file cpm22.asm +if [[ "$?" != "0" ]] +then + echo "Issue 2693 test failed." + exit 1 +fi + diff --git a/runtime/CSharp/tests/issue-3079/Arithmetic.g4 b/runtime/CSharp/tests/issue-3079/Arithmetic.g4 new file mode 100644 index 0000000000..672ebbaca3 --- /dev/null +++ b/runtime/CSharp/tests/issue-3079/Arithmetic.g4 @@ -0,0 +1,33 @@ + +// Template generated code from Antlr4BuildTasks.dotnet-antlr v 2.2 + +grammar Arithmetic; + +file : expression (SEMI expression)* EOF; +expression : expression POW expression | expression (TIMES | DIV) expression | expression (PLUS | MINUS) expression | LPAREN expression RPAREN | (PLUS | MINUS)* atom ; +atom : scientific | variable ; +scientific : SCIENTIFIC_NUMBER ; +variable : VARIABLE ; + +VARIABLE : VALID_ID_START VALID_ID_CHAR* ; +SCIENTIFIC_NUMBER : NUMBER (E SIGN? 
UNSIGNED_INTEGER)? ; +LPAREN : '(' ; +RPAREN : ')' ; +PLUS : '+' ; +MINUS : '-' ; +TIMES : '*' ; +DIV : '/' ; +GT : '>' ; +LT : '<' ; +EQ : '=' ; +POINT : '.' ; +POW : '^' ; +SEMI : ';' ; +WS : [ \r\n\t] + -> channel(HIDDEN) ; + +fragment VALID_ID_START : ('a' .. 'z') | ('A' .. 'Z') | '_' ; +fragment VALID_ID_CHAR : VALID_ID_START | ('0' .. '9') ; +fragment NUMBER : ('0' .. '9') + ('.' ('0' .. '9') +)? ; +fragment UNSIGNED_INTEGER : ('0' .. '9')+ ; +fragment E : 'E' | 'e' ; +fragment SIGN : ('+' | '-') ; diff --git a/runtime/CSharp/tests/issue-3079/ErrorListener.cs b/runtime/CSharp/tests/issue-3079/ErrorListener.cs new file mode 100644 index 0000000000..094eda52c7 --- /dev/null +++ b/runtime/CSharp/tests/issue-3079/ErrorListener.cs @@ -0,0 +1,20 @@ +// Template generated code from Antlr4BuildTasks.dotnet-antlr v 2.2 + +using Antlr4.Runtime; +using Antlr4.Runtime.Misc; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; + +public class ErrorListener : ConsoleErrorListener +{ + public bool had_error; + + public override void SyntaxError(TextWriter output, IRecognizer recognizer, S offendingSymbol, int line, + int col, string msg, RecognitionException e) + { + had_error = true; + base.SyntaxError(output, recognizer, offendingSymbol, line, col, msg, e); + } +} diff --git a/runtime/CSharp/tests/issue-3079/Program.cs b/runtime/CSharp/tests/issue-3079/Program.cs new file mode 100644 index 0000000000..3f74094153 --- /dev/null +++ b/runtime/CSharp/tests/issue-3079/Program.cs @@ -0,0 +1,110 @@ +// Template generated code from Antlr4BuildTasks.dotnet-antlr v 2.2 + +using Antlr4.Runtime; +using Antlr4.Runtime.Tree; +using System; +using System.IO; +using System.Linq; +using System.Text; +using System.Runtime.CompilerServices; + +public class Program +{ + public static Parser Parser { get; set; } + public static Lexer Lexer { get; set; } + public static ITokenStream TokenStream { get; set; } + public static IParseTree Tree { get; set; } + 
public static IParseTree Parse(string input) + { + var str = new AntlrInputStream(input); + var lexer = new ArithmeticLexer(str); + Lexer = lexer; + var tokens = new CommonTokenStream(lexer); + TokenStream = tokens; + var parser = new ArithmeticParser(tokens); + Parser = parser; + var tree = parser.file(); + Tree = tree; + return tree; + } + + static void Main(string[] args) + { + bool show_tree = false; + bool show_tokens = false; + string file_name = null; + string input = null; + for (int i = 0; i < args.Length; ++i) + { + if (args[i].Equals("-tokens")) + { + show_tokens = true; + continue; + } + else if (args[i].Equals("-tree")) + { + show_tree = true; + continue; + } + else if (args[i].Equals("-input")) + input = args[++i]; + else if (args[i].Equals("-file")) + file_name = args[++i]; + } + ICharStream str = null; + if (input == null && file_name == null) + { + StringBuilder sb = new StringBuilder(); + int ch; + while ((ch = System.Console.Read()) != -1) + { + sb.Append((char)ch); + } + input = sb.ToString(); + +str = CharStreams.fromString(input); + } else if (input != null) + { + str = CharStreams.fromString(input); + } else if (file_name != null) + { + str = CharStreams.fromPath(file_name); + } + var lexer = new ArithmeticLexer(str); + if (show_tokens) + { + StringBuilder new_s = new StringBuilder(); + for (int i = 0; ; ++i) + { + var ro_token = lexer.NextToken(); + var token = (CommonToken)ro_token; + token.TokenIndex = i; + new_s.AppendLine(token.ToString()); + if (token.Type == Antlr4.Runtime.TokenConstants.EOF) + break; + } + System.Console.Error.WriteLine(new_s.ToString()); + lexer.Reset(); + } + var tokens = new CommonTokenStream(lexer); + var parser = new ArithmeticParser(tokens); + var listener_lexer = new ErrorListener(); + var listener_parser = new ErrorListener(); + lexer.AddErrorListener(listener_lexer); + parser.AddErrorListener(listener_parser); + var tree = parser.file(); + if (listener_lexer.had_error || listener_parser.had_error) + { + 
System.Console.Error.WriteLine("parse failed."); + } + else + { + System.Console.Error.WriteLine("parse succeeded."); + } + if (show_tree) + { + System.Console.Error.WriteLine(tree.ToStringTree(parser)); + } + System.Environment.Exit(listener_lexer.had_error || listener_parser.had_error ? 1 : 0); + } +} diff --git a/runtime/CSharp/tests/issue-3079/Test.csproj b/runtime/CSharp/tests/issue-3079/Test.csproj new file mode 100644 index 0000000000..6984882ae5 --- /dev/null +++ b/runtime/CSharp/tests/issue-3079/Test.csproj @@ -0,0 +1,65 @@ + + + + net5.0 + Exe + + + + + + + + ../../../../tool/target/antlr4-*-SNAPSHOT-complete.jar + + + + + + + + + PackageReference + + + 1701;1702;3021 + + + + + + + + + + + + + + + + diff --git a/runtime/CSharp/tests/issue-3079/Test.sln b/runtime/CSharp/tests/issue-3079/Test.sln new file mode 100644 index 0000000000..f4f77747ac --- /dev/null +++ b/runtime/CSharp/tests/issue-3079/Test.sln @@ -0,0 +1,31 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.31019.35 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Test", "Test.csproj", "{1B229E17-E0E5-4D3B-8978-A4E61B9233E5}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Antlr4", "..\..\src\Antlr4.csproj", "{95247929-4C60-4CDF-B202-1BAE1C12AA57}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {1B229E17-E0E5-4D3B-8978-A4E61B9233E5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1B229E17-E0E5-4D3B-8978-A4E61B9233E5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1B229E17-E0E5-4D3B-8978-A4E61B9233E5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1B229E17-E0E5-4D3B-8978-A4E61B9233E5}.Release|Any CPU.Build.0 = Release|Any CPU + {95247929-4C60-4CDF-B202-1BAE1C12AA57}.Debug|Any CPU.ActiveCfg 
= Debug|Any CPU + {95247929-4C60-4CDF-B202-1BAE1C12AA57}.Debug|Any CPU.Build.0 = Debug|Any CPU + {95247929-4C60-4CDF-B202-1BAE1C12AA57}.Release|Any CPU.ActiveCfg = Release|Any CPU + {95247929-4C60-4CDF-B202-1BAE1C12AA57}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4819731D-3C62-4CFA-A99C-09103728C086} + EndGlobalSection +EndGlobal diff --git a/runtime/CSharp/tests/issue-3079/readme.md b/runtime/CSharp/tests/issue-3079/readme.md new file mode 100644 index 0000000000..e68f2a53bf --- /dev/null +++ b/runtime/CSharp/tests/issue-3079/readme.md @@ -0,0 +1,10 @@ +# How to test Issue 3079 + +1) Build the Antlr Tool first. +2) `bash test.sh` in this directory. + +NB: The CSharp runtime source is modified by the test.sh script to +change "debug = true;" for ParserATNSimulator.cs. There is no way to +change the value of the static readonly variable using System.Reflection +after the static initializer for the class has loaded. This is why +it is change in the source here for this test and only this test. diff --git a/runtime/CSharp/tests/issue-3079/test.sh b/runtime/CSharp/tests/issue-3079/test.sh new file mode 100644 index 0000000000..49f6313995 --- /dev/null +++ b/runtime/CSharp/tests/issue-3079/test.sh @@ -0,0 +1,14 @@ +# + +cat ../../src/Atn/ParserATNSimulator.cs > ParserATNSimulator.save +cat ParserATNSimulator.save | sed 's/bool debug = false;/bool debug = true;/' > ../../src/Atn/ParserATNSimulator.cs +dotnet restore +dotnet build +dotnet run -input "1+2" +if [[ "$?" != "0" ]] +then + echo "Issue 2693 test failed." + exit 1 +else + echo "Test passed--did not crash." 
+fi diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt index 3900781515..863dd9d618 100644 --- a/runtime/Cpp/CMakeLists.txt +++ b/runtime/Cpp/CMakeLists.txt @@ -1,6 +1,11 @@ # -*- mode:cmake -*- -cmake_minimum_required (VERSION 2.8) -# 2.8 needed because of ExternalProject +cmake_minimum_required (VERSION 3.15) +# 3.14 needed because of FetchContent +# 3.15 needed to avid spew of warnings related to overriding cl command line flags + +set(CMAKE_MACOSX_RPATH OFF) + +enable_testing() # Detect build type, fallback to release and throw a warning if use didn't specify any if(NOT CMAKE_BUILD_TYPE) @@ -19,30 +24,20 @@ if(NOT WITH_DEMO) FORCE) endif(NOT WITH_DEMO) -option(WITH_LIBCXX "Building with clang++ and libc++(in Linux). To enable with: -DWITH_LIBCXX=On" On) +option(WITH_LIBCXX "Building with clang++ and libc++(in Linux). To enable with: -DWITH_LIBCXX=On" Off) option(WITH_STATIC_CRT "(Visual C++) Enable to statically link CRT, which avoids requiring users to install the redistribution package. 
To disable with: -DWITH_STATIC_CRT=Off" On) +option(DISABLE_WARNINGS "Suppress compiler warnings for all built ANTLR targets" OFF) + +cmake_policy(SET CMP0091 NEW) # Enable use of CMAKE_MSVC_RUNTIME_LIBRARY +if(WITH_STATIC_CRT) + set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") +else() + set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>DLL") +endif(WITH_STATIC_CRT) project(LIBANTLR4) -if(CMAKE_VERSION VERSION_EQUAL "3.0.0" OR - CMAKE_VERSION VERSION_GREATER "3.0.0") - CMAKE_POLICY(SET CMP0026 NEW) - CMAKE_POLICY(SET CMP0054 OLD) - CMAKE_POLICY(SET CMP0045 OLD) - CMAKE_POLICY(SET CMP0042 OLD) -endif() - -if(CMAKE_VERSION VERSION_EQUAL "3.3.0" OR - CMAKE_VERSION VERSION_GREATER "3.3.0") - CMAKE_POLICY(SET CMP0059 OLD) - CMAKE_POLICY(SET CMP0054 OLD) -endif() - -if(CMAKE_SYSTEM_NAME MATCHES "Linux") - find_package(PkgConfig REQUIRED) - pkg_check_modules(UUID REQUIRED uuid) -endif() if(APPLE) find_library(COREFOUNDATION_LIBRARY CoreFoundation) endif() @@ -65,26 +60,35 @@ if(WITH_DEMO) endif() endif(WITH_DEMO) -if(MSVC_VERSION) +if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") set(MY_CXX_WARNING_FLAGS " /W4") + + if(DISABLE_WARNINGS) + set(MY_CXX_WARNING_FLAGS " /w") + endif() else() set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W") + + if(DISABLE_WARNINGS) + set(MY_CXX_WARNING_FLAGS " -w") + endif() endif() +# Define USE_UTF8_INSTEAD_OF_CODECVT macro. +# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_UTF8_INSTEAD_OF_CODECVT") + # Initialize CXXFLAGS. 
-if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0) - set(CMAKE_CXX_STANDARD 11) - set(CMAKE_CXX_STANDARD_REQUIRED ON) -else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -std=c++11") - set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -std=c++11") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -std=c++11") - set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -std=c++11") +if(NOT DEFINED CMAKE_CXX_STANDARD) + # only set CMAKE_CXX_STANDARD if not already set + # this allows the standard to be set by the caller, for example with -DCMAKE_CXX_STANDARD:STRING=17 + set(CMAKE_CXX_STANDARD 17) endif() +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}") -if(MSVC_VERSION) + +if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MP ${MY_CXX_WARNING_FLAGS}") set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLGAS}") @@ -96,21 +100,21 @@ else() set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}") endif() -# Compiler-specific C++11 activation. -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Intel") +# Compiler-specific C++17 activation. +if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Intel") execute_process( COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) # Just g++-5.0 and greater contain header. 
(test in ubuntu) if(NOT (GCC_VERSION VERSION_GREATER 5.0 OR GCC_VERSION VERSION_EQUAL 5.0)) message(FATAL_ERROR "${PROJECT_NAME} requires g++ 5.0 or greater.") endif () -elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND ANDROID) +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND ANDROID) # Need -Os cflag and cxxflags here to work with exception handling on armeabi. # see https://github.com/android-ndk/ndk/issues/573 # and without -stdlib=libc++ cxxflags -elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -stdlib=libc++") -elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND ( CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "FreeBSD") ) +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -stdlib=libc++") +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND ( CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "FreeBSD") ) execute_process( COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE CLANG_VERSION) if(NOT (CLANG_VERSION VERSION_GREATER 4.2.1 OR CLANG_VERSION VERSION_EQUAL 4.2.1)) @@ -121,11 +125,11 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND ( CMAKE_SYSTEM_NAME MATCH set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") endif() elseif(MSVC_VERSION GREATER 1800 OR MSVC_VERSION EQUAL 1800) - # Visual Studio 2012+ supports c++11 features + # Visual Studio 2012+ supports C++17 features elseif(CMAKE_SYSTEM_NAME MATCHES "Emscripten") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -stdlib=libc++") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -stdlib=libc++") else() - message(FATAL_ERROR "Your C++ compiler does not support C++11.") + message(FATAL_ERROR "Your C++ compiler does not support C++17.") endif() @@ -134,69 +138,79 @@ if(WITH_DEMO) add_subdirectory(demo) endif(WITH_DEMO) +include(GNUInstallDirs) + # Generate CMake Package Files only if install is active if 
(ANTLR4_INSTALL) - include(GNUInstallDirs) include(CMakePackageConfigHelpers) if(NOT ANTLR4_CMAKE_DIR) - set(ANTLR4_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/antlr4 CACHE STRING + set(ANTLR4_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake CACHE STRING "Installation directory for cmake files." FORCE ) endif(NOT ANTLR4_CMAKE_DIR) - set(version_config ${PROJECT_BINARY_DIR}/antlr4-config-version.cmake) + set(version_runtime_config ${PROJECT_BINARY_DIR}/antlr4-runtime-config-version.cmake) + set(version_generator_config ${PROJECT_BINARY_DIR}/antlr4-generator-config-version.cmake) set(project_runtime_config ${PROJECT_BINARY_DIR}/antlr4-runtime-config.cmake) set(project_generator_config ${PROJECT_BINARY_DIR}/antlr4-generator-config.cmake) set(targets_export_name antlr4-targets) set(ANTLR4_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE STRING "Installation directory for libraries, relative to ${CMAKE_INSTALL_PREFIX}.") - + set(ANTLR4_INCLUDE_DIR ${CMAKE_INSTALL_INCLUDEDIR}/antlr4-runtime CACHE STRING "Installation directory for include files, relative to ${CMAKE_INSTALL_PREFIX}.") configure_package_config_file( cmake/antlr4-runtime.cmake.in ${project_runtime_config} - INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR} - PATH_VARS + INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-runtime + PATH_VARS ANTLR4_INCLUDE_DIR ANTLR4_LIB_DIR ) - -configure_package_config_file( + + configure_package_config_file( cmake/antlr4-generator.cmake.in ${project_generator_config} - INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR} - PATH_VARS + INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-generator + PATH_VARS ANTLR4_INCLUDE_DIR ANTLR4_LIB_DIR ) - + + write_basic_package_version_file( + ${version_runtime_config} + VERSION ${ANTLR_VERSION} + COMPATIBILITY SameMajorVersion ) + write_basic_package_version_file( - ${version_config} + ${version_generator_config} VERSION ${ANTLR_VERSION} COMPATIBILITY SameMajorVersion ) install(EXPORT ${targets_export_name} - DESTINATION ${ANTLR4_CMAKE_DIR} ) + DESTINATION 
${ANTLR4_CMAKE_DIR}/antlr4-runtime ) install(FILES ${project_runtime_config} - ${project_generator_config} - ${version_config} - DESTINATION ${ANTLR4_CMAKE_DIR} ) + ${version_runtime_config} + DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-runtime ) + + install(FILES ${project_generator_config} + ${version_generator_config} + DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-generator ) endif(ANTLR4_INSTALL) if(EXISTS LICENSE.txt) install(FILES LICENSE.txt - DESTINATION "share/doc/libantlr4") -elseif(EXISTS ../../LICENSE.txt) + DESTINATION ${CMAKE_INSTALL_DOCDIR}) +elseif(EXISTS ../../LICENSE.txt) install(FILES ../../LICENSE.txt - DESTINATION "share/doc/libantlr4") + DESTINATION ${CMAKE_INSTALL_DOCDIR}) endif() -install(FILES README.md VERSION - DESTINATION "share/doc/libantlr4") +install(FILES README.md VERSION + DESTINATION ${CMAKE_INSTALL_DOCDIR}) set(CPACK_PACKAGE_CONTACT "antlr-discussion@googlegroups.com") set(CPACK_PACKAGE_VERSION ${ANTLR_VERSION}) diff --git a/runtime/Cpp/CMakeSettings.json b/runtime/Cpp/CMakeSettings.json index 9eec934673..b17e0da38e 100644 --- a/runtime/Cpp/CMakeSettings.json +++ b/runtime/Cpp/CMakeSettings.json @@ -6,13 +6,21 @@ "generator": "Ninja", "configurationType": "Debug", "inheritEnvironments": [ "msvc_x86" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { + "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", @@ -23,13 +31,21 @@ "generator": "Ninja", "configurationType": "RelWithDebInfo", "inheritEnvironments": [ "msvc_x86" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": 
"${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { + "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", @@ -40,13 +56,21 @@ "generator": "Ninja", "configurationType": "Debug", "inheritEnvironments": [ "msvc_x64_x64" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { + "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", @@ -57,13 +81,21 @@ "generator": "Ninja", "configurationType": "RelWithDebInfo", "inheritEnvironments": [ "msvc_x64_x64" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { + "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", diff --git a/runtime/Cpp/README.md b/runtime/Cpp/README.md index 3a3523c794..fb5b22da4e 100644 --- a/runtime/Cpp/README.md +++ b/runtime/Cpp/README.md @@ -5,18 +5,18 @@ This folder contains the C++ runtime support for ANTLR. 
See [the canonical antl ## Authors and major contributors ANTLR 4 is the result of substantial effort of the following people: - + * [Terence Parr](http://www.cs.usfca.edu/~parrt/), parrt@cs.usfca.edu ANTLR project lead and supreme dictator for life [University of San Francisco](http://www.usfca.edu/) -* [Sam Harwell](http://tunnelvisionlabs.com/) +* [Sam Harwell](http://tunnelvisionlabs.com/) Tool co-author, Java and C# target) The C++ target has been the work of the following people: * Dan McLaughlin, dan.mclaughlin@gmail.com (initial port, got code to compile) * David Sisson, dsisson@google.com (initial port, made the runtime C++ tests runnable) -* [Mike Lischke](www.soft-gems.net), mike@lischke-online.de (brought the initial port to a working library, made most runtime tests passing) +* [Mike Lischke](http://www.soft-gems.net), mike@lischke-online.de (brought the initial port to a working library, made most runtime tests passing) ## Other contributors @@ -35,16 +35,17 @@ The C++ target has been the work of the following people: ### Build + Usage Notes -The minimum C++ version to compile the ANTLR C++ runtime with is C++11. The supplied projects can built the runtime either as static or dynamic library, as both 32bit and 64bit arch. The macOS project contains a target for iOS and can also be built using cmake (instead of XCode). +The minimum C++ version to compile the ANTLR C++ runtime with is C++17. The supplied projects can built the runtime either as static or dynamic library, as both 32bit and 64bit arch. The macOS project contains a target for iOS and can also be built using cmake (instead of XCode). Include the antlr4-runtime.h umbrella header in your target application to get everything needed to use the library. If you are compiling with cmake, the minimum version required is cmake 2.8. +By default, the libraries produced by the CMake build target C++17. If you want to target a different C++ standard, you can explicitly pass the standard - e.g. 
`-DCMAKE_CXX_STANDARD=17`. #### Compiling on Windows with Visual Studio using he Visual Studio projects -Simply open the VS project from the runtime folder (VS 2013+) and build it. +Simply open the VS project from the runtime folder (VS 2019+) and build it. -#### Compiling on Windows using cmake with Visual Studio VS2017 and later +#### Compiling on Windows using cmake with Visual Studio VS2019 and later Use the "Open Folder" Feature from the File->Open->Folder menu to open the runtime/Cpp directory. It will automatically use the CMake description to open up a Visual Studio Solution. @@ -55,13 +56,13 @@ Either open the included XCode project and build that or use the cmake compilati Try run cmake -DCMAKE_ANDROID_NDK=/folder/of/android_ndkr17_and_above -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_API=14 -DCMAKE_ANDROID_ARCH_ABI=x86 -DCMAKE_ANDROID_STL_TYPE=c++_shared -DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=clang -DCMAKE_BUILD_TYPE=Release /folder/antlr4_src_dir -G Ninja. #### Compiling on Linux -- cd /runtime/Cpp (this is where this readme is located) +- cd \/runtime/Cpp (this is where this readme is located) - mkdir build && mkdir run && cd build - cmake .. -DANTLR_JAR_LOCATION=full/path/to/antlr4-4.5.4-SNAPSHOT.jar -DWITH_DEMO=True - make -- DESTDIR=/runtime/Cpp/run make install +- DESTDIR=\/runtime/Cpp/run make install -If you don't want to build the demo then simply run cmake without parameters. +If you don't want to build the demo then replace the "cmake .. -DANTLR_JAR_LOCATION<...>" command in the above recipe with "cmake .." without any further parameters. There is another cmake script available in the subfolder cmake/ for those who prefer the superbuild cmake pattern. #### CMake Package support @@ -69,5 +70,3 @@ If the CMake variable 'ANTLR4_INSTALL' is set, CMake Packages will be build and They expose two packages: antlr4_runtime and antlr4_generator which can be referenced to ease up the use of the ANTLR Generator and runtime. 
Use and Sample can be found [here](cmake/Antlr4Package.md) - - diff --git a/runtime/Cpp/VERSION b/runtime/Cpp/VERSION index ef216a53f5..650edfe9ae 100644 --- a/runtime/Cpp/VERSION +++ b/runtime/Cpp/VERSION @@ -1 +1 @@ -4.8 +4.13.2 diff --git a/runtime/Cpp/cmake/Antlr4Package.md b/runtime/Cpp/cmake/Antlr4Package.md index 17a630379b..6abc4fd452 100644 --- a/runtime/Cpp/cmake/Antlr4Package.md +++ b/runtime/Cpp/cmake/Antlr4Package.md @@ -2,17 +2,17 @@ ## The `antlr4-generator` Package -To use the Package you must insert a +To use the Package you must insert a ```cmake find_package(antlr4-generator REQUIRED) ``` line in your `CMakeList.txt` file. -The package exposes a function `antlr4_generate` that generates the required setup to call ANTLR for a +The package exposes a function `antlr4_generate` that generates the required setup to call ANTLR for a given input file during build. The following table lists the parameters that can be used with the function: - + Argument# | Required | Default | Use ----------|-----------|---------|--- 0 | Yes | n/a | Unique target name. It is used to generate CMake Variables to reference the various outputs of the generation @@ -42,7 +42,7 @@ Output variable | Meaning ```cmake # generate parser with visitor classes. 
# put the classes in C++ namespace 'antlrcpptest::' - antlr4_generate( + antlr4_generate( antlrcpptest_parser ${CMAKE_CURRENT_SOURCE_DIR}/TLexer.g4 LEXER @@ -56,7 +56,7 @@ Output variable | Meaning ## The `antlr4-runtime` Package -To use the Package you must insert a +To use the Package you must insert a ```cmake find_package(antlr4-runtime REQUIRED) ``` @@ -85,7 +85,7 @@ include_directories( ${ANTLR4_INCLUDE_DIR} ) add_dependencies( Parsertest antlr4_shared ) # add runtime to project link libraries -target_link_libraries( Parsertest PRIVATE +target_link_libraries( Parsertest PRIVATE antlr4_shared) ``` @@ -94,12 +94,12 @@ target_link_libraries( Parsertest PRIVATE # Bring in the required packages find_package(antlr4-runtime REQUIRED) find_package(antlr4-generator REQUIRED) - + # Set path to generator - set(ANTLR4_JAR_LOCATION ${PROJECT_SOURCE_DIR}/thirdparty/antlr/antlr-4.8-complete.jar) - + set(ANTLR4_JAR_LOCATION ${PROJECT_SOURCE_DIR}/thirdparty/antlr/antlr-4.13.2-complete.jar) + # generate lexer - antlr4_generate( + antlr4_generate( antlrcpptest_lexer ${CMAKE_CURRENT_SOURCE_DIR}/TLexer.g4 LEXER @@ -107,9 +107,9 @@ target_link_libraries( Parsertest PRIVATE FALSE "antlrcpptest" ) - + # generate parser - antlr4_generate( + antlr4_generate( antlrcpptest_parser ${CMAKE_CURRENT_SOURCE_DIR}/TParser.g4 PARSER @@ -119,18 +119,18 @@ target_link_libraries( Parsertest PRIVATE "${ANTLR4_TOKEN_FILES_antlrcpptest_lexer}" "${ANTLR4_TOKEN_DIRECTORY_antlrcpptest_lexer}" ) - + # add directories for generated include files include_directories( ${PROJECT_BINARY_DIR} ${ANTLR4_INCLUDE_DIR} ${ANTLR4_INCLUDE_DIR_antlrcpptest_lexer} ${ANTLR4_INCLUDE_DIR_antlrcpptest_parser} ) - + # add generated source files add_executable( Parsertest main.cpp ${ANTLR4_SRC_FILES_antlrcpptest_lexer} ${ANTLR4_SRC_FILES_antlrcpptest_parser} ) - + # add required runtime library add_dependencies( Parsertest antlr4_shared ) - - target_link_libraries( Parsertest PRIVATE + + target_link_libraries( Parsertest 
PRIVATE antlr4_shared) - + ``` - + diff --git a/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake b/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake index db8ed6f402..3534a25850 100644 --- a/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake +++ b/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake @@ -1,5 +1,9 @@ cmake_minimum_required(VERSION 3.7) +if(POLICY CMP0114) + cmake_policy(SET CMP0114 NEW) +endif() + include(ExternalProject) set(ANTLR4_ROOT ${CMAKE_CURRENT_BINARY_DIR}/antlr4_runtime/src/antlr4_runtime) @@ -11,12 +15,16 @@ if(NOT DEFINED ANTLR4_TAG) set(ANTLR4_TAG master) endif() +# Ensure that the include dir already exists at configure time (to avoid cmake erroring +# on non-existent include dirs) +file(MAKE_DIRECTORY "${ANTLR4_INCLUDE_DIRS}") + if(${CMAKE_GENERATOR} MATCHES "Visual Studio.*") - set(ANTLR4_OUTPUT_DIR ${ANTLR4_ROOT}/runtime/Cpp/dist/$(Configuration)) + set(ANTLR4_OUTPUT_DIR ${ANTLR4_ROOT}/runtime/Cpp/runtime/$(Configuration)) elseif(${CMAKE_GENERATOR} MATCHES "Xcode.*") - set(ANTLR4_OUTPUT_DIR ${ANTLR4_ROOT}/runtime/Cpp/dist/$(CONFIGURATION)) + set(ANTLR4_OUTPUT_DIR ${ANTLR4_ROOT}/runtime/Cpp/runtime/$(CONFIGURATION)) else() - set(ANTLR4_OUTPUT_DIR ${ANTLR4_ROOT}/runtime/Cpp/dist) + set(ANTLR4_OUTPUT_DIR ${ANTLR4_ROOT}/runtime/Cpp/runtime) endif() if(MSVC) @@ -38,7 +46,7 @@ else() set(ANTLR4_SHARED_LIBRARIES ${ANTLR4_OUTPUT_DIR}/libantlr4-runtime.dll.a) set(ANTLR4_RUNTIME_LIBRARIES - ${ANTLR4_OUTPUT_DIR}/cygantlr4-runtime-4.8.dll) + ${ANTLR4_OUTPUT_DIR}/cygantlr4-runtime-4.13.2.dll) elseif(APPLE) set(ANTLR4_RUNTIME_LIBRARIES ${ANTLR4_OUTPUT_DIR}/libantlr4-runtime.dylib) @@ -88,6 +96,9 @@ if(ANTLR4_ZIP_REPOSITORY) CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} -DWITH_STATIC_CRT:BOOL=${ANTLR4_WITH_STATIC_CRT} + -DDISABLE_WARNINGS:BOOL=ON + # -DCMAKE_CXX_STANDARD:STRING=17 # if desired, compile the runtime with a different C++ standard + # -DCMAKE_CXX_STANDARD:STRING=${CMAKE_CXX_STANDARD} # alternatively, compile the runtime with the same C++ standard as the 
outer project INSTALL_COMMAND "" EXCLUDE_FROM_ALL 1) else() @@ -104,11 +115,14 @@ else() CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} -DWITH_STATIC_CRT:BOOL=${ANTLR4_WITH_STATIC_CRT} + -DDISABLE_WARNINGS:BOOL=ON + # -DCMAKE_CXX_STANDARD:STRING=17 # if desired, compile the runtime with a different C++ standard + # -DCMAKE_CXX_STANDARD:STRING=${CMAKE_CXX_STANDARD} # alternatively, compile the runtime with the same C++ standard as the outer project INSTALL_COMMAND "" EXCLUDE_FROM_ALL 1) endif() -# Seperate build step as rarely people want both +# Separate build step as rarely people want both set(ANTLR4_BUILD_DIR ${ANTLR4_ROOT}) if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14.0") # CMake 3.14 builds in above's SOURCE_SUBDIR when BUILD_IN_SOURCE is true @@ -131,6 +145,10 @@ add_library(antlr4_static STATIC IMPORTED) add_dependencies(antlr4_static antlr4_runtime-build_static) set_target_properties(antlr4_static PROPERTIES IMPORTED_LOCATION ${ANTLR4_STATIC_LIBRARIES}) +target_include_directories(antlr4_static + INTERFACE + ${ANTLR4_INCLUDE_DIRS} +) ExternalProject_Add_Step( antlr4_runtime @@ -148,6 +166,11 @@ add_library(antlr4_shared SHARED IMPORTED) add_dependencies(antlr4_shared antlr4_runtime-build_shared) set_target_properties(antlr4_shared PROPERTIES IMPORTED_LOCATION ${ANTLR4_RUNTIME_LIBRARIES}) +target_include_directories(antlr4_shared + INTERFACE + ${ANTLR4_INCLUDE_DIRS} +) + if(ANTLR4_SHARED_LIBRARIES) set_target_properties(antlr4_shared PROPERTIES IMPORTED_IMPLIB ${ANTLR4_SHARED_LIBRARIES}) diff --git a/runtime/Cpp/cmake/FindANTLR.cmake b/runtime/Cpp/cmake/FindANTLR.cmake index 5ff866fc2b..695cc17b47 100644 --- a/runtime/Cpp/cmake/FindANTLR.cmake +++ b/runtime/Cpp/cmake/FindANTLR.cmake @@ -2,7 +2,7 @@ find_package(Java QUIET COMPONENTS Runtime) if(NOT ANTLR_EXECUTABLE) find_program(ANTLR_EXECUTABLE - NAMES antlr.jar antlr4.jar antlr-4.jar antlr-4.8-complete.jar) + NAMES antlr.jar antlr4.jar antlr-4.jar antlr-4.13.2-complete.jar) endif() 
if(ANTLR_EXECUTABLE AND Java_JAVA_EXECUTABLE) @@ -14,7 +14,7 @@ if(ANTLR_EXECUTABLE AND Java_JAVA_EXECUTABLE) OUTPUT_STRIP_TRAILING_WHITESPACE) if(ANTLR_COMMAND_RESULT EQUAL 0) - string(REGEX MATCH "Version [0-9]+(\\.[0-9])*" ANTLR_VERSION ${ANTLR_COMMAND_OUTPUT}) + string(REGEX MATCH "Version [0-9]+(\\.[0-9]+)*" ANTLR_VERSION ${ANTLR_COMMAND_OUTPUT}) string(REPLACE "Version " "" ANTLR_VERSION ${ANTLR_VERSION}) else() message( diff --git a/runtime/Cpp/cmake/README.md b/runtime/Cpp/cmake/README.md index 77e9da6c64..da8e4dedfb 100644 --- a/runtime/Cpp/cmake/README.md +++ b/runtime/Cpp/cmake/README.md @@ -6,7 +6,7 @@ Here is how you can use this external project to create the antlr4cpp demo to st 1. Make a subfolder cmake 2. Copy the files in this folder to srcfolder/cmake 3. Cut below and use it to create srcfolder/CMakeLists.txt - 4. Copy main.cpp, TLexer.g4 and TParser.g4 to ./srcfolder/ from [here](https://github.com/antlr/antlr4/tree/master/runtime/Cpp/demo) + 4. Copy main.cpp, TLexer.g4 and TParser.g4 to srcfolder/ from [here](https://github.com/antlr/antlr4/tree/master/runtime/Cpp/demo) 2. Make a build folder e.g. ~/buildfolder/ 3. From the buildfolder, run `cmake ~/srcfolder; make` @@ -16,22 +16,30 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.7 FATAL_ERROR) list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake) -# compiler must be 11 or 14 -set(CMAKE_CXX_STANDARD 11) +# compiler must be 17 +set(CMAKE_CXX_STANDARD 17) # required if linking to static library add_definitions(-DANTLR4CPP_STATIC) # using /MD flag for antlr4_runtime (for Visual C++ compilers only) set(ANTLR4_WITH_STATIC_CRT OFF) + +# Specify the version of the antlr4 library needed for this project. +# By default the latest version of antlr4 will be used. You can specify a +# specific, stable version by setting a repository tag value or a link +# to a zip file containing the libary source. 
+# set(ANTLR4_TAG 4.13.2) +# set(ANTLR4_ZIP_REPOSITORY https://github.com/antlr/antlr4/archive/refs/tags/4.13.2.zip) + # add external build for antlrcpp include(ExternalAntlr4Cpp) -# add antrl4cpp artifacts to project environment +# add antlr4cpp artifacts to project environment include_directories(${ANTLR4_INCLUDE_DIRS}) # set variable pointing to the antlr tool that supports C++ # this is not required if the jar file can be found under PATH environment -set(ANTLR_EXECUTABLE /home/user/antlr-4.8-complete.jar) +set(ANTLR_EXECUTABLE /home/user/antlr-4.13.2-complete.jar) # add macros to generate ANTLR Cpp code from grammar find_package(ANTLR REQUIRED) @@ -123,12 +131,14 @@ ANTLR4_RUNTIME_LIBRARIES - path to antlr4 shared runtime library (such as DLL, D ANTLR4_TAG - branch/tag used for building antlr4 library ``` -`ANTLR4_TAG` is set to master branch by default to keep antlr4 updated. However, it will be required to rebuild after every `clean` is called. Set `ANTLR4_TAG` to a desired commit hash value to avoid rebuilding after every `clean` and keep the build stable, at the cost of not automatically update to latest commit. +`ANTLR4_TAG` is set to master branch by default to keep the antlr4 library up to date. However, this will require a rebuild after every `clean` is called. Set `ANTLR4_TAG` to a desired commit hash value to avoid rebuilding after every `clean` and keep the build stable, at the cost of not automatically updating to latest commit. -The ANTLR C++ runtime source is downloaded from GitHub by default. However, users may specify `ANTLR4_ZIP_REPOSITORY` to list the zip file from [ANTLR downloads](http://www.antlr.org/download.html) (under *C++ Target*). This variable can list a zip file included in the project directory; this is useful for maintaining a canonical source for each new build. +By defualt the ANTLR C++ runtime source is cloned from GitHub. 
However, users may specify `ANTLR4_ZIP_REPOSITORY` in order to download source as a zip file from [ANTLR downloads](http://www.antlr.org/download.html) (under *C++ Target*) or other locations. For example, this variable could list a zip file included in your the project directory. This is useful for maintaining a canonical source tree for each new build. Visual C++ compiler users may want to additionally define `ANTLR4_WITH_STATIC_CRT` before including the file. Set `ANTLR4_WITH_STATIC_CRT` to true if ANTLR4 C++ runtime library should be compiled with `/MT` flag, otherwise will be compiled with `/MD` flag. This variable has a default value of `OFF`. Changing `ANTLR4_WITH_STATIC_CRT` after building the library may require reinitialization of CMake or `clean` for the library to get rebuilt. +You may need to modify your local copy of ExternalAntlr4Cpp.cmake to modify some build settings. For example, to specify the C++ standard to use when building the runtime, add `-DCMAKE_CXX_STANDARD:STRING=17` to `CMAKE_CACHE_ARGS`. 
+ ### Examples To build and link ANTLR4 static library to a target one may call: diff --git a/runtime/Cpp/cmake/antlr4-generator.cmake.in b/runtime/Cpp/cmake/antlr4-generator.cmake.in index 5839cbe285..63996514b0 100644 --- a/runtime/Cpp/cmake/antlr4-generator.cmake.in +++ b/runtime/Cpp/cmake/antlr4-generator.cmake.in @@ -88,7 +88,7 @@ function(antlr4_generate else() set(Antlr4_BuildListenerOption "-no-listener") endif () - + if ( ( ARGC GREATER_EQUAL 5 ) AND ARGV4 ) set(Antlr4_BuildVisitorOption "-visitor") @@ -101,7 +101,7 @@ function(antlr4_generate else() set(Antlr4_BuildVisitorOption "-no-visitor") endif () - + if ( (ARGC GREATER_EQUAL 6 ) AND (NOT ${ARGV5} STREQUAL "") ) set(Antlr4_NamespaceOption "-package;${ARGV5}") @@ -109,7 +109,7 @@ function(antlr4_generate else() set(Antlr4_NamespaceOption "") endif () - + if ( (ARGC GREATER_EQUAL 7 ) AND (NOT ${ARGV6} STREQUAL "") ) set(Antlr4_AdditionalDependencies ${ARGV6}) else() @@ -157,7 +157,7 @@ function(antlr4_generate # export generated cpp files into list foreach(generated_file ${Antlr4_GeneratedTargets}) - + if (NOT CMAKE_CXX_COMPILER_ID MATCHES "MSVC") set_source_files_properties( ${generated_file} diff --git a/runtime/Cpp/cmake/antlr4-runtime.cmake.in b/runtime/Cpp/cmake/antlr4-runtime.cmake.in index 860aeb6012..697b36c628 100644 --- a/runtime/Cpp/cmake/antlr4-runtime.cmake.in +++ b/runtime/Cpp/cmake/antlr4-runtime.cmake.in @@ -5,6 +5,9 @@ set(ANTLR_VERSION @ANTLR_VERSION@) set_and_check(ANTLR4_INCLUDE_DIR "@PACKAGE_ANTLR4_INCLUDE_DIR@") set_and_check(ANTLR4_LIB_DIR "@PACKAGE_ANTLR4_LIB_DIR@") +include(CMakeFindDependencyMacro) +find_dependency(Threads) + include(${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake) check_required_components(antlr) diff --git a/runtime/Cpp/demo/Mac/antlrcpp-demo.xcodeproj/project.pbxproj b/runtime/Cpp/demo/Mac/antlrcpp-demo.xcodeproj/project.pbxproj index 5f136b03e7..fd5ff927da 100644 --- a/runtime/Cpp/demo/Mac/antlrcpp-demo.xcodeproj/project.pbxproj +++ 
b/runtime/Cpp/demo/Mac/antlrcpp-demo.xcodeproj/project.pbxproj @@ -7,8 +7,7 @@ objects = { /* Begin PBXBuildFile section */ - 270925AC1CDB427200522D32 /* libantlr4-runtime.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 270925A71CDB409400522D32 /* libantlr4-runtime.dylib */; }; - 270925AF1CDB428A00522D32 /* libantlr4-runtime.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 270925A91CDB409400522D32 /* libantlr4-runtime.a */; }; + 2707D9C22764C11300D99A45 /* libantlr4-runtime.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 2707D9BC2764C04C00D99A45 /* libantlr4-runtime.dylib */; }; 270925B11CDB455B00522D32 /* TLexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 27A23EA11CC2A8D60036D8A3 /* TLexer.cpp */; }; 2747A7131CA6C46C0030247B /* InputHandlingTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = 2747A7121CA6C46C0030247B /* InputHandlingTests.mm */; }; 274FC6D91CA96B6C008D4374 /* MiscClassTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = 274FC6D81CA96B6C008D4374 /* MiscClassTests.mm */; }; @@ -22,40 +21,26 @@ /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ - 270925A61CDB409400522D32 /* PBXContainerItemProxy */ = { + 2707D9BB2764C04C00D99A45 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; - containerPortal = 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */; + containerPortal = 2707D9B52764C04C00D99A45 /* antlrcpp.xcodeproj */; proxyType = 2; remoteGlobalIDString = 37D727AA1867AF1E007B6D10; - remoteInfo = antlrcpp; + remoteInfo = antlr4; }; - 270925A81CDB409400522D32 /* PBXContainerItemProxy */ = { + 2707D9BD2764C04C00D99A45 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; - containerPortal = 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */; + containerPortal = 2707D9B52764C04C00D99A45 /* antlrcpp.xcodeproj */; proxyType = 2; remoteGlobalIDString = 37C147171B4D5A04008EDDDB; - remoteInfo = antlrcpp_static; + remoteInfo = antlr4_static; }; - 270925AA1CDB426900522D32 /* 
PBXContainerItemProxy */ = { + 2707D9BF2764C04C00D99A45 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; - containerPortal = 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */; - proxyType = 1; - remoteGlobalIDString = 37D727A91867AF1E007B6D10; - remoteInfo = antlrcpp; - }; - 270925AD1CDB428400522D32 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */; - proxyType = 1; - remoteGlobalIDString = 37C147161B4D5A04008EDDDB; - remoteInfo = antlrcpp_static; - }; - 273DC2BC1CDB619900DB7B2B /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */; + containerPortal = 2707D9B52764C04C00D99A45 /* antlrcpp.xcodeproj */; proxyType = 2; remoteGlobalIDString = 270C67F01CDB4F1E00116E17; - remoteInfo = antlrcpp_ios; + remoteInfo = antlr4_ios; }; /* End PBXContainerItemProxy section */ @@ -72,7 +57,7 @@ /* End PBXCopyFilesBuildPhase section */ /* Begin PBXFileReference section */ - 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = antlrcpp.xcodeproj; path = ../../runtime/antlrcpp.xcodeproj; sourceTree = ""; }; + 2707D9B52764C04C00D99A45 /* antlrcpp.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = antlrcpp.xcodeproj; path = ../../runtime/antlrcpp.xcodeproj; sourceTree = ""; }; 2747A7121CA6C46C0030247B /* InputHandlingTests.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = InputHandlingTests.mm; sourceTree = ""; wrapsLines = 0; }; 274FC6D81CA96B6C008D4374 /* MiscClassTests.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MiscClassTests.mm; sourceTree = ""; wrapsLines = 0; }; 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = 
CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; }; @@ -102,7 +87,7 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 270925AC1CDB427200522D32 /* libantlr4-runtime.dylib in Frameworks */, + 2707D9C22764C11300D99A45 /* libantlr4-runtime.dylib in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -110,23 +95,29 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 270925AF1CDB428A00522D32 /* libantlr4-runtime.a in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ - 270925A21CDB409400522D32 /* Products */ = { + 2707D9B62764C04C00D99A45 /* Products */ = { isa = PBXGroup; children = ( - 270925A71CDB409400522D32 /* libantlr4-runtime.dylib */, - 270925A91CDB409400522D32 /* libantlr4-runtime.a */, - 273DC2BD1CDB619900DB7B2B /* antlr4_ios.framework */, + 2707D9BC2764C04C00D99A45 /* libantlr4-runtime.dylib */, + 2707D9BE2764C04C00D99A45 /* libantlr4-runtime.a */, + 2707D9C02764C04C00D99A45 /* antlr4_ios.framework */, ); name = Products; sourceTree = ""; }; + 2707D9C12764C11300D99A45 /* Frameworks */ = { + isa = PBXGroup; + children = ( + ); + name = Frameworks; + sourceTree = ""; + }; 27874F221CCBB34200AF1C53 /* Linked Frameworks */ = { isa = PBXGroup; children = ( @@ -167,12 +158,13 @@ 37D727A11867AF1E007B6D10 = { isa = PBXGroup; children = ( - 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */, + 2707D9B52764C04C00D99A45 /* antlrcpp.xcodeproj */, 27C66A681C9591280021E494 /* antlr4-cpp-demo */, 37F135691B4AC02800E0CACF /* antlrcpp Tests */, 27C66A5C1C958EB50021E494 /* generated */, 27874F221CCBB34200AF1C53 /* Linked Frameworks */, 37D727AB1867AF1E007B6D10 /* Products */, + 2707D9C12764C11300D99A45 /* Frameworks */, ); sourceTree = ""; }; @@ -219,7 +211,6 @@ buildRules = ( ); dependencies = ( - 270925AB1CDB426900522D32 /* PBXTargetDependency */, ); name = "antlr4-cpp-demo"; 
productName = "antlr4-cpp-demo"; @@ -237,7 +228,6 @@ buildRules = ( ); dependencies = ( - 270925AE1CDB428400522D32 /* PBXTargetDependency */, ); name = "antlrcpp Tests"; productName = "antlrcpp Tests"; @@ -266,6 +256,7 @@ developmentRegion = English; hasScannedForEncodings = 0; knownRegions = ( + English, en, ); mainGroup = 37D727A11867AF1E007B6D10; @@ -273,8 +264,8 @@ projectDirPath = ""; projectReferences = ( { - ProductGroup = 270925A21CDB409400522D32 /* Products */; - ProjectRef = 270925A11CDB409400522D32 /* antlrcpp.xcodeproj */; + ProductGroup = 2707D9B62764C04C00D99A45 /* Products */; + ProjectRef = 2707D9B52764C04C00D99A45 /* antlrcpp.xcodeproj */; }, ); projectRoot = ""; @@ -286,25 +277,25 @@ /* End PBXProject section */ /* Begin PBXReferenceProxy section */ - 270925A71CDB409400522D32 /* libantlr4-runtime.dylib */ = { + 2707D9BC2764C04C00D99A45 /* libantlr4-runtime.dylib */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.dylib"; path = "libantlr4-runtime.dylib"; - remoteRef = 270925A61CDB409400522D32 /* PBXContainerItemProxy */; + remoteRef = 2707D9BB2764C04C00D99A45 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; - 270925A91CDB409400522D32 /* libantlr4-runtime.a */ = { + 2707D9BE2764C04C00D99A45 /* libantlr4-runtime.a */ = { isa = PBXReferenceProxy; fileType = archive.ar; path = "libantlr4-runtime.a"; - remoteRef = 270925A81CDB409400522D32 /* PBXContainerItemProxy */; + remoteRef = 2707D9BD2764C04C00D99A45 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; - 273DC2BD1CDB619900DB7B2B /* antlr4_ios.framework */ = { + 2707D9C02764C04C00D99A45 /* antlr4_ios.framework */ = { isa = PBXReferenceProxy; fileType = wrapper.framework; path = antlr4_ios.framework; - remoteRef = 273DC2BC1CDB619900DB7B2B /* PBXContainerItemProxy */; + remoteRef = 2707D9BF2764C04C00D99A45 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; /* End PBXReferenceProxy section */ @@ -364,19 +355,6 @@ }; /* End PBXSourcesBuildPhase section 
*/ -/* Begin PBXTargetDependency section */ - 270925AB1CDB426900522D32 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - name = antlrcpp; - targetProxy = 270925AA1CDB426900522D32 /* PBXContainerItemProxy */; - }; - 270925AE1CDB428400522D32 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - name = antlrcpp_static; - targetProxy = 270925AD1CDB428400522D32 /* PBXContainerItemProxy */; - }; -/* End PBXTargetDependency section */ - /* Begin XCBuildConfiguration section */ 27C66A6C1C9591280021E494 /* Debug */ = { isa = XCBuildConfiguration; @@ -412,8 +390,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; - CLANG_CXX_LIBRARY = "libc++"; + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_OBJC_ARC = YES; CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; CLANG_WARN_BOOL_CONVERSION = YES; @@ -464,7 +441,7 @@ ../../runtime/src/atn, ../../runtime/src, ); - MACOSX_DEPLOYMENT_TARGET = 10.9; + MACOSX_DEPLOYMENT_TARGET = 11.1; ONLY_ACTIVE_ARCH = YES; SDKROOT = macosx; }; @@ -474,8 +451,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; - CLANG_CXX_LIBRARY = "libc++"; + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_OBJC_ARC = YES; CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; CLANG_WARN_BOOL_CONVERSION = YES; @@ -520,7 +496,7 @@ ../../runtime/src/atn, ../../runtime/src, ); - MACOSX_DEPLOYMENT_TARGET = 10.9; + MACOSX_DEPLOYMENT_TARGET = 11.1; SDKROOT = macosx; }; name = Release; diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj deleted file mode 100644 index f004fb06ce..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj +++ /dev/null @@ -1,362 +0,0 @@ - - - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Debug Static - Win32 - - - Debug Static - x64 - - - Release 
DLL - Win32 - - - Release DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - - {24EC5104-7402-4C76-B66B-27ADBE062D68} - Win32Proj - antlr4cppdemo - antlr4cpp-demo - 8.1 - - - - Application - true - v140 - Unicode - - - Application - true - v140 - Unicode - - - Application - true - v140 - Unicode - - - Application - true - v140 - Unicode - - - Application - false - v140 - true - Unicode - - - Application - false - v140 - true - Unicode - - - Application - false - v140 - true - Unicode - - - Application - false - v140 - true - Unicode - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - - - - Level3 - Disabled - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - true - 
$(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - false - - - Console - true - - - - - - - Level3 - Disabled - %(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - false - - - Console - true - - - - - - - Level3 - Disabled - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - false - - - Console - true - - - - - - - Level3 - Disabled - %(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - false - - - Console - true - - - - - Level3 - - - MaxSpeed - true - true - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - true - 
$(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - %(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - %(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - true - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - - - 
{a9762991-1b57-4dce-90c0-ee42b96947be} - - - - - - \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj.filters b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj.filters deleted file mode 100644 index ed56184124..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj.filters +++ /dev/null @@ -1,63 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - {ef397b7b-1192-4d44-93ed-fadaec7622e8} - - - - - Source Files - - - generated - - - generated - - - generated - - - generated - - - generated - - - generated - - - - - generated - - - generated - - - generated - - - generated - - - generated - - - generated - - - \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj new file mode 100644 index 0000000000..ef29a4ecce --- /dev/null +++ b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj @@ -0,0 +1,365 @@ + + + + + Debug DLL + Win32 + + + Debug DLL + x64 + + + Debug Static + Win32 + + + Debug Static + x64 + + + Release DLL + Win32 + + + Release DLL + x64 + + + Release Static + Win32 + + + Release Static + x64 + + + + {24EC5104-7402-4C76-B66B-27ADBE062D68} + Win32Proj + antlr4cppdemo + antlr4cpp-demo + 10.0 + + + + Application + true + v143 + Unicode + + + Application + true + v143 + Unicode + + + Application + true + v143 + Unicode + + + Application + true + v143 + Unicode + + + Application + false + v143 + true + Unicode + + + Application + false + v143 + true + Unicode + + + Application + false + v143 + true + Unicode + + + Application + false + 
v143 + true + Unicode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + true + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + true + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + true + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + + + + + + Level3 + Disabled + ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + true + $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + false + + + Console + true + + + + + + + Level3 + Disabled + %(PreprocessorDefinitions) + true + 
$(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + false + + + Console + true + + + + + + + Level3 + Disabled + ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + true + $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + false + stdcpp17 + + + Console + true + + + + + + + Level3 + Disabled + %(PreprocessorDefinitions) + true + $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + false + stdcpp17 + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + true + $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + 
%(PreprocessorDefinitions) + true + $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + true + $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + %(PreprocessorDefinitions) + true + $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) + + + 4251 + true + + + Console + true + true + true + + + + + + + + + + + + + + + + + + + + + + + {a9762991-1b57-4dce-90c0-ee42b96947be} + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj.filters b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj.filters new file mode 100644 index 0000000000..e191b9354b --- /dev/null +++ b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj.filters @@ -0,0 +1,66 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + 
cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + {ef397b7b-1192-4d44-93ed-fadaec7622e8} + + + + + Source Files + + + generated + + + generated + + + generated + + + generated + + + generated + + + generated + + + + + generated + + + generated + + + generated + + + generated + + + generated + + + generated + + + Header Files + + + \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj deleted file mode 100644 index ec6240de0e..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj +++ /dev/null @@ -1,349 +0,0 @@ - - - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Debug Static - Win32 - - - Debug Static - x64 - - - Release DLL - Win32 - - - Release DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - - {24EC5104-7402-4C76-B66B-27ADBE062D68} - Win32Proj - antlr4cppdemo - antlr4cpp-demo - - - - Application - true - v120 - Unicode - - - Application - true - v120 - Unicode - - - Application - true - v120 - Unicode - - - Application - true - v120 - Unicode - - - Application - false - v120 - true - Unicode - - - Application - false - v120 - true - Unicode - - - Application - false - v120 - true - Unicode - - - Application - false - v120 - true - Unicode - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - 
$(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - 
$(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - 
WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - - - {a9762991-1b57-4dce-90c0-ee42b96947be} - - - - - - \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj.filters b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj.filters deleted file mode 100644 index ed56184124..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj.filters +++ /dev/null @@ -1,63 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - {ef397b7b-1192-4d44-93ed-fadaec7622e8} - - - - - Source Files - - - generated - - - generated - - - generated - - - generated - - - generated - - - generated - - - - - generated - - - generated - - - generated - - - generated - - - generated - - - 
generated - - - \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp index fa470e5ed6..b620ad018b 100644 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp +++ b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp @@ -25,17 +25,15 @@ using namespace antlr4; int main(int argc, const char * argv[]) { - ANTLRInputStream input("🍴 = 🍐 + \"😎\";(((x * π))) * µ + ∰; a + (x * (y ? 0 : 1) + z);"); + ANTLRInputStream input("a = b + \"c\";(((x * d))) * e + f; a + (x * (y ? 0 : 1) + z);"); TLexer lexer(&input); CommonTokenStream tokens(&lexer); TParser parser(&tokens); tree::ParseTree *tree = parser.main(); - std::wstring s = antlrcpp::s2ws(tree->toStringTree(&parser)) + L"\n"; - - OutputDebugString(s.data()); // Only works properly since VS 2015. - //std::wcout << "Parse Tree: " << s << std::endl; Unicode output in the console is very limited. + auto s = tree->toStringTree(&parser); + std::cout << "Parse Tree: " << s << std::endl; return 0; } diff --git a/runtime/Cpp/demo/Windows/antlr4cpp-vs2013.sln b/runtime/Cpp/demo/Windows/antlr4cpp-vs2013.sln deleted file mode 100644 index 931aeb3eb2..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4cpp-vs2013.sln +++ /dev/null @@ -1,58 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2013 -VisualStudioVersion = 12.0.40629.0 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-demo", "antlr4-cpp-demo\antlr4-cpp-demo.vcxproj", "{24EC5104-7402-4C76-B66B-27ADBE062D68}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-vs2013", "..\..\runtime\antlr4cpp-vs2013.vcxproj", "{A9762991-1B57-4DCE-90C0-EE42B96947BE}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug DLL|Win32 = Debug DLL|Win32 - Debug DLL|x64 = Debug DLL|x64 - Debug Static|Win32 = Debug Static|Win32 - Debug Static|x64 = Debug Static|x64 
- Release DLL|Win32 = Release DLL|Win32 - Release DLL|x64 = Release DLL|x64 - Release Static|Win32 = Release Static|Win32 - Release Static|x64 = Release Static|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|Win32.Build.0 = Debug DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.Build.0 = Debug DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|Win32.ActiveCfg = Debug Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|Win32.Build.0 = Debug Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.ActiveCfg = Debug Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.Build.0 = Debug Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|Win32.ActiveCfg = Release DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|Win32.Build.0 = Release DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.ActiveCfg = Release DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.Build.0 = Release DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|Win32.ActiveCfg = Release Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|Win32.Build.0 = Release Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.ActiveCfg = Release Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.Build.0 = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|Win32.Build.0 = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.Build.0 = Debug DLL|x64 - 
{A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|Win32.ActiveCfg = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|Win32.Build.0 = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.ActiveCfg = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.Build.0 = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|Win32.ActiveCfg = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|Win32.Build.0 = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.ActiveCfg = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.Build.0 = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|Win32.ActiveCfg = Release Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|Win32.Build.0 = Release Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.ActiveCfg = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.Build.0 = Release Static|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/runtime/Cpp/demo/Windows/antlr4cpp-vs2015.sln b/runtime/Cpp/demo/Windows/antlr4cpp-vs2015.sln deleted file mode 100644 index 6bf253d08d..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4cpp-vs2015.sln +++ /dev/null @@ -1,58 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-vs2015", "..\..\runtime\antlr4cpp-vs2015.vcxproj", "{A9762991-1B57-4DCE-90C0-EE42B96947BE}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-demo", "antlr4-cpp-demo\antlr4-cpp-demo-vs2015.vcxproj", "{24EC5104-7402-4C76-B66B-27ADBE062D68}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = 
preSolution - Debug DLL|x64 = Debug DLL|x64 - Debug DLL|x86 = Debug DLL|x86 - Debug Static|x64 = Debug Static|x64 - Debug Static|x86 = Debug Static|x86 - Release DLL|x64 = Release DLL|x64 - Release DLL|x86 = Release DLL|x86 - Release Static|x64 = Release Static|x64 - Release Static|x86 = Release Static|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.Build.0 = Debug DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x86.ActiveCfg = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x86.Build.0 = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.ActiveCfg = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.Build.0 = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x86.ActiveCfg = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x86.Build.0 = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.ActiveCfg = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.Build.0 = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x86.ActiveCfg = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x86.Build.0 = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.ActiveCfg = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.Build.0 = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x86.ActiveCfg = Release Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x86.Build.0 = Release Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.Build.0 = Debug DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x86.ActiveCfg = Debug 
DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x86.Build.0 = Debug DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.ActiveCfg = Debug Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.Build.0 = Debug Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x86.ActiveCfg = Debug Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x86.Build.0 = Debug Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.ActiveCfg = Release DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.Build.0 = Release DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x86.ActiveCfg = Release DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x86.Build.0 = Release DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.ActiveCfg = Release Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.Build.0 = Release Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x86.ActiveCfg = Release Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x86.Build.0 = Release Static|Win32 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/runtime/Cpp/demo/Windows/antlr4cpp-vs2022.sln b/runtime/Cpp/demo/Windows/antlr4cpp-vs2022.sln new file mode 100644 index 0000000000..bcda88e1e6 --- /dev/null +++ b/runtime/Cpp/demo/Windows/antlr4cpp-vs2022.sln @@ -0,0 +1,61 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.32014.148 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-vs2022", "..\..\runtime\antlr4cpp-vs2022.vcxproj", "{52618D4B-6EC4-49AD-8B83-52686244E8F3}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-demo", "antlr4-cpp-demo\antlr4-cpp-demo-vs2022.vcxproj", 
"{24EC5104-7402-4C76-B66B-27ADBE062D68}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug DLL|x64 = Debug DLL|x64 + Debug DLL|x86 = Debug DLL|x86 + Debug Static|x64 = Debug Static|x64 + Debug Static|x86 = Debug Static|x86 + Release DLL|x64 = Release DLL|x64 + Release DLL|x86 = Release DLL|x86 + Release Static|x64 = Release Static|x64 + Release Static|x86 = Release Static|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x64.Build.0 = Debug DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x86.ActiveCfg = Debug DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x86.Build.0 = Debug DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x64.ActiveCfg = Debug Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x64.Build.0 = Debug Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x86.ActiveCfg = Debug Static|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x86.Build.0 = Debug Static|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x64.ActiveCfg = Release DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x64.Build.0 = Release DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x86.ActiveCfg = Release DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x86.Build.0 = Release DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x64.ActiveCfg = Release Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x64.Build.0 = Release Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x86.ActiveCfg = Release Static|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x86.Build.0 = Release Static|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 + 
{24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.Build.0 = Debug DLL|x64 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x86.ActiveCfg = Debug DLL|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x86.Build.0 = Debug DLL|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.ActiveCfg = Debug Static|x64 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.Build.0 = Debug Static|x64 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x86.ActiveCfg = Debug Static|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x86.Build.0 = Debug Static|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.ActiveCfg = Release DLL|x64 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.Build.0 = Release DLL|x64 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x86.ActiveCfg = Release DLL|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x86.Build.0 = Release DLL|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.ActiveCfg = Release Static|x64 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.Build.0 = Release Static|x64 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x86.ActiveCfg = Release Static|Win32 + {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x86.Build.0 = Release Static|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {93CE9298-807C-4EAD-B1E6-7109DD1A78FA} + EndGlobalSection +EndGlobal diff --git a/runtime/Cpp/demo/generate.cmd b/runtime/Cpp/demo/generate.cmd index a7130c2b13..86b7d118d5 100644 --- a/runtime/Cpp/demo/generate.cmd +++ b/runtime/Cpp/demo/generate.cmd @@ -6,7 +6,7 @@ :: Download the ANLTR jar and place it in the same folder as this script (or adjust the LOCATION var accordingly). 
-set LOCATION=antlr-4.8-complete.jar +set LOCATION=antlr-4.13.2-complete.jar java -jar %LOCATION% -Dlanguage=Cpp -listener -visitor -o generated/ -package antlrcpptest TLexer.g4 TParser.g4 ::java -jar %LOCATION% -Dlanguage=Cpp -listener -visitor -o generated/ -package antlrcpptest -XdbgST TLexer.g4 TParser.g4 ::java -jar %LOCATION% -Dlanguage=Java -listener -visitor -o generated/ -package antlrcpptest TLexer.g4 TParser.g4 diff --git a/runtime/Cpp/deploy-macos.sh b/runtime/Cpp/deploy-macos.sh index 55528470f7..f8f4041071 100755 --- a/runtime/Cpp/deploy-macos.sh +++ b/runtime/Cpp/deploy-macos.sh @@ -1,16 +1,17 @@ #!/bin/bash # Clean left overs from previous builds if there are any -rm -f -R antlr4-runtime build lib 2> /dev/null -rm antlr4-cpp-runtime-macos.zip 2> /dev/null +rm -rf antlr4-runtime build lib +rm -f antlr4-cpp-runtime-macos.zip # Binaries -xcodebuild -project runtime/antlrcpp.xcodeproj -target antlr4 -configuration Release -xcodebuild -project runtime/antlrcpp.xcodeproj -target antlr4_static -configuration Release -rm -f -R lib +cmake . 
-D CMAKE_OSX_ARCHITECTURES="arm64; x86_64" -DCMAKE_BUILD_TYPE=Release &> /dev/null +make -j 8 + +rm -rf lib mkdir lib -mv runtime/build/Release/libantlr4-runtime.a lib/ -mv runtime/build/Release/libantlr4-runtime.dylib lib/ +cp runtime/libantlr4-runtime.dylib lib +cp runtime/libantlr4-runtime.a lib # Headers rm -f -R antlr4-runtime diff --git a/runtime/Cpp/deploy-source.sh b/runtime/Cpp/deploy-source.sh index d079821aa7..fc2fa90a00 100755 --- a/runtime/Cpp/deploy-source.sh +++ b/runtime/Cpp/deploy-source.sh @@ -3,7 +3,8 @@ # Zip it rm -f antlr4-cpp-runtime-source.zip zip -r antlr4-cpp-runtime-source.zip "README.md" "cmake" "demo" "runtime" "CMakeLists.txt" "deploy-macos.sh" "deploy-source.sh" "deploy-windows.cmd" "VERSION" \ - -X -x "*.DS_Store*" "antlrcpp.xcodeproj/xcuserdata/*" "*Build*" "*DerivedData*" "*.jar" "demo/generated/*" "*.vscode*" "runtime/build/*" + -X -x "*.DS_Store*" "antlrcpp.xcodeproj/xcuserdata/*" "*Build*" "*DerivedData*" "*.jar" "demo/generated/*" "*.vscode*" "runtime/build/*" \ + "runtime/*.dylib" "*.a" "runtime/thirdparty/*" "runtime/CMakeFiles/*" # Add the license file from the ANTLR root as well. pushd ../../ diff --git a/runtime/Cpp/deploy-windows.cmd b/runtime/Cpp/deploy-windows.cmd index 8fc22ab5b1..0a7b3564c0 100644 --- a/runtime/Cpp/deploy-windows.cmd +++ b/runtime/Cpp/deploy-windows.cmd @@ -8,58 +8,58 @@ if exist bin rmdir /S /Q runtime\bin if exist obj rmdir /S /Q runtime\obj if exist lib rmdir /S /Q lib if exist antlr4-runtime rmdir /S /Q antlr4-runtime -if exist antlr4-cpp-runtime-vs2017.zip erase antlr4-cpp-runtime-vs2017.zip if exist antlr4-cpp-runtime-vs2019.zip erase antlr4-cpp-runtime-vs2019.zip +if exist antlr4-cpp-runtime-vs2022.zip erase antlr4-cpp-runtime-vs2022.zip rem Headers echo Copying header files ... xcopy runtime\src\*.h antlr4-runtime\ /s /q rem Binaries -rem VS 2017 disabled by default. Change the X to a C to enable it. 
-if exist "X:\Program Files (x86)\Microsoft Visual Studio\2017\%1\Common7\Tools\VsDevCmd.bat" ( +rem VS 2019 disabled by default. Change the X to a C to enable it. +if exist "X:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" ( echo. - - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\%1\Common7\Tools\VsDevCmd.bat" + + call "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" pushd runtime - msbuild antlr4cpp-vs2017.vcxproj /p:configuration="Release DLL" /p:platform=Win32 - msbuild antlr4cpp-vs2017.vcxproj /p:configuration="Release DLL" /p:platform=x64 + msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=Win32 + msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=x64 popd - - 7z a antlr4-cpp-runtime-vs2017.zip antlr4-runtime + + 7z a antlr4-cpp-runtime-vs2019.zip antlr4-runtime xcopy runtime\bin\*.dll lib\ /s xcopy runtime\bin\*.lib lib\ /s - 7z a antlr4-cpp-runtime-vs2017.zip lib - + 7z a antlr4-cpp-runtime-vs2019.zip lib + rmdir /S /Q lib rmdir /S /Q runtime\bin rmdir /S /Q runtime\obj - - rem if exist antlr4-cpp-runtime-vs2017.zip copy antlr4-cpp-runtime-vs2017.zip ~/antlr/sites/website-antlr4/download + + rem if exist antlr4-cpp-runtime-vs2019.zip copy antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download ) -set VCTargetsPath=C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\MSBuild\Microsoft\VC\v160\ -if exist "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" ( +set VCTargetsPath=C:\Program Files\Microsoft Visual Studio\2022\%1\MSBuild\Microsoft\VC\v170\ +if exist "C:\Program Files\Microsoft Visual Studio\2022\%1\Common7\Tools\VsDevCmd.bat" ( echo. 
- call "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" + call "C:\Program Files\Microsoft Visual Studio\2022\%1\Common7\Tools\VsDevCmd.bat" pushd runtime - msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=Win32 - msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=x64 + msbuild antlr4cpp-vs2022.vcxproj /p:configuration="Release DLL" /p:platform=Win32 + msbuild antlr4cpp-vs2022.vcxproj /p:configuration="Release DLL" /p:platform=x64 popd - - 7z a antlr4-cpp-runtime-vs2019.zip antlr4-runtime + + 7z a antlr4-cpp-runtime-vs2022.zip antlr4-runtime xcopy runtime\bin\*.dll lib\ /s xcopy runtime\bin\*.lib lib\ /s - 7z a antlr4-cpp-runtime-vs2019.zip lib - + 7z a antlr4-cpp-runtime-vs2022.zip lib + rmdir /S /Q lib rmdir /S /Q runtime\bin rmdir /S /Q runtime\obj - - rem if exist antlr4-cpp-runtime-vs2019.zip copy antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download + + rem if exist antlr4-cpp-runtime-vs2022.zip copy antlr4-cpp-runtime-vs2022.zip ~/antlr/sites/website-antlr4/download ) rmdir /S /Q antlr4-runtime @@ -70,7 +70,7 @@ goto end :Usage -echo This script builds Visual Studio 2017 and/or 2019 libraries of the ANTLR4 runtime. +echo This script builds Visual Studio 2019 and/or 2022 libraries of the ANTLR4 runtime. echo You have to specify the type of your VS installation (Community, Professional etc.) to construct echo the correct build tools path. echo. diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt index 2c5e7376f9..c24302d210 100644 --- a/runtime/Cpp/runtime/CMakeLists.txt +++ b/runtime/Cpp/runtime/CMakeLists.txt @@ -1,8 +1,19 @@ +option(ANTLR_BUILD_CPP_TESTS "Build C++ tests." 
ON) +option(TRACE_ATN "Trace ATN simulation" OFF) +option(ANTLR_BUILD_SHARED "Build the shared library of the ANTLR runtime" ON) +option(ANTLR_BUILD_STATIC "Build the static library of the ANTLR runtime" ON) -include_directories( +if (NOT ANTLR_BUILD_SHARED AND NOT ANTLR_BUILD_STATIC) + message(FATAL_ERROR "Options ANTLR_BUILD_SHARED and ANTLR_BUILD_STATIC can't both be OFF") +endif() + +set(libantlrcpp_INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_INCLUDEDIR}/antlr4-runtime") + +set(libantlrcpp_INCLUDE_DIRS ${PROJECT_SOURCE_DIR}/runtime/src ${PROJECT_SOURCE_DIR}/runtime/src/atn ${PROJECT_SOURCE_DIR}/runtime/src/dfa + ${PROJECT_SOURCE_DIR}/runtime/src/internal ${PROJECT_SOURCE_DIR}/runtime/src/misc ${PROJECT_SOURCE_DIR}/runtime/src/support ${PROJECT_SOURCE_DIR}/runtime/src/tree @@ -15,6 +26,7 @@ file(GLOB libantlrcpp_SRC "${PROJECT_SOURCE_DIR}/runtime/src/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/atn/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/dfa/*.cpp" + "${PROJECT_SOURCE_DIR}/runtime/src/internal/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/misc/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/support/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/tree/*.cpp" @@ -22,26 +34,78 @@ file(GLOB libantlrcpp_SRC "${PROJECT_SOURCE_DIR}/runtime/src/tree/xpath/*.cpp" ) -add_library(antlr4_shared SHARED ${libantlrcpp_SRC}) -add_library(antlr4_static STATIC ${libantlrcpp_SRC}) +if (ANTLR_BUILD_SHARED) + add_library(antlr4_shared SHARED ${libantlrcpp_SRC}) + target_include_directories(antlr4_shared PUBLIC + "$" + "$") +endif() +if (ANTLR_BUILD_STATIC) + add_library(antlr4_static STATIC ${libantlrcpp_SRC}) + target_include_directories(antlr4_static PUBLIC + "$" + "$") +endif() -set(LIB_OUTPUT_DIR "${CMAKE_HOME_DIRECTORY}/dist") # put generated libraries here. 
-message(STATUS "Output libraries to ${LIB_OUTPUT_DIR}") +if (CMAKE_HOST_UNIX) + # Make sure to link against threads (pthreads) library in order to be able to + # make use of std::call_once in the code without producing runtime errors + # (see also https://github.com/antlr/antlr4/issues/3708 and/or https://stackoverflow.com/q/51584960). + find_package(Threads REQUIRED) -# make sure 'make' works fine even if ${LIB_OUTPUT_DIR} is deleted. -add_custom_target(make_lib_output_dir ALL - COMMAND ${CMAKE_COMMAND} -E make_directory ${LIB_OUTPUT_DIR} - ) + if (TARGET antlr4_shared) + target_link_libraries(antlr4_shared Threads::Threads) + endif() + if (TARGET antlr4_static) + target_link_libraries(antlr4_static Threads::Threads) + endif() +endif() + +IF(TRACE_ATN) + ADD_DEFINITIONS(-DTRACE_ATN_SIM=1) +ENDIF(TRACE_ATN) -add_dependencies(antlr4_shared make_lib_output_dir) -add_dependencies(antlr4_static make_lib_output_dir) +if (ANTLR_BUILD_CPP_TESTS) + include(FetchContent) -if(CMAKE_SYSTEM_NAME MATCHES "Linux") - target_link_libraries(antlr4_shared ${UUID_LIBRARIES}) - target_link_libraries(antlr4_static ${UUID_LIBRARIES}) -elseif(APPLE) - target_link_libraries(antlr4_shared ${COREFOUNDATION_LIBRARY}) - target_link_libraries(antlr4_static ${COREFOUNDATION_LIBRARY}) + FetchContent_Declare( + googletest + URL https://github.com/google/googletest/archive/refs/tags/v1.16.0.zip + ) + + if(WITH_STATIC_CRT) + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + endif() + + FetchContent_MakeAvailable(googletest) + + file(GLOB libantlrcpp_TESTS + "${PROJECT_SOURCE_DIR}/runtime/tests/*.cpp" + ) + + add_executable( + antlr4_tests + ${libantlrcpp_TESTS} + ) + + target_link_libraries( + antlr4_tests + $,antlr4_static,antlr4_shared> + gtest_main + ) + + include(GoogleTest) + + gtest_discover_tests(antlr4_tests) +endif() + +if(APPLE) + if (TARGET antlr4_shared) + target_link_libraries(antlr4_shared ${COREFOUNDATION_LIBRARY}) + endif() + if (TARGET antlr4_static) + 
target_link_libraries(antlr4_static ${COREFOUNDATION_LIBRARY}) + endif() endif() if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") @@ -51,68 +115,85 @@ else() endif() -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") +if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-dollar-in-identifier-extension -Wno-four-char-constants") -elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Intel") +elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Intel") set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-multichar") endif() set(extra_share_compile_flags "") set(extra_static_compile_flags "") -if(WIN32) - set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS") - set(extra_static_compile_flags "-DANTLR4CPP_STATIC") -endif(WIN32) -if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") - if(WITH_STATIC_CRT) - target_compile_options(antlr4_shared PRIVATE "/MT$<$:d>") - target_compile_options(antlr4_static PRIVATE "/MT$<$:d>") - else() - target_compile_options(antlr4_shared PRIVATE "/MD$<$:d>") - target_compile_options(antlr4_static PRIVATE "/MD$<$:d>") +set(static_lib_suffix "") + +if (WIN32) + set(static_lib_suffix "-static") + if (TARGET antlr4_shared) + target_compile_definitions(antlr4_shared PUBLIC ANTLR4CPP_EXPORTS) + endif() + if (TARGET antlr4_static) + target_compile_definitions(antlr4_static PUBLIC ANTLR4CPP_STATIC) + endif() + if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") + set(extra_share_compile_flags "-MP /wd4251") + set(extra_static_compile_flags "-MP") endif() endif() -set(static_lib_suffix "") -if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") - set(static_lib_suffix "-static") +if (TARGET antlr4_shared) + set_target_properties(antlr4_shared + PROPERTIES VERSION ${ANTLR_VERSION} + SOVERSION ${ANTLR_VERSION} + OUTPUT_NAME antlr4-runtime + COMPILE_FLAGS "${disabled_compile_warnings} ${extra_share_compile_flags}") endif() -if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") - 
set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS -MP /wd4251") - set(extra_static_compile_flags "-DANTLR4CPP_STATIC -MP") +if (TARGET antlr4_static) + set_target_properties(antlr4_static + PROPERTIES VERSION ${ANTLR_VERSION} + SOVERSION ${ANTLR_VERSION} + OUTPUT_NAME "antlr4-runtime${static_lib_suffix}" + COMPILE_PDB_NAME "antlr4-runtime${static_lib_suffix}" + COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}") endif() -set_target_properties(antlr4_shared - PROPERTIES VERSION ${ANTLR_VERSION} - SOVERSION ${ANTLR_VERSION} - OUTPUT_NAME antlr4-runtime - LIBRARY_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} - # TODO: test in windows. DLL is treated as runtime. - # see https://cmake.org/cmake/help/v3.0/prop_tgt/LIBRARY_OUTPUT_DIRECTORY.html - RUNTIME_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} - ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} - COMPILE_FLAGS "${disabled_compile_warnings} ${extra_share_compile_flags}") - -set_target_properties(antlr4_static - PROPERTIES VERSION ${ANTLR_VERSION} - SOVERSION ${ANTLR_VERSION} - OUTPUT_NAME "antlr4-runtime${static_lib_suffix}" - ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} - COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}") - -install(TARGETS antlr4_shared - DESTINATION lib - EXPORT antlr4-targets) -install(TARGETS antlr4_static - DESTINATION lib - EXPORT antlr4-targets) - -install(DIRECTORY "${PROJECT_SOURCE_DIR}/runtime/src/" - DESTINATION "include/antlr4-runtime" - COMPONENT dev - FILES_MATCHING PATTERN "*.h" - ) +if (ANTLR_BUILD_CPP_TESTS) + # Copy the generated binaries to dist folder (required by test suite) + if (TARGET antlr4_shared) + add_custom_command( + TARGET antlr4_shared + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_HOME_DIRECTORY}/dist + COMMAND ${CMAKE_COMMAND} -E copy_if_different $ ${CMAKE_HOME_DIRECTORY}/dist + COMMAND ${CMAKE_COMMAND} -E copy_if_different $ ${CMAKE_HOME_DIRECTORY}/dist) + endif() + + if (TARGET antlr4_static) + add_custom_command( + TARGET 
antlr4_static + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_HOME_DIRECTORY}/dist + COMMAND ${CMAKE_COMMAND} -E copy_if_different $ ${CMAKE_HOME_DIRECTORY}/dist) + endif() +endif() +if (TARGET antlr4_shared) + install(TARGETS antlr4_shared + EXPORT antlr4-targets + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) +endif() +if (TARGET antlr4_static) + install(TARGETS antlr4_static + EXPORT antlr4-targets + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) +endif() +install(DIRECTORY "${PROJECT_SOURCE_DIR}/runtime/src/" + DESTINATION "${libantlrcpp_INCLUDE_INSTALL_DIR}" + COMPONENT dev + FILES_MATCHING PATTERN "*.h" + ) diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj deleted file mode 100644 index 47377c18af..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj +++ /dev/null @@ -1,637 +0,0 @@ - - - - - Debug Static - Win32 - - - Debug Static - x64 - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - Release DLL - Win32 - - - Release DLL - x64 - - - - {229A61DC-1207-4E4E-88B0-F4CB7205672D} - Win32Proj - antlr4cpp - - - - DynamicLibrary - true - Unicode - v120 - - - StaticLibrary - true - Unicode - v120 - - - DynamicLibrary - true - Unicode - v120 - - - StaticLibrary - true - Unicode - v120 - - - DynamicLibrary - false - true - Unicode - v120 - - - StaticLibrary - false - true - Unicode - v120 - - - DynamicLibrary - false - true - Unicode - v120 - - - StaticLibrary - false - true - Unicode - v120 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - 
$(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - - Level4 - Disabled - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - true - true - - - - - 
Level4 - MaxSpeed - true - true - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - - - Windows - true - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters deleted file mode 100644 index 499a82ed4d..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters +++ /dev/null @@ -1,984 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - {587a2726-4856-4d21-937a-fbaebaa90232} - - - {2662156f-1508-4dad-b991-a8298a6db9bf} - - - {5b1e59b1-7fa5-46a5-8d92-965bd709cca0} - - - {9de9fe74-5d67-441d-a972-3cebe6dfbfcc} - - - 
{89fd3896-0ab1-476d-8d64-a57f10a5e73b} - - - {23939d7b-8e11-421e-80eb-b2cfdfdd64e9} - - - {05f2bacb-b5b2-4ca3-abe1-ca9a7239ecaa} - - - {d3b2ae2d-836b-4c73-8180-aca4ebb7d658} - - - {6674a0f0-c65d-4a00-a9e5-1f243b89d0a2} - - - {1893fffe-7a2b-4708-8ce5-003aa9b749f7} - - - {053a0632-27bc-4043-b5e8-760951b3b5b9} - - - {048c180d-44cf-49ca-a7aa-d0053fea07f5} - - - {3181cae5-cc15-4050-8c45-22af44a823de} - - - {290632d2-c56e-4005-a417-eb83b9531e1a} - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - 
Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\xpath - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\misc - - - Header Files - - - Header Files - - - Header Files\support - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header 
Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files - - - Header Files - - - Header Files\tree - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\misc - - - Source Files\misc - - - Source Files\misc - - - Source Files\support - - - Source Files\support 
- - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files - - - Source Files\tree - - - Source Files\tree - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\support - - - Source Files\atn - - - Source Files\atn - - - Source Files\tree\pattern - - - Source Files\misc - - - \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj deleted file mode 100644 index 9085761e8d..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj +++ /dev/null @@ -1,652 +0,0 @@ - - - - - Debug Static - Win32 - - - Debug Static - x64 - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - Release DLL - Win32 - - 
- Release DLL - x64 - - - - {A9762991-1B57-4DCE-90C0-EE42B96947BE} - Win32Proj - antlr4cpp - 8.1 - - - - DynamicLibrary - true - Unicode - v140 - - - StaticLibrary - true - Unicode - v140 - - - DynamicLibrary - true - Unicode - v140 - - - StaticLibrary - true - Unicode - v140 - - - DynamicLibrary - false - true - Unicode - v140 - - - StaticLibrary - false - true - Unicode - v140 - - - DynamicLibrary - false - true - Unicode - v140 - - - StaticLibrary - false - true - Unicode - v140 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - 
Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - Windows - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git 
a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters deleted file mode 100644 index cc1986923d..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters +++ /dev/null @@ -1,990 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - {587a2726-4856-4d21-937a-fbaebaa90232} - - - {2662156f-1508-4dad-b991-a8298a6db9bf} - - - {5b1e59b1-7fa5-46a5-8d92-965bd709cca0} - - - {9de9fe74-5d67-441d-a972-3cebe6dfbfcc} - - - {89fd3896-0ab1-476d-8d64-a57f10a5e73b} - - - {23939d7b-8e11-421e-80eb-b2cfdfdd64e9} - - - {05f2bacb-b5b2-4ca3-abe1-ca9a7239ecaa} - - - {d3b2ae2d-836b-4c73-8180-aca4ebb7d658} - - - {6674a0f0-c65d-4a00-a9e5-1f243b89d0a2} - - - {1893fffe-7a2b-4708-8ce5-003aa9b749f7} - - - {053a0632-27bc-4043-b5e8-760951b3b5b9} - - - {048c180d-44cf-49ca-a7aa-d0053fea07f5} - - - {3181cae5-cc15-4050-8c45-22af44a823de} - - - {290632d2-c56e-4005-a417-eb83b9531e1a} - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - 
- - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\xpath - - - Header Files - - - Header Files\atn - - - Header Files\atn - - 
- Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\misc - - - Header Files - - - Header Files - - - Header Files\support - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files - - - Header Files - - - Source Files\support - - - Header Files\tree - - - Header Files - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - 
Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\misc - - - Source Files\misc - - - Source Files\misc - - - Source Files\support - - - Source Files\support - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files - - - Source Files\tree - - - Source Files\tree - - - Source Files 
- - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\misc - - - Source Files - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj deleted file mode 100644 index 2c3611c861..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj +++ /dev/null @@ -1,652 +0,0 @@ - - - - - Debug Static - Win32 - - - Debug Static - x64 - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - Release DLL - Win32 - - - Release DLL - x64 - - - - {83BE66CD-9C4F-4F84-B72A-DD1855C8FC8A} - Win32Proj - antlr4cpp - 10.0.16299.0 - - - - DynamicLibrary - true - Unicode - v141 - - - StaticLibrary - true - Unicode - v141 - - - DynamicLibrary - true - Unicode - v141 - - - StaticLibrary - true - Unicode - v141 - - - DynamicLibrary - false - true - Unicode - v141 - - - StaticLibrary - false - true - Unicode - v141 - - - DynamicLibrary - false - true - Unicode - v141 - - - StaticLibrary - false - true - Unicode - v141 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false 
- $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - - - Windows - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - 
src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - - - Windows - true - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj.filters deleted file mode 100644 index cc1986923d..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj.filters +++ /dev/null @@ -1,990 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - {587a2726-4856-4d21-937a-fbaebaa90232} - - - {2662156f-1508-4dad-b991-a8298a6db9bf} - - - {5b1e59b1-7fa5-46a5-8d92-965bd709cca0} - - - {9de9fe74-5d67-441d-a972-3cebe6dfbfcc} - - - {89fd3896-0ab1-476d-8d64-a57f10a5e73b} - - - {23939d7b-8e11-421e-80eb-b2cfdfdd64e9} - - - {05f2bacb-b5b2-4ca3-abe1-ca9a7239ecaa} - - - {d3b2ae2d-836b-4c73-8180-aca4ebb7d658} - - - {6674a0f0-c65d-4a00-a9e5-1f243b89d0a2} - - - {1893fffe-7a2b-4708-8ce5-003aa9b749f7} - - - {053a0632-27bc-4043-b5e8-760951b3b5b9} - - - {048c180d-44cf-49ca-a7aa-d0053fea07f5} - - - {3181cae5-cc15-4050-8c45-22af44a823de} - - - {290632d2-c56e-4005-a417-eb83b9531e1a} - - - - - Header Files - - - Header Files - - 
- Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\support - - 
- Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\xpath - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\misc - - - Header Files - - - Header Files - - - Header Files\support - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files - - - Header Files - - - Source Files\support - - - Header Files\tree - - - Header Files - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - 
- - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\misc - - - Source Files\misc - - - Source Files\misc - - - Source Files\support - - - Source Files\support - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - 
Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files - - - Source Files\tree - - - Source Files\tree - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\misc - - - Source Files - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj index 42a81fc06b..886a835fda 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj +++ b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj @@ -182,6 +182,8 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 Windows @@ -201,6 +203,8 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 Windows @@ -220,6 +224,8 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 Windows @@ -239,6 +245,8 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 Windows @@ -259,6 +267,8 @@ 4251 true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 Windows @@ -281,6 +291,8 @@ 4251 true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 Windows @@ -303,6 +315,8 @@ 4251 true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 
Windows @@ -325,6 +339,8 @@ 4251 true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 Windows @@ -338,7 +354,6 @@ - @@ -347,19 +362,13 @@ - - - - - - @@ -377,30 +386,24 @@ - - - + + - - - - - @@ -420,6 +423,7 @@ + @@ -444,13 +448,12 @@ - + - @@ -465,7 +468,6 @@ - @@ -485,12 +487,10 @@ - - @@ -499,7 +499,6 @@ - @@ -508,12 +507,10 @@ - - @@ -543,6 +540,9 @@ + + + @@ -575,6 +575,7 @@ + @@ -586,7 +587,6 @@ - @@ -600,10 +600,12 @@ + - + + @@ -626,8 +628,6 @@ - - @@ -649,4 +649,4 @@ - \ No newline at end of file + diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters index cc1986923d..f2cd0aa9d4 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters +++ b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters @@ -55,6 +55,12 @@ {290632d2-c56e-4005-a417-eb83b9531e1a} + + {b4b32b3f-e97a-448a-98e6-cbf901862bd4} + + + {f8c2bdf9-7e81-4f31-ba17-06b16ba2f081} + @@ -177,9 +183,6 @@ Header Files\atn - - Header Files\atn - Header Files\atn @@ -261,9 +264,6 @@ Header Files\atn - - Header Files\atn - Header Files\atn @@ -285,9 +285,6 @@ Header Files\atn - - Header Files\atn - Header Files\atn @@ -312,9 +309,6 @@ Header Files\atn - - Header Files\atn - Header Files\dfa @@ -336,9 +330,6 @@ Header Files\misc - - Header Files\misc - Header Files\support @@ -351,9 +342,6 @@ Header Files\support - - Header Files\support - Header Files\tree @@ -378,12 +366,6 @@ Header Files\tree - - Header Files\tree - - - Header Files\tree - Header Files\tree @@ -528,18 +510,36 @@ Header Files - - Header Files - - - Source Files\support - Header Files\tree Header Files + + Header Files\internal + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + @@ -641,9 +641,6 @@ Source Files - - Source Files\atn - Source Files\atn @@ -665,9 +662,6 @@ Source Files\atn - - Source Files\atn - Source Files\atn @@ -677,21 
+671,9 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - Source Files\atn - - Source Files\atn - Source Files\atn @@ -704,9 +686,6 @@ Source Files\atn - - Source Files\atn - Source Files\atn @@ -716,12 +695,6 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -737,12 +710,6 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -755,18 +722,9 @@ Source Files\atn - - Source Files\atn - Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -800,9 +758,6 @@ Source Files\support - - Source Files\support - Source Files\tree @@ -950,9 +905,6 @@ Source Files - - Source Files\atn - Source Files\atn @@ -971,20 +923,26 @@ Source Files\support - - Source Files\tree - Source Files\tree Source Files\tree - - Source Files\tree - Source Files\tree\pattern + + Source Files\internal + + + Source Files\support + + + Source Files\atn + + + Source Files\atn + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj new file mode 100644 index 0000000000..9992beb6c4 --- /dev/null +++ b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj @@ -0,0 +1,652 @@ + + + + + Debug Static + Win32 + + + Debug Static + x64 + + + Debug DLL + Win32 + + + Debug DLL + x64 + + + Release Static + Win32 + + + Release Static + x64 + + + Release DLL + Win32 + + + Release DLL + x64 + + + + {52618D4B-6EC4-49AD-8B83-52686244E8F3} + Win32Proj + antlr4cpp + 10.0 + + + + DynamicLibrary + true + Unicode + v143 + + + StaticLibrary + true + Unicode + v143 + + + DynamicLibrary + true + Unicode + v143 + + + StaticLibrary + true + Unicode + v143 + + + DynamicLibrary + false + true + Unicode + v143 + + + StaticLibrary + false + true + Unicode + v143 + + + DynamicLibrary + false + true + Unicode + v143 + + + StaticLibrary + false + true + Unicode + v143 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true + 
$(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + true + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + true + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + true + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + false + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + + Level4 + Disabled + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + + + + + Level4 + Disabled + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + + + + + Level4 + Disabled + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + + + + + Level4 + Disabled + 
ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + + + + + Level4 + MaxSpeed + true + true + ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + true + true + + + + + Level4 + MaxSpeed + true + true + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + true + true + + + + + Level4 + MaxSpeed + true + true + ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + true + true + + + + + Level4 + MaxSpeed + true + true + ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + stdcpp17 + + + Windows + true + true + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj.filters new file mode 100644 index 0000000000..f2cd0aa9d4 --- /dev/null +++ b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj.filters @@ 
-0,0 +1,948 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + {587a2726-4856-4d21-937a-fbaebaa90232} + + + {2662156f-1508-4dad-b991-a8298a6db9bf} + + + {5b1e59b1-7fa5-46a5-8d92-965bd709cca0} + + + {9de9fe74-5d67-441d-a972-3cebe6dfbfcc} + + + {89fd3896-0ab1-476d-8d64-a57f10a5e73b} + + + {23939d7b-8e11-421e-80eb-b2cfdfdd64e9} + + + {05f2bacb-b5b2-4ca3-abe1-ca9a7239ecaa} + + + {d3b2ae2d-836b-4c73-8180-aca4ebb7d658} + + + {6674a0f0-c65d-4a00-a9e5-1f243b89d0a2} + + + {1893fffe-7a2b-4708-8ce5-003aa9b749f7} + + + {053a0632-27bc-4043-b5e8-760951b3b5b9} + + + {048c180d-44cf-49ca-a7aa-d0053fea07f5} + + + {3181cae5-cc15-4050-8c45-22af44a823de} + + + {290632d2-c56e-4005-a417-eb83b9531e1a} + + + {b4b32b3f-e97a-448a-98e6-cbf901862bd4} + + + {f8c2bdf9-7e81-4f31-ba17-06b16ba2f081} + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header 
Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\dfa + + + Header Files\dfa + + + Header Files\dfa + + + Header Files\dfa + + + Header Files\misc + + + Header Files\misc + + + Header Files\misc + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\xpath + + + Header Files + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header 
Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\misc + + + Header Files + + + Header Files + + + Header Files\support + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files + + + Header Files\tree + + + Header Files + + + Header Files\internal + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source 
Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\dfa + + + Source Files\dfa + + + Source Files\dfa + + + Source Files\dfa + + + Source Files\misc + + + Source Files\misc + + + Source Files\misc + + + Source Files\support + + + Source Files\support + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files + + + Source Files + + + Source Files\support + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files + + + Source Files\tree + + + Source Files\tree + + + Source Files + + + Source Files + + + Source Files + + + Source Files\atn + + + Source Files\misc + + + Source Files + + + Source Files + + + Source Files + + + Source Files\support + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree\pattern + + + Source Files\internal + + + Source Files\support + + + Source Files\atn + + + Source Files\atn + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj index 
a256e089a1..92e6bf1047 100644 --- a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj +++ b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj @@ -3,12 +3,11 @@ archiveVersion = 1; classes = { }; - objectVersion = 46; + objectVersion = 54; objects = { /* Begin PBXBuildFile section */ 270C67F31CDB4F1E00116E17 /* antlrcpp_ios.h in Headers */ = {isa = PBXBuildFile; fileRef = 270C67F21CDB4F1E00116E17 /* antlrcpp_ios.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 270C69E01CDB536A00116E17 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 270C69DF1CDB536A00116E17 /* CoreFoundation.framework */; }; 276566E01DA93BFB000869BE /* ParseTree.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276566DF1DA93BFB000869BE /* ParseTree.cpp */; }; 276566E11DA93BFB000869BE /* ParseTree.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276566DF1DA93BFB000869BE /* ParseTree.cpp */; }; 276566E21DA93BFB000869BE /* ParseTree.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276566DF1DA93BFB000869BE /* ParseTree.cpp */; }; @@ -30,12 +29,6 @@ 276E5D3D1CDB57AA003FF4B4 /* ANTLRInputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C111CDB57AA003FF4B4 /* ANTLRInputStream.h */; }; 276E5D3E1CDB57AA003FF4B4 /* ANTLRInputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C111CDB57AA003FF4B4 /* ANTLRInputStream.h */; }; 276E5D3F1CDB57AA003FF4B4 /* ANTLRInputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C111CDB57AA003FF4B4 /* ANTLRInputStream.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5D401CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C131CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp */; }; - 276E5D411CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C131CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp */; }; - 276E5D421CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */ = {isa = 
PBXBuildFile; fileRef = 276E5C131CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp */; }; - 276E5D431CDB57AA003FF4B4 /* AbstractPredicateTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C141CDB57AA003FF4B4 /* AbstractPredicateTransition.h */; }; - 276E5D441CDB57AA003FF4B4 /* AbstractPredicateTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C141CDB57AA003FF4B4 /* AbstractPredicateTransition.h */; }; - 276E5D451CDB57AA003FF4B4 /* AbstractPredicateTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C141CDB57AA003FF4B4 /* AbstractPredicateTransition.h */; settings = {ATTRIBUTES = (Public, ); }; }; 276E5D461CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C151CDB57AA003FF4B4 /* ActionTransition.cpp */; }; 276E5D471CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C151CDB57AA003FF4B4 /* ActionTransition.cpp */; }; 276E5D481CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C151CDB57AA003FF4B4 /* ActionTransition.cpp */; }; @@ -84,12 +77,6 @@ 276E5D731CDB57AA003FF4B4 /* ATNDeserializer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C241CDB57AA003FF4B4 /* ATNDeserializer.h */; }; 276E5D741CDB57AA003FF4B4 /* ATNDeserializer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C241CDB57AA003FF4B4 /* ATNDeserializer.h */; }; 276E5D751CDB57AA003FF4B4 /* ATNDeserializer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C241CDB57AA003FF4B4 /* ATNDeserializer.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5D761CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C251CDB57AA003FF4B4 /* ATNSerializer.cpp */; }; - 276E5D771CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C251CDB57AA003FF4B4 /* ATNSerializer.cpp */; }; - 276E5D781CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
276E5C251CDB57AA003FF4B4 /* ATNSerializer.cpp */; }; - 276E5D791CDB57AA003FF4B4 /* ATNSerializer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C261CDB57AA003FF4B4 /* ATNSerializer.h */; }; - 276E5D7A1CDB57AA003FF4B4 /* ATNSerializer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C261CDB57AA003FF4B4 /* ATNSerializer.h */; }; - 276E5D7B1CDB57AA003FF4B4 /* ATNSerializer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C261CDB57AA003FF4B4 /* ATNSerializer.h */; settings = {ATTRIBUTES = (Public, ); }; }; 276E5D7C1CDB57AA003FF4B4 /* ATNSimulator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C271CDB57AA003FF4B4 /* ATNSimulator.cpp */; }; 276E5D7D1CDB57AA003FF4B4 /* ATNSimulator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C271CDB57AA003FF4B4 /* ATNSimulator.cpp */; }; 276E5D7E1CDB57AA003FF4B4 /* ATNSimulator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C271CDB57AA003FF4B4 /* ATNSimulator.cpp */; }; @@ -111,21 +98,12 @@ 276E5D911CDB57AA003FF4B4 /* AtomTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C2E1CDB57AA003FF4B4 /* AtomTransition.h */; }; 276E5D921CDB57AA003FF4B4 /* AtomTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C2E1CDB57AA003FF4B4 /* AtomTransition.h */; }; 276E5D931CDB57AA003FF4B4 /* AtomTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C2E1CDB57AA003FF4B4 /* AtomTransition.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5D941CDB57AA003FF4B4 /* BasicBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C2F1CDB57AA003FF4B4 /* BasicBlockStartState.cpp */; }; - 276E5D951CDB57AA003FF4B4 /* BasicBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C2F1CDB57AA003FF4B4 /* BasicBlockStartState.cpp */; }; - 276E5D961CDB57AA003FF4B4 /* BasicBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C2F1CDB57AA003FF4B4 /* BasicBlockStartState.cpp */; }; 276E5D971CDB57AA003FF4B4 /* BasicBlockStartState.h in Headers 
*/ = {isa = PBXBuildFile; fileRef = 276E5C301CDB57AA003FF4B4 /* BasicBlockStartState.h */; }; 276E5D981CDB57AA003FF4B4 /* BasicBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C301CDB57AA003FF4B4 /* BasicBlockStartState.h */; }; 276E5D991CDB57AA003FF4B4 /* BasicBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C301CDB57AA003FF4B4 /* BasicBlockStartState.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5D9A1CDB57AA003FF4B4 /* BasicState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C311CDB57AA003FF4B4 /* BasicState.cpp */; }; - 276E5D9B1CDB57AA003FF4B4 /* BasicState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C311CDB57AA003FF4B4 /* BasicState.cpp */; }; - 276E5D9C1CDB57AA003FF4B4 /* BasicState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C311CDB57AA003FF4B4 /* BasicState.cpp */; }; 276E5D9D1CDB57AA003FF4B4 /* BasicState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C321CDB57AA003FF4B4 /* BasicState.h */; }; 276E5D9E1CDB57AA003FF4B4 /* BasicState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C321CDB57AA003FF4B4 /* BasicState.h */; }; 276E5D9F1CDB57AA003FF4B4 /* BasicState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C321CDB57AA003FF4B4 /* BasicState.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5DA01CDB57AA003FF4B4 /* BlockEndState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */; }; - 276E5DA11CDB57AA003FF4B4 /* BlockEndState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */; }; - 276E5DA21CDB57AA003FF4B4 /* BlockEndState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */; }; 276E5DA31CDB57AA003FF4B4 /* BlockEndState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C341CDB57AA003FF4B4 /* BlockEndState.h */; }; 276E5DA41CDB57AA003FF4B4 /* BlockEndState.h in Headers */ = {isa = PBXBuildFile; fileRef = 
276E5C341CDB57AA003FF4B4 /* BlockEndState.h */; }; 276E5DA51CDB57AA003FF4B4 /* BlockEndState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C341CDB57AA003FF4B4 /* BlockEndState.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -156,12 +134,6 @@ 276E5DC11CDB57AA003FF4B4 /* DecisionState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C3E1CDB57AA003FF4B4 /* DecisionState.h */; }; 276E5DC21CDB57AA003FF4B4 /* DecisionState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C3E1CDB57AA003FF4B4 /* DecisionState.h */; }; 276E5DC31CDB57AA003FF4B4 /* DecisionState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C3E1CDB57AA003FF4B4 /* DecisionState.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5DC41CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C3F1CDB57AA003FF4B4 /* EmptyPredictionContext.cpp */; }; - 276E5DC51CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C3F1CDB57AA003FF4B4 /* EmptyPredictionContext.cpp */; }; - 276E5DC61CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C3F1CDB57AA003FF4B4 /* EmptyPredictionContext.cpp */; }; - 276E5DC71CDB57AA003FF4B4 /* EmptyPredictionContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C401CDB57AA003FF4B4 /* EmptyPredictionContext.h */; }; - 276E5DC81CDB57AA003FF4B4 /* EmptyPredictionContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C401CDB57AA003FF4B4 /* EmptyPredictionContext.h */; }; - 276E5DC91CDB57AA003FF4B4 /* EmptyPredictionContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C401CDB57AA003FF4B4 /* EmptyPredictionContext.h */; settings = {ATTRIBUTES = (Public, ); }; }; 276E5DCA1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C411CDB57AA003FF4B4 /* EpsilonTransition.cpp */; }; 276E5DCB1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
276E5C411CDB57AA003FF4B4 /* EpsilonTransition.cpp */; }; 276E5DCC1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C411CDB57AA003FF4B4 /* EpsilonTransition.cpp */; }; @@ -264,9 +236,6 @@ 276E5E301CDB57AA003FF4B4 /* LookaheadEventInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C631CDB57AA003FF4B4 /* LookaheadEventInfo.h */; }; 276E5E311CDB57AA003FF4B4 /* LookaheadEventInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C631CDB57AA003FF4B4 /* LookaheadEventInfo.h */; }; 276E5E321CDB57AA003FF4B4 /* LookaheadEventInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C631CDB57AA003FF4B4 /* LookaheadEventInfo.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5E331CDB57AA003FF4B4 /* LoopEndState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C641CDB57AA003FF4B4 /* LoopEndState.cpp */; }; - 276E5E341CDB57AA003FF4B4 /* LoopEndState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C641CDB57AA003FF4B4 /* LoopEndState.cpp */; }; - 276E5E351CDB57AA003FF4B4 /* LoopEndState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C641CDB57AA003FF4B4 /* LoopEndState.cpp */; }; 276E5E361CDB57AA003FF4B4 /* LoopEndState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C651CDB57AA003FF4B4 /* LoopEndState.h */; }; 276E5E371CDB57AA003FF4B4 /* LoopEndState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C651CDB57AA003FF4B4 /* LoopEndState.h */; }; 276E5E381CDB57AA003FF4B4 /* LoopEndState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C651CDB57AA003FF4B4 /* LoopEndState.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -294,15 +263,9 @@ 276E5E511CDB57AA003FF4B4 /* ParserATNSimulator.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C6E1CDB57AA003FF4B4 /* ParserATNSimulator.h */; }; 276E5E521CDB57AA003FF4B4 /* ParserATNSimulator.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C6E1CDB57AA003FF4B4 /* ParserATNSimulator.h */; }; 276E5E531CDB57AA003FF4B4 /* ParserATNSimulator.h 
in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C6E1CDB57AA003FF4B4 /* ParserATNSimulator.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5E541CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C6F1CDB57AA003FF4B4 /* PlusBlockStartState.cpp */; }; - 276E5E551CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C6F1CDB57AA003FF4B4 /* PlusBlockStartState.cpp */; }; - 276E5E561CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C6F1CDB57AA003FF4B4 /* PlusBlockStartState.cpp */; }; 276E5E571CDB57AA003FF4B4 /* PlusBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C701CDB57AA003FF4B4 /* PlusBlockStartState.h */; }; 276E5E581CDB57AA003FF4B4 /* PlusBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C701CDB57AA003FF4B4 /* PlusBlockStartState.h */; }; 276E5E591CDB57AA003FF4B4 /* PlusBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C701CDB57AA003FF4B4 /* PlusBlockStartState.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5E5A1CDB57AA003FF4B4 /* PlusLoopbackState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C711CDB57AA003FF4B4 /* PlusLoopbackState.cpp */; }; - 276E5E5B1CDB57AA003FF4B4 /* PlusLoopbackState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C711CDB57AA003FF4B4 /* PlusLoopbackState.cpp */; }; - 276E5E5C1CDB57AA003FF4B4 /* PlusLoopbackState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C711CDB57AA003FF4B4 /* PlusLoopbackState.cpp */; }; 276E5E5D1CDB57AA003FF4B4 /* PlusLoopbackState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C721CDB57AA003FF4B4 /* PlusLoopbackState.h */; }; 276E5E5E1CDB57AA003FF4B4 /* PlusLoopbackState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C721CDB57AA003FF4B4 /* PlusLoopbackState.h */; }; 276E5E5F1CDB57AA003FF4B4 /* PlusLoopbackState.h in Headers */ = {isa = PBXBuildFile; fileRef = 
276E5C721CDB57AA003FF4B4 /* PlusLoopbackState.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -348,15 +311,9 @@ 276E5E871CDB57AA003FF4B4 /* RangeTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C801CDB57AA003FF4B4 /* RangeTransition.h */; }; 276E5E881CDB57AA003FF4B4 /* RangeTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C801CDB57AA003FF4B4 /* RangeTransition.h */; }; 276E5E891CDB57AA003FF4B4 /* RangeTransition.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C801CDB57AA003FF4B4 /* RangeTransition.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5E8A1CDB57AA003FF4B4 /* RuleStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C811CDB57AA003FF4B4 /* RuleStartState.cpp */; }; - 276E5E8B1CDB57AA003FF4B4 /* RuleStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C811CDB57AA003FF4B4 /* RuleStartState.cpp */; }; - 276E5E8C1CDB57AA003FF4B4 /* RuleStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C811CDB57AA003FF4B4 /* RuleStartState.cpp */; }; 276E5E8D1CDB57AA003FF4B4 /* RuleStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C821CDB57AA003FF4B4 /* RuleStartState.h */; }; 276E5E8E1CDB57AA003FF4B4 /* RuleStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C821CDB57AA003FF4B4 /* RuleStartState.h */; }; 276E5E8F1CDB57AA003FF4B4 /* RuleStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C821CDB57AA003FF4B4 /* RuleStartState.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5E901CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C831CDB57AA003FF4B4 /* RuleStopState.cpp */; }; - 276E5E911CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C831CDB57AA003FF4B4 /* RuleStopState.cpp */; }; - 276E5E921CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C831CDB57AA003FF4B4 /* RuleStopState.cpp */; }; 276E5E931CDB57AA003FF4B4 /* 
RuleStopState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C841CDB57AA003FF4B4 /* RuleStopState.h */; }; 276E5E941CDB57AA003FF4B4 /* RuleStopState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C841CDB57AA003FF4B4 /* RuleStopState.h */; }; 276E5E951CDB57AA003FF4B4 /* RuleStopState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C841CDB57AA003FF4B4 /* RuleStopState.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -384,9 +341,6 @@ 276E5EAB1CDB57AA003FF4B4 /* SingletonPredictionContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C8C1CDB57AA003FF4B4 /* SingletonPredictionContext.h */; }; 276E5EAC1CDB57AA003FF4B4 /* SingletonPredictionContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C8C1CDB57AA003FF4B4 /* SingletonPredictionContext.h */; }; 276E5EAD1CDB57AA003FF4B4 /* SingletonPredictionContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C8C1CDB57AA003FF4B4 /* SingletonPredictionContext.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5EAE1CDB57AA003FF4B4 /* StarBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C8D1CDB57AA003FF4B4 /* StarBlockStartState.cpp */; }; - 276E5EAF1CDB57AA003FF4B4 /* StarBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C8D1CDB57AA003FF4B4 /* StarBlockStartState.cpp */; }; - 276E5EB01CDB57AA003FF4B4 /* StarBlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C8D1CDB57AA003FF4B4 /* StarBlockStartState.cpp */; }; 276E5EB11CDB57AA003FF4B4 /* StarBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C8E1CDB57AA003FF4B4 /* StarBlockStartState.h */; }; 276E5EB21CDB57AA003FF4B4 /* StarBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C8E1CDB57AA003FF4B4 /* StarBlockStartState.h */; }; 276E5EB31CDB57AA003FF4B4 /* StarBlockStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C8E1CDB57AA003FF4B4 /* StarBlockStartState.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -396,15 
+350,9 @@ 276E5EB71CDB57AA003FF4B4 /* StarLoopbackState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C901CDB57AA003FF4B4 /* StarLoopbackState.h */; }; 276E5EB81CDB57AA003FF4B4 /* StarLoopbackState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C901CDB57AA003FF4B4 /* StarLoopbackState.h */; }; 276E5EB91CDB57AA003FF4B4 /* StarLoopbackState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C901CDB57AA003FF4B4 /* StarLoopbackState.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5EBA1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C911CDB57AA003FF4B4 /* StarLoopEntryState.cpp */; }; - 276E5EBB1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C911CDB57AA003FF4B4 /* StarLoopEntryState.cpp */; }; - 276E5EBC1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C911CDB57AA003FF4B4 /* StarLoopEntryState.cpp */; }; 276E5EBD1CDB57AA003FF4B4 /* StarLoopEntryState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C921CDB57AA003FF4B4 /* StarLoopEntryState.h */; }; 276E5EBE1CDB57AA003FF4B4 /* StarLoopEntryState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C921CDB57AA003FF4B4 /* StarLoopEntryState.h */; }; 276E5EBF1CDB57AA003FF4B4 /* StarLoopEntryState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C921CDB57AA003FF4B4 /* StarLoopEntryState.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5EC01CDB57AA003FF4B4 /* TokensStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C931CDB57AA003FF4B4 /* TokensStartState.cpp */; }; - 276E5EC11CDB57AA003FF4B4 /* TokensStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C931CDB57AA003FF4B4 /* TokensStartState.cpp */; }; - 276E5EC21CDB57AA003FF4B4 /* TokensStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5C931CDB57AA003FF4B4 /* TokensStartState.cpp */; }; 276E5EC31CDB57AA003FF4B4 /* TokensStartState.h in Headers */ = {isa 
= PBXBuildFile; fileRef = 276E5C941CDB57AA003FF4B4 /* TokensStartState.h */; }; 276E5EC41CDB57AA003FF4B4 /* TokensStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C941CDB57AA003FF4B4 /* TokensStartState.h */; }; 276E5EC51CDB57AA003FF4B4 /* TokensStartState.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5C941CDB57AA003FF4B4 /* TokensStartState.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -645,12 +593,6 @@ 276E5FBC1CDB57AA003FF4B4 /* Declarations.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CEA1CDB57AA003FF4B4 /* Declarations.h */; }; 276E5FBD1CDB57AA003FF4B4 /* Declarations.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CEA1CDB57AA003FF4B4 /* Declarations.h */; }; 276E5FBE1CDB57AA003FF4B4 /* Declarations.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CEA1CDB57AA003FF4B4 /* Declarations.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5FBF1CDB57AA003FF4B4 /* guid.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CEB1CDB57AA003FF4B4 /* guid.cpp */; }; - 276E5FC01CDB57AA003FF4B4 /* guid.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CEB1CDB57AA003FF4B4 /* guid.cpp */; }; - 276E5FC11CDB57AA003FF4B4 /* guid.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CEB1CDB57AA003FF4B4 /* guid.cpp */; }; - 276E5FC21CDB57AA003FF4B4 /* guid.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CEC1CDB57AA003FF4B4 /* guid.h */; }; - 276E5FC31CDB57AA003FF4B4 /* guid.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CEC1CDB57AA003FF4B4 /* guid.h */; }; - 276E5FC41CDB57AA003FF4B4 /* guid.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CEC1CDB57AA003FF4B4 /* guid.h */; settings = {ATTRIBUTES = (Public, ); }; }; 276E5FC51CDB57AA003FF4B4 /* StringUtils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CED1CDB57AA003FF4B4 /* StringUtils.cpp */; }; 276E5FC61CDB57AA003FF4B4 /* StringUtils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CED1CDB57AA003FF4B4 /* StringUtils.cpp */; }; 
276E5FC71CDB57AA003FF4B4 /* StringUtils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CED1CDB57AA003FF4B4 /* StringUtils.cpp */; }; @@ -806,12 +748,6 @@ 2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; 2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; 2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; - 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; - 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; - 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; - 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; - 2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; - 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; @@ -830,9 +766,6 @@ 2793DCAD1F08095F00A84290 /* 
WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; - 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; - 2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; - 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; @@ -921,13 +854,75 @@ 27DB44D91D0463DB007E790B /* XPathWildcardElement.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 27DB449B1D045537007E790B /* XPathWildcardElement.cpp */; }; 27DB44DA1D0463DB007E790B /* XPathWildcardElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 27DB449C1D045537007E790B /* XPathWildcardElement.h */; }; 27F4A8561D4CEB2A00E067EE /* Any.h in Headers */ = {isa = PBXBuildFile; fileRef = 27F4A8551D4CEB2A00E067EE /* Any.h */; }; + 9B25DCA12910249100DF9703 /* FlatHashSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DC9F2910249100DF9703 /* FlatHashSet.h */; }; + 9B25DCA22910249100DF9703 /* FlatHashSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 
9B25DC9F2910249100DF9703 /* FlatHashSet.h */; }; + 9B25DCA32910249100DF9703 /* FlatHashSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DC9F2910249100DF9703 /* FlatHashSet.h */; }; + 9B25DCA42910249100DF9703 /* FlatHashMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCA02910249100DF9703 /* FlatHashMap.h */; }; + 9B25DCA52910249100DF9703 /* FlatHashMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCA02910249100DF9703 /* FlatHashMap.h */; }; + 9B25DCA62910249100DF9703 /* FlatHashMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCA02910249100DF9703 /* FlatHashMap.h */; }; + 9B25DCA82910252800DF9703 /* Version.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCA72910252800DF9703 /* Version.h */; }; + 9B25DCA92910252800DF9703 /* Version.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCA72910252800DF9703 /* Version.h */; }; + 9B25DCAA2910252800DF9703 /* Version.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCA72910252800DF9703 /* Version.h */; }; + 9B25DCAC291025B700DF9703 /* ATNStateType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCAB291025B700DF9703 /* ATNStateType.h */; }; + 9B25DCAD291025B700DF9703 /* ATNStateType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCAB291025B700DF9703 /* ATNStateType.h */; }; + 9B25DCAE291025B700DF9703 /* ATNStateType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCAB291025B700DF9703 /* ATNStateType.h */; }; + 9B25DCB0291026DE00DF9703 /* ParserATNSimulatorOptions.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCAF291026DE00DF9703 /* ParserATNSimulatorOptions.h */; }; + 9B25DCB1291026DE00DF9703 /* ParserATNSimulatorOptions.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCAF291026DE00DF9703 /* ParserATNSimulatorOptions.h */; }; + 9B25DCB2291026DE00DF9703 /* ParserATNSimulatorOptions.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCAF291026DE00DF9703 /* ParserATNSimulatorOptions.h */; }; + 9B25DCB92910278000DF9703 /* PredictionContextCache.h in 
Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB32910278000DF9703 /* PredictionContextCache.h */; }; + 9B25DCBA2910278000DF9703 /* PredictionContextCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB32910278000DF9703 /* PredictionContextCache.h */; }; + 9B25DCBB2910278000DF9703 /* PredictionContextCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB32910278000DF9703 /* PredictionContextCache.h */; }; + 9B25DCBC2910278000DF9703 /* PredictionContextMergeCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB42910278000DF9703 /* PredictionContextMergeCache.h */; }; + 9B25DCBD2910278000DF9703 /* PredictionContextMergeCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB42910278000DF9703 /* PredictionContextMergeCache.h */; }; + 9B25DCBE2910278000DF9703 /* PredictionContextMergeCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB42910278000DF9703 /* PredictionContextMergeCache.h */; }; + 9B25DCBF2910278000DF9703 /* PredictionContextType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB52910278000DF9703 /* PredictionContextType.h */; }; + 9B25DCC02910278000DF9703 /* PredictionContextType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB52910278000DF9703 /* PredictionContextType.h */; }; + 9B25DCC12910278000DF9703 /* PredictionContextType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB52910278000DF9703 /* PredictionContextType.h */; }; + 9B25DCC22910278000DF9703 /* PredictionContextCache.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCB62910278000DF9703 /* PredictionContextCache.cpp */; }; + 9B25DCC32910278000DF9703 /* PredictionContextCache.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCB62910278000DF9703 /* PredictionContextCache.cpp */; }; + 9B25DCC42910278000DF9703 /* PredictionContextCache.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCB62910278000DF9703 /* PredictionContextCache.cpp */; }; + 9B25DCC52910278000DF9703 /* PredictionContextMergeCacheOptions.h in Headers 
*/ = {isa = PBXBuildFile; fileRef = 9B25DCB72910278000DF9703 /* PredictionContextMergeCacheOptions.h */; }; + 9B25DCC62910278000DF9703 /* PredictionContextMergeCacheOptions.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB72910278000DF9703 /* PredictionContextMergeCacheOptions.h */; }; + 9B25DCC72910278000DF9703 /* PredictionContextMergeCacheOptions.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCB72910278000DF9703 /* PredictionContextMergeCacheOptions.h */; }; + 9B25DCC82910278000DF9703 /* PredictionContextMergeCache.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCB82910278000DF9703 /* PredictionContextMergeCache.cpp */; }; + 9B25DCC92910278000DF9703 /* PredictionContextMergeCache.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCB82910278000DF9703 /* PredictionContextMergeCache.cpp */; }; + 9B25DCCA2910278000DF9703 /* PredictionContextMergeCache.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCB82910278000DF9703 /* PredictionContextMergeCache.cpp */; }; + 9B25DCCD291027EE00DF9703 /* SemanticContextType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCCB291027ED00DF9703 /* SemanticContextType.h */; }; + 9B25DCCE291027EE00DF9703 /* SemanticContextType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCCB291027ED00DF9703 /* SemanticContextType.h */; }; + 9B25DCCF291027EE00DF9703 /* SemanticContextType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCCB291027ED00DF9703 /* SemanticContextType.h */; }; + 9B25DCD0291027EE00DF9703 /* SerializedATNView.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCCC291027EE00DF9703 /* SerializedATNView.h */; }; + 9B25DCD1291027EE00DF9703 /* SerializedATNView.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCCC291027EE00DF9703 /* SerializedATNView.h */; }; + 9B25DCD2291027EE00DF9703 /* SerializedATNView.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCCC291027EE00DF9703 /* SerializedATNView.h */; }; + 9B25DCD52910282B00DF9703 /* TransitionType.cpp in Sources */ = 
{isa = PBXBuildFile; fileRef = 9B25DCD32910282B00DF9703 /* TransitionType.cpp */; }; + 9B25DCD62910282B00DF9703 /* TransitionType.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCD32910282B00DF9703 /* TransitionType.cpp */; }; + 9B25DCD72910282B00DF9703 /* TransitionType.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCD32910282B00DF9703 /* TransitionType.cpp */; }; + 9B25DCD82910282B00DF9703 /* TransitionType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCD42910282B00DF9703 /* TransitionType.h */; }; + 9B25DCD92910282B00DF9703 /* TransitionType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCD42910282B00DF9703 /* TransitionType.h */; }; + 9B25DCDA2910282B00DF9703 /* TransitionType.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCD42910282B00DF9703 /* TransitionType.h */; }; + 9B25DCDE2910287000DF9703 /* Synchronization.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCDC2910287000DF9703 /* Synchronization.cpp */; }; + 9B25DCDF2910287000DF9703 /* Synchronization.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCDC2910287000DF9703 /* Synchronization.cpp */; }; + 9B25DCE02910287000DF9703 /* Synchronization.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCDC2910287000DF9703 /* Synchronization.cpp */; }; + 9B25DCE12910287000DF9703 /* Synchronization.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCDD2910287000DF9703 /* Synchronization.h */; }; + 9B25DCE22910287000DF9703 /* Synchronization.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCDD2910287000DF9703 /* Synchronization.h */; }; + 9B25DCE32910287000DF9703 /* Synchronization.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCDD2910287000DF9703 /* Synchronization.h */; }; + 9B25DCE5291028BC00DF9703 /* Casts.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE4291028BC00DF9703 /* Casts.h */; }; + 9B25DCE6291028BC00DF9703 /* Casts.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE4291028BC00DF9703 /* Casts.h */; }; + 
9B25DCE7291028BC00DF9703 /* Casts.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE4291028BC00DF9703 /* Casts.h */; }; + 9B25DCEB291028D000DF9703 /* Unicode.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE8291028D000DF9703 /* Unicode.h */; }; + 9B25DCEC291028D000DF9703 /* Unicode.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE8291028D000DF9703 /* Unicode.h */; }; + 9B25DCED291028D000DF9703 /* Unicode.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE8291028D000DF9703 /* Unicode.h */; }; + 9B25DCEE291028D000DF9703 /* Utf8.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE9291028D000DF9703 /* Utf8.h */; }; + 9B25DCEF291028D000DF9703 /* Utf8.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE9291028D000DF9703 /* Utf8.h */; }; + 9B25DCF0291028D000DF9703 /* Utf8.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B25DCE9291028D000DF9703 /* Utf8.h */; }; + 9B25DCF1291028D000DF9703 /* Utf8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCEA291028D000DF9703 /* Utf8.cpp */; }; + 9B25DCF2291028D000DF9703 /* Utf8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCEA291028D000DF9703 /* Utf8.cpp */; }; + 9B25DCF3291028D000DF9703 /* Utf8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9B25DCEA291028D000DF9703 /* Utf8.cpp */; }; /* End PBXBuildFile section */ /* Begin PBXFileReference section */ 270C67F01CDB4F1E00116E17 /* antlr4_ios.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = antlr4_ios.framework; sourceTree = BUILT_PRODUCTS_DIR; }; 270C67F21CDB4F1E00116E17 /* antlrcpp_ios.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlrcpp_ios.h; sourceTree = ""; wrapsLines = 0; }; 270C67F41CDB4F1E00116E17 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; - 270C69DF1CDB536A00116E17 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; 
name = CoreFoundation.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.3.sdk/System/Library/Frameworks/CoreFoundation.framework; sourceTree = DEVELOPER_DIR; }; 276566DF1DA93BFB000869BE /* ParseTree.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTree.cpp; sourceTree = ""; }; 276E5C0C1CDB57AA003FF4B4 /* ANTLRErrorListener.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRErrorListener.h; sourceTree = ""; wrapsLines = 0; }; 276E5C0D1CDB57AA003FF4B4 /* ANTLRErrorStrategy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRErrorStrategy.h; sourceTree = ""; }; @@ -935,8 +930,6 @@ 276E5C0F1CDB57AA003FF4B4 /* ANTLRFileStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRFileStream.h; sourceTree = ""; wrapsLines = 0; }; 276E5C101CDB57AA003FF4B4 /* ANTLRInputStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRInputStream.cpp; sourceTree = ""; }; 276E5C111CDB57AA003FF4B4 /* ANTLRInputStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRInputStream.h; sourceTree = ""; }; - 276E5C131CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AbstractPredicateTransition.cpp; sourceTree = ""; wrapsLines = 0; }; - 276E5C141CDB57AA003FF4B4 /* AbstractPredicateTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AbstractPredicateTransition.h; sourceTree = ""; }; 276E5C151CDB57AA003FF4B4 /* ActionTransition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ActionTransition.cpp; sourceTree = ""; }; 276E5C161CDB57AA003FF4B4 /* ActionTransition.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ActionTransition.h; sourceTree = ""; }; 276E5C171CDB57AA003FF4B4 /* AmbiguityInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AmbiguityInfo.cpp; sourceTree = ""; }; @@ -953,8 +946,6 @@ 276E5C221CDB57AA003FF4B4 /* ATNDeserializationOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATNDeserializationOptions.h; sourceTree = ""; }; 276E5C231CDB57AA003FF4B4 /* ATNDeserializer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ATNDeserializer.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5C241CDB57AA003FF4B4 /* ATNDeserializer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATNDeserializer.h; sourceTree = ""; wrapsLines = 0; }; - 276E5C251CDB57AA003FF4B4 /* ATNSerializer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; lineEnding = 0; path = ATNSerializer.cpp; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.cpp; }; - 276E5C261CDB57AA003FF4B4 /* ATNSerializer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATNSerializer.h; sourceTree = ""; }; 276E5C271CDB57AA003FF4B4 /* ATNSimulator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; lineEnding = 0; path = ATNSimulator.cpp; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.cpp; }; 276E5C281CDB57AA003FF4B4 /* ATNSimulator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; lineEnding = 0; path = ATNSimulator.h; sourceTree = ""; wrapsLines = 0; xcLanguageSpecificationIdentifier = xcode.lang.objcpp; }; 276E5C291CDB57AA003FF4B4 /* ATNState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ATNState.cpp; sourceTree = 
""; }; @@ -962,11 +953,8 @@ 276E5C2C1CDB57AA003FF4B4 /* ATNType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATNType.h; sourceTree = ""; }; 276E5C2D1CDB57AA003FF4B4 /* AtomTransition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AtomTransition.cpp; sourceTree = ""; }; 276E5C2E1CDB57AA003FF4B4 /* AtomTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AtomTransition.h; sourceTree = ""; }; - 276E5C2F1CDB57AA003FF4B4 /* BasicBlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BasicBlockStartState.cpp; sourceTree = ""; }; 276E5C301CDB57AA003FF4B4 /* BasicBlockStartState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BasicBlockStartState.h; sourceTree = ""; }; - 276E5C311CDB57AA003FF4B4 /* BasicState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BasicState.cpp; sourceTree = ""; }; 276E5C321CDB57AA003FF4B4 /* BasicState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BasicState.h; sourceTree = ""; }; - 276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockEndState.cpp; sourceTree = ""; }; 276E5C341CDB57AA003FF4B4 /* BlockEndState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BlockEndState.h; sourceTree = ""; }; 276E5C351CDB57AA003FF4B4 /* BlockStartState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BlockStartState.h; sourceTree = ""; }; 276E5C371CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ContextSensitivityInfo.cpp; sourceTree = ""; }; @@ 
-977,8 +965,6 @@ 276E5C3C1CDB57AA003FF4B4 /* DecisionInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DecisionInfo.h; sourceTree = ""; }; 276E5C3D1CDB57AA003FF4B4 /* DecisionState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DecisionState.cpp; sourceTree = ""; }; 276E5C3E1CDB57AA003FF4B4 /* DecisionState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DecisionState.h; sourceTree = ""; }; - 276E5C3F1CDB57AA003FF4B4 /* EmptyPredictionContext.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = EmptyPredictionContext.cpp; sourceTree = ""; wrapsLines = 0; }; - 276E5C401CDB57AA003FF4B4 /* EmptyPredictionContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = EmptyPredictionContext.h; sourceTree = ""; }; 276E5C411CDB57AA003FF4B4 /* EpsilonTransition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = EpsilonTransition.cpp; sourceTree = ""; }; 276E5C421CDB57AA003FF4B4 /* EpsilonTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = EpsilonTransition.h; sourceTree = ""; }; 276E5C431CDB57AA003FF4B4 /* ErrorInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorInfo.cpp; sourceTree = ""; wrapsLines = 0; }; @@ -1013,7 +999,6 @@ 276E5C611CDB57AA003FF4B4 /* LL1Analyzer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LL1Analyzer.h; sourceTree = ""; wrapsLines = 0; }; 276E5C621CDB57AA003FF4B4 /* LookaheadEventInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LookaheadEventInfo.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5C631CDB57AA003FF4B4 /* LookaheadEventInfo.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LookaheadEventInfo.h; sourceTree = ""; wrapsLines = 0; }; - 276E5C641CDB57AA003FF4B4 /* LoopEndState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LoopEndState.cpp; sourceTree = ""; }; 276E5C651CDB57AA003FF4B4 /* LoopEndState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LoopEndState.h; sourceTree = ""; }; 276E5C671CDB57AA003FF4B4 /* NotSetTransition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NotSetTransition.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5C681CDB57AA003FF4B4 /* NotSetTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NotSetTransition.h; sourceTree = ""; }; @@ -1023,9 +1008,7 @@ 276E5C6C1CDB57AA003FF4B4 /* ParseInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParseInfo.h; sourceTree = ""; }; 276E5C6D1CDB57AA003FF4B4 /* ParserATNSimulator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParserATNSimulator.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5C6E1CDB57AA003FF4B4 /* ParserATNSimulator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParserATNSimulator.h; sourceTree = ""; wrapsLines = 0; }; - 276E5C6F1CDB57AA003FF4B4 /* PlusBlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PlusBlockStartState.cpp; sourceTree = ""; }; 276E5C701CDB57AA003FF4B4 /* PlusBlockStartState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PlusBlockStartState.h; sourceTree = ""; }; - 276E5C711CDB57AA003FF4B4 /* PlusLoopbackState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PlusLoopbackState.cpp; sourceTree = 
""; }; 276E5C721CDB57AA003FF4B4 /* PlusLoopbackState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PlusLoopbackState.h; sourceTree = ""; }; 276E5C731CDB57AA003FF4B4 /* PrecedencePredicateTransition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PrecedencePredicateTransition.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5C741CDB57AA003FF4B4 /* PrecedencePredicateTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PrecedencePredicateTransition.h; sourceTree = ""; }; @@ -1041,9 +1024,7 @@ 276E5C7E1CDB57AA003FF4B4 /* ProfilingATNSimulator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ProfilingATNSimulator.h; sourceTree = ""; wrapsLines = 0; }; 276E5C7F1CDB57AA003FF4B4 /* RangeTransition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RangeTransition.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5C801CDB57AA003FF4B4 /* RangeTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RangeTransition.h; sourceTree = ""; }; - 276E5C811CDB57AA003FF4B4 /* RuleStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuleStartState.cpp; sourceTree = ""; }; 276E5C821CDB57AA003FF4B4 /* RuleStartState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuleStartState.h; sourceTree = ""; }; - 276E5C831CDB57AA003FF4B4 /* RuleStopState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuleStopState.cpp; sourceTree = ""; }; 276E5C841CDB57AA003FF4B4 /* RuleStopState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuleStopState.h; sourceTree = ""; }; 276E5C851CDB57AA003FF4B4 /* RuleTransition.cpp */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuleTransition.cpp; sourceTree = ""; }; 276E5C861CDB57AA003FF4B4 /* RuleTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuleTransition.h; sourceTree = ""; }; @@ -1053,13 +1034,10 @@ 276E5C8A1CDB57AA003FF4B4 /* SetTransition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SetTransition.h; sourceTree = ""; }; 276E5C8B1CDB57AA003FF4B4 /* SingletonPredictionContext.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SingletonPredictionContext.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5C8C1CDB57AA003FF4B4 /* SingletonPredictionContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SingletonPredictionContext.h; sourceTree = ""; wrapsLines = 0; }; - 276E5C8D1CDB57AA003FF4B4 /* StarBlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StarBlockStartState.cpp; sourceTree = ""; }; 276E5C8E1CDB57AA003FF4B4 /* StarBlockStartState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StarBlockStartState.h; sourceTree = ""; }; 276E5C8F1CDB57AA003FF4B4 /* StarLoopbackState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StarLoopbackState.cpp; sourceTree = ""; }; 276E5C901CDB57AA003FF4B4 /* StarLoopbackState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StarLoopbackState.h; sourceTree = ""; }; - 276E5C911CDB57AA003FF4B4 /* StarLoopEntryState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StarLoopEntryState.cpp; sourceTree = ""; }; 276E5C921CDB57AA003FF4B4 /* StarLoopEntryState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; 
path = StarLoopEntryState.h; sourceTree = ""; }; - 276E5C931CDB57AA003FF4B4 /* TokensStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TokensStartState.cpp; sourceTree = ""; }; 276E5C941CDB57AA003FF4B4 /* TokensStartState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TokensStartState.h; sourceTree = ""; }; 276E5C951CDB57AA003FF4B4 /* Transition.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Transition.cpp; sourceTree = ""; }; 276E5C961CDB57AA003FF4B4 /* Transition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Transition.h; sourceTree = ""; }; @@ -1140,8 +1118,6 @@ 276E5CE81CDB57AA003FF4B4 /* CPPUtils.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPPUtils.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5CE91CDB57AA003FF4B4 /* CPPUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; lineEnding = 0; path = CPPUtils.h; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.objcpp; }; 276E5CEA1CDB57AA003FF4B4 /* Declarations.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Declarations.h; sourceTree = ""; }; - 276E5CEB1CDB57AA003FF4B4 /* guid.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = guid.cpp; sourceTree = ""; }; - 276E5CEC1CDB57AA003FF4B4 /* guid.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = guid.h; sourceTree = ""; }; 276E5CED1CDB57AA003FF4B4 /* StringUtils.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StringUtils.cpp; sourceTree = ""; }; 276E5CEE1CDB57AA003FF4B4 /* StringUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
StringUtils.h; sourceTree = ""; }; 276E5CF01CDB57AA003FF4B4 /* Token.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Token.h; sourceTree = ""; }; @@ -1194,15 +1170,12 @@ 2793DC841F08083F00A84290 /* TokenSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TokenSource.cpp; sourceTree = ""; }; 2793DC881F08087500A84290 /* Chunk.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Chunk.cpp; sourceTree = ""; }; 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeListener.cpp; sourceTree = ""; }; - 2793DC901F0808A200A84290 /* TerminalNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TerminalNode.cpp; sourceTree = ""; }; - 2793DC941F0808E100A84290 /* ErrorNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorNode.cpp; sourceTree = ""; }; 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeVisitor.cpp; sourceTree = ""; }; 2793DC9C1F08090D00A84290 /* Any.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Any.cpp; sourceTree = ""; }; 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorListener.cpp; sourceTree = ""; }; 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorStrategy.cpp; sourceTree = ""; }; 2793DCA21F08095F00A84290 /* Token.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Token.cpp; sourceTree = ""; }; 
2793DCA31F08095F00A84290 /* WritableToken.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WritableToken.cpp; sourceTree = ""; }; - 2793DCB01F08099C00A84290 /* BlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockStartState.cpp; sourceTree = ""; }; 2793DCB11F08099C00A84290 /* LexerAction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerAction.cpp; sourceTree = ""; }; 2794D8551CE7821B00FADD0F /* antlr4-common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-common.h"; sourceTree = ""; }; 27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-runtime.h"; sourceTree = ""; }; @@ -1235,6 +1208,27 @@ 27F4A8551D4CEB2A00E067EE /* Any.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Any.h; sourceTree = ""; }; 37C147171B4D5A04008EDDDB /* libantlr4-runtime.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libantlr4-runtime.a"; sourceTree = BUILT_PRODUCTS_DIR; }; 37D727AA1867AF1E007B6D10 /* libantlr4-runtime.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = "libantlr4-runtime.dylib"; sourceTree = BUILT_PRODUCTS_DIR; }; + 9B25DC9F2910249100DF9703 /* FlatHashSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FlatHashSet.h; sourceTree = ""; }; + 9B25DCA02910249100DF9703 /* FlatHashMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FlatHashMap.h; sourceTree = ""; }; + 9B25DCA72910252800DF9703 /* Version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Version.h; sourceTree = ""; }; + 
9B25DCAB291025B700DF9703 /* ATNStateType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATNStateType.h; sourceTree = ""; }; + 9B25DCAF291026DE00DF9703 /* ParserATNSimulatorOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParserATNSimulatorOptions.h; sourceTree = ""; }; + 9B25DCB32910278000DF9703 /* PredictionContextCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PredictionContextCache.h; sourceTree = ""; }; + 9B25DCB42910278000DF9703 /* PredictionContextMergeCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PredictionContextMergeCache.h; sourceTree = ""; }; + 9B25DCB52910278000DF9703 /* PredictionContextType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PredictionContextType.h; sourceTree = ""; }; + 9B25DCB62910278000DF9703 /* PredictionContextCache.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PredictionContextCache.cpp; sourceTree = ""; }; + 9B25DCB72910278000DF9703 /* PredictionContextMergeCacheOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PredictionContextMergeCacheOptions.h; sourceTree = ""; }; + 9B25DCB82910278000DF9703 /* PredictionContextMergeCache.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PredictionContextMergeCache.cpp; sourceTree = ""; }; + 9B25DCCB291027ED00DF9703 /* SemanticContextType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SemanticContextType.h; sourceTree = ""; }; + 9B25DCCC291027EE00DF9703 /* SerializedATNView.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SerializedATNView.h; sourceTree = ""; }; + 9B25DCD32910282B00DF9703 /* TransitionType.cpp */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TransitionType.cpp; sourceTree = ""; }; + 9B25DCD42910282B00DF9703 /* TransitionType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TransitionType.h; sourceTree = ""; }; + 9B25DCDC2910287000DF9703 /* Synchronization.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Synchronization.cpp; sourceTree = ""; }; + 9B25DCDD2910287000DF9703 /* Synchronization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Synchronization.h; sourceTree = ""; }; + 9B25DCE4291028BC00DF9703 /* Casts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Casts.h; sourceTree = ""; }; + 9B25DCE8291028D000DF9703 /* Unicode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Unicode.h; sourceTree = ""; }; + 9B25DCE9291028D000DF9703 /* Utf8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Utf8.h; sourceTree = ""; }; + 9B25DCEA291028D000DF9703 /* Utf8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Utf8.cpp; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -1242,7 +1236,6 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 270C69E01CDB536A00116E17 /* CoreFoundation.framework in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1279,6 +1272,7 @@ children = ( 276E5C121CDB57AA003FF4B4 /* atn */, 276E5CAB1CDB57AA003FF4B4 /* dfa */, + 9B25DCDB2910287000DF9703 /* internal */, 276E5CC91CDB57AA003FF4B4 /* misc */, 276E5CE41CDB57AA003FF4B4 /* support */, 276E5CF91CDB57AA003FF4B4 /* tree */, @@ -1316,6 +1310,8 @@ 276E5CB71CDB57AA003FF4B4 /* Exceptions.h */, 276E5CB81CDB57AA003FF4B4 /* FailedPredicateException.cpp */, 
276E5CB91CDB57AA003FF4B4 /* FailedPredicateException.h */, + 9B25DCA02910249100DF9703 /* FlatHashMap.h */, + 9B25DC9F2910249100DF9703 /* FlatHashSet.h */, 276E5CBA1CDB57AA003FF4B4 /* InputMismatchException.cpp */, 276E5CBB1CDB57AA003FF4B4 /* InputMismatchException.h */, 276E5CBC1CDB57AA003FF4B4 /* InterpreterRuleContext.cpp */, @@ -1363,6 +1359,7 @@ 276E5D231CDB57AA003FF4B4 /* UnbufferedCharStream.h */, 276E5D241CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp */, 276E5D251CDB57AA003FF4B4 /* UnbufferedTokenStream.h */, + 9B25DCA72910252800DF9703 /* Version.h */, 276E5D271CDB57AA003FF4B4 /* Vocabulary.cpp */, 276E5D281CDB57AA003FF4B4 /* Vocabulary.h */, 2793DCA31F08095F00A84290 /* WritableToken.cpp */, @@ -1375,8 +1372,6 @@ 276E5C121CDB57AA003FF4B4 /* atn */ = { isa = PBXGroup; children = ( - 276E5C131CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp */, - 276E5C141CDB57AA003FF4B4 /* AbstractPredicateTransition.h */, 276E5C151CDB57AA003FF4B4 /* ActionTransition.cpp */, 276E5C161CDB57AA003FF4B4 /* ActionTransition.h */, 276E5C171CDB57AA003FF4B4 /* AmbiguityInfo.cpp */, @@ -1393,22 +1388,17 @@ 276E5C221CDB57AA003FF4B4 /* ATNDeserializationOptions.h */, 276E5C231CDB57AA003FF4B4 /* ATNDeserializer.cpp */, 276E5C241CDB57AA003FF4B4 /* ATNDeserializer.h */, - 276E5C251CDB57AA003FF4B4 /* ATNSerializer.cpp */, - 276E5C261CDB57AA003FF4B4 /* ATNSerializer.h */, 276E5C271CDB57AA003FF4B4 /* ATNSimulator.cpp */, 276E5C281CDB57AA003FF4B4 /* ATNSimulator.h */, 276E5C291CDB57AA003FF4B4 /* ATNState.cpp */, 276E5C2A1CDB57AA003FF4B4 /* ATNState.h */, + 9B25DCAB291025B700DF9703 /* ATNStateType.h */, 276E5C2C1CDB57AA003FF4B4 /* ATNType.h */, 276E5C2D1CDB57AA003FF4B4 /* AtomTransition.cpp */, 276E5C2E1CDB57AA003FF4B4 /* AtomTransition.h */, - 276E5C2F1CDB57AA003FF4B4 /* BasicBlockStartState.cpp */, 276E5C301CDB57AA003FF4B4 /* BasicBlockStartState.h */, - 276E5C311CDB57AA003FF4B4 /* BasicState.cpp */, 276E5C321CDB57AA003FF4B4 /* BasicState.h */, - 276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp 
*/, 276E5C341CDB57AA003FF4B4 /* BlockEndState.h */, - 2793DCB01F08099C00A84290 /* BlockStartState.cpp */, 276E5C351CDB57AA003FF4B4 /* BlockStartState.h */, 276E5C371CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp */, 276E5C381CDB57AA003FF4B4 /* ContextSensitivityInfo.h */, @@ -1418,8 +1408,6 @@ 276E5C3C1CDB57AA003FF4B4 /* DecisionInfo.h */, 276E5C3D1CDB57AA003FF4B4 /* DecisionState.cpp */, 276E5C3E1CDB57AA003FF4B4 /* DecisionState.h */, - 276E5C3F1CDB57AA003FF4B4 /* EmptyPredictionContext.cpp */, - 276E5C401CDB57AA003FF4B4 /* EmptyPredictionContext.h */, 276E5C411CDB57AA003FF4B4 /* EpsilonTransition.cpp */, 276E5C421CDB57AA003FF4B4 /* EpsilonTransition.h */, 276E5C431CDB57AA003FF4B4 /* ErrorInfo.cpp */, @@ -1455,7 +1443,6 @@ 276E5C611CDB57AA003FF4B4 /* LL1Analyzer.h */, 276E5C621CDB57AA003FF4B4 /* LookaheadEventInfo.cpp */, 276E5C631CDB57AA003FF4B4 /* LookaheadEventInfo.h */, - 276E5C641CDB57AA003FF4B4 /* LoopEndState.cpp */, 276E5C651CDB57AA003FF4B4 /* LoopEndState.h */, 276E5C671CDB57AA003FF4B4 /* NotSetTransition.cpp */, 276E5C681CDB57AA003FF4B4 /* NotSetTransition.h */, @@ -1465,9 +1452,8 @@ 276E5C6C1CDB57AA003FF4B4 /* ParseInfo.h */, 276E5C6D1CDB57AA003FF4B4 /* ParserATNSimulator.cpp */, 276E5C6E1CDB57AA003FF4B4 /* ParserATNSimulator.h */, - 276E5C6F1CDB57AA003FF4B4 /* PlusBlockStartState.cpp */, + 9B25DCAF291026DE00DF9703 /* ParserATNSimulatorOptions.h */, 276E5C701CDB57AA003FF4B4 /* PlusBlockStartState.h */, - 276E5C711CDB57AA003FF4B4 /* PlusLoopbackState.cpp */, 276E5C721CDB57AA003FF4B4 /* PlusLoopbackState.h */, 276E5C731CDB57AA003FF4B4 /* PrecedencePredicateTransition.cpp */, 276E5C741CDB57AA003FF4B4 /* PrecedencePredicateTransition.h */, @@ -1477,34 +1463,39 @@ 276E5C781CDB57AA003FF4B4 /* PredicateTransition.h */, 276E5C791CDB57AA003FF4B4 /* PredictionContext.cpp */, 276E5C7A1CDB57AA003FF4B4 /* PredictionContext.h */, + 9B25DCB62910278000DF9703 /* PredictionContextCache.cpp */, + 9B25DCB32910278000DF9703 /* PredictionContextCache.h */, + 
9B25DCB82910278000DF9703 /* PredictionContextMergeCache.cpp */, + 9B25DCB42910278000DF9703 /* PredictionContextMergeCache.h */, + 9B25DCB72910278000DF9703 /* PredictionContextMergeCacheOptions.h */, + 9B25DCB52910278000DF9703 /* PredictionContextType.h */, 276E5C7B1CDB57AA003FF4B4 /* PredictionMode.cpp */, 276E5C7C1CDB57AA003FF4B4 /* PredictionMode.h */, 276E5C7D1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp */, 276E5C7E1CDB57AA003FF4B4 /* ProfilingATNSimulator.h */, 276E5C7F1CDB57AA003FF4B4 /* RangeTransition.cpp */, 276E5C801CDB57AA003FF4B4 /* RangeTransition.h */, - 276E5C811CDB57AA003FF4B4 /* RuleStartState.cpp */, 276E5C821CDB57AA003FF4B4 /* RuleStartState.h */, - 276E5C831CDB57AA003FF4B4 /* RuleStopState.cpp */, 276E5C841CDB57AA003FF4B4 /* RuleStopState.h */, 276E5C851CDB57AA003FF4B4 /* RuleTransition.cpp */, 276E5C861CDB57AA003FF4B4 /* RuleTransition.h */, 276E5C871CDB57AA003FF4B4 /* SemanticContext.cpp */, 276E5C881CDB57AA003FF4B4 /* SemanticContext.h */, + 9B25DCCB291027ED00DF9703 /* SemanticContextType.h */, + 9B25DCCC291027EE00DF9703 /* SerializedATNView.h */, 276E5C891CDB57AA003FF4B4 /* SetTransition.cpp */, 276E5C8A1CDB57AA003FF4B4 /* SetTransition.h */, 276E5C8B1CDB57AA003FF4B4 /* SingletonPredictionContext.cpp */, 276E5C8C1CDB57AA003FF4B4 /* SingletonPredictionContext.h */, - 276E5C8D1CDB57AA003FF4B4 /* StarBlockStartState.cpp */, 276E5C8E1CDB57AA003FF4B4 /* StarBlockStartState.h */, 276E5C8F1CDB57AA003FF4B4 /* StarLoopbackState.cpp */, 276E5C901CDB57AA003FF4B4 /* StarLoopbackState.h */, - 276E5C911CDB57AA003FF4B4 /* StarLoopEntryState.cpp */, 276E5C921CDB57AA003FF4B4 /* StarLoopEntryState.h */, - 276E5C931CDB57AA003FF4B4 /* TokensStartState.cpp */, 276E5C941CDB57AA003FF4B4 /* TokensStartState.h */, 276E5C951CDB57AA003FF4B4 /* Transition.cpp */, 276E5C961CDB57AA003FF4B4 /* Transition.h */, + 9B25DCD32910282B00DF9703 /* TransitionType.cpp */, + 9B25DCD42910282B00DF9703 /* TransitionType.h */, 276E5C971CDB57AA003FF4B4 /* WildcardTransition.cpp */, 
276E5C981CDB57AA003FF4B4 /* WildcardTransition.h */, ); @@ -1550,13 +1541,15 @@ 276E5CE51CDB57AA003FF4B4 /* Arrays.cpp */, 276E5CE61CDB57AA003FF4B4 /* Arrays.h */, 276E5CE71CDB57AA003FF4B4 /* BitSet.h */, + 9B25DCE4291028BC00DF9703 /* Casts.h */, 276E5CE81CDB57AA003FF4B4 /* CPPUtils.cpp */, 276E5CE91CDB57AA003FF4B4 /* CPPUtils.h */, 276E5CEA1CDB57AA003FF4B4 /* Declarations.h */, - 276E5CEB1CDB57AA003FF4B4 /* guid.cpp */, - 276E5CEC1CDB57AA003FF4B4 /* guid.h */, 276E5CED1CDB57AA003FF4B4 /* StringUtils.cpp */, 276E5CEE1CDB57AA003FF4B4 /* StringUtils.h */, + 9B25DCE8291028D000DF9703 /* Unicode.h */, + 9B25DCEA291028D000DF9703 /* Utf8.cpp */, + 9B25DCE9291028D000DF9703 /* Utf8.h */, ); path = support; sourceTree = ""; @@ -1567,7 +1560,6 @@ 276E5D061CDB57AA003FF4B4 /* pattern */, 27DB448A1D045537007E790B /* xpath */, 276E5CFA1CDB57AA003FF4B4 /* AbstractParseTreeVisitor.h */, - 2793DC941F0808E100A84290 /* ErrorNode.cpp */, 276E5CFB1CDB57AA003FF4B4 /* ErrorNode.h */, 276E5CFC1CDB57AA003FF4B4 /* ErrorNodeImpl.cpp */, 276E5CFD1CDB57AA003FF4B4 /* ErrorNodeImpl.h */, @@ -1582,7 +1574,6 @@ 276E5D031CDB57AA003FF4B4 /* ParseTreeVisitor.h */, 276E5D041CDB57AA003FF4B4 /* ParseTreeWalker.cpp */, 276E5D051CDB57AA003FF4B4 /* ParseTreeWalker.h */, - 2793DC901F0808A200A84290 /* TerminalNode.cpp */, 276E5D181CDB57AA003FF4B4 /* TerminalNode.h */, 276E5D191CDB57AA003FF4B4 /* TerminalNodeImpl.cpp */, 276E5D1A1CDB57AA003FF4B4 /* TerminalNodeImpl.h */, @@ -1595,8 +1586,8 @@ 276E5D061CDB57AA003FF4B4 /* pattern */ = { isa = PBXGroup; children = ( - 276E5D071CDB57AA003FF4B4 /* Chunk.h */, 2793DC881F08087500A84290 /* Chunk.cpp */, + 276E5D071CDB57AA003FF4B4 /* Chunk.h */, 276E5D081CDB57AA003FF4B4 /* ParseTreeMatch.cpp */, 276E5D091CDB57AA003FF4B4 /* ParseTreeMatch.h */, 276E5D0A1CDB57AA003FF4B4 /* ParseTreePattern.cpp */, @@ -1618,7 +1609,6 @@ 27874F221CCBB34200AF1C53 /* Linked Frameworks */ = { isa = PBXGroup; children = ( - 270C69DF1CDB536A00116E17 /* CoreFoundation.framework */, 
27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */, ); name = "Linked Frameworks"; @@ -1671,6 +1661,15 @@ name = Products; sourceTree = ""; }; + 9B25DCDB2910287000DF9703 /* internal */ = { + isa = PBXGroup; + children = ( + 9B25DCDC2910287000DF9703 /* Synchronization.cpp */, + 9B25DCDD2910287000DF9703 /* Synchronization.h */, + ); + path = internal; + sourceTree = ""; + }; /* End PBXGroup section */ /* Begin PBXHeadersBuildPhase section */ @@ -1683,6 +1682,7 @@ 276E5F431CDB57AA003FF4B4 /* IntStream.h in Headers */, 276E5D5D1CDB57AA003FF4B4 /* ATN.h in Headers */, 276E60601CDB57AA003FF4B4 /* UnbufferedCharStream.h in Headers */, + 9B25DCAA2910252800DF9703 /* Version.h in Headers */, 276E5DD81CDB57AA003FF4B4 /* LexerAction.h in Headers */, 276E5FF71CDB57AA003FF4B4 /* ParseTree.h in Headers */, 276E5DA81CDB57AA003FF4B4 /* BlockStartState.h in Headers */, @@ -1690,11 +1690,11 @@ 276E5D6F1CDB57AA003FF4B4 /* ATNDeserializationOptions.h in Headers */, 27DB44CA1D0463DB007E790B /* XPath.h in Headers */, 276E5EDD1CDB57AA003FF4B4 /* BaseErrorListener.h in Headers */, + 9B25DCBE2910278000DF9703 /* PredictionContextMergeCache.h in Headers */, 276E5DB71CDB57AA003FF4B4 /* DecisionEventInfo.h in Headers */, 27DB44D01D0463DB007E790B /* XPathRuleAnywhereElement.h in Headers */, 27AC52D21CE773A80093AAAB /* antlr4-runtime.h in Headers */, 276E5E2C1CDB57AA003FF4B4 /* LL1Analyzer.h in Headers */, - 276E5D7B1CDB57AA003FF4B4 /* ATNSerializer.h in Headers */, 276E5EAD1CDB57AA003FF4B4 /* SingletonPredictionContext.h in Headers */, 276E5E1A1CDB57AA003FF4B4 /* LexerPushModeAction.h in Headers */, 276E5ECB1CDB57AA003FF4B4 /* Transition.h in Headers */, @@ -1723,6 +1723,7 @@ 276E5E8F1CDB57AA003FF4B4 /* RuleStartState.h in Headers */, 276E5E201CDB57AA003FF4B4 /* LexerSkipAction.h in Headers */, 276E5E381CDB57AA003FF4B4 /* LoopEndState.h in Headers */, + 9B25DCED291028D000DF9703 /* Unicode.h in Headers */, 276E5D691CDB57AA003FF4B4 /* ATNConfigSet.h in Headers */, 276E5D391CDB57AA003FF4B4 /* 
ANTLRFileStream.h in Headers */, 276E5D301CDB57AA003FF4B4 /* ANTLRErrorListener.h in Headers */, @@ -1732,9 +1733,12 @@ 276E5F191CDB57AA003FF4B4 /* DFAState.h in Headers */, 276E5FA61CDB57AA003FF4B4 /* Recognizer.h in Headers */, 276E60751CDB57AA003FF4B4 /* WritableToken.h in Headers */, + 9B25DCD2291027EE00DF9703 /* SerializedATNView.h in Headers */, 276E5D3F1CDB57AA003FF4B4 /* ANTLRInputStream.h in Headers */, 276E5FD01CDB57AA003FF4B4 /* Token.h in Headers */, + 9B25DCC12910278000DF9703 /* PredictionContextType.h in Headers */, 276E60421CDB57AA003FF4B4 /* TerminalNode.h in Headers */, + 9B25DCCF291027EE00DF9703 /* SemanticContextType.h in Headers */, 276E5D751CDB57AA003FF4B4 /* ATNDeserializer.h in Headers */, 276E5D871CDB57AA003FF4B4 /* ATNState.h in Headers */, 276E5E7D1CDB57AA003FF4B4 /* PredictionMode.h in Headers */, @@ -1747,12 +1751,12 @@ 276E5FB21CDB57AA003FF4B4 /* Arrays.h in Headers */, 276E5F821CDB57AA003FF4B4 /* NoViableAltException.h in Headers */, 276E5DEA1CDB57AA003FF4B4 /* LexerATNConfig.h in Headers */, + 9B25DCA32910249100DF9703 /* FlatHashSet.h in Headers */, 276E60481CDB57AA003FF4B4 /* TerminalNodeImpl.h in Headers */, 27745F081CE49C000067C6A3 /* RuntimeMetaData.h in Headers */, + 9B25DCBB2910278000DF9703 /* PredictionContextCache.h in Headers */, 276E5FF41CDB57AA003FF4B4 /* ErrorNodeImpl.h in Headers */, 276E5EC51CDB57AA003FF4B4 /* TokensStartState.h in Headers */, - 276E5DC91CDB57AA003FF4B4 /* EmptyPredictionContext.h in Headers */, - 276E5D451CDB57AA003FF4B4 /* AbstractPredicateTransition.h in Headers */, 276E5F2B1CDB57AA003FF4B4 /* Exceptions.h in Headers */, 276E5F251CDB57AA003FF4B4 /* DiagnosticErrorListener.h in Headers */, 276E5E141CDB57AA003FF4B4 /* LexerPopModeAction.h in Headers */, @@ -1764,13 +1768,15 @@ 276E5E771CDB57AA003FF4B4 /* PredictionContext.h in Headers */, 276E60151CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */, 27DB44CC1D0463DB007E790B /* XPathElement.h in Headers */, + 9B25DCF0291028D000DF9703 /* Utf8.h in Headers 
*/, 276E5F581CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, 276E5D811CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, 27DB44B61D0463CC007E790B /* XPathLexer.h in Headers */, - 276E5FC41CDB57AA003FF4B4 /* guid.h in Headers */, 276E602D1CDB57AA003FF4B4 /* TagChunk.h in Headers */, 276E5E951CDB57AA003FF4B4 /* RuleStopState.h in Headers */, + 9B25DCE32910287000DF9703 /* Synchronization.h in Headers */, 276E5F761CDB57AA003FF4B4 /* Predicate.h in Headers */, + 9B25DCB2291026DE00DF9703 /* ParserATNSimulatorOptions.h in Headers */, 276E5F941CDB57AA003FF4B4 /* ParserRuleContext.h in Headers */, 276E5FEE1CDB57AA003FF4B4 /* ErrorNode.h in Headers */, 276E5EB91CDB57AA003FF4B4 /* StarLoopbackState.h in Headers */, @@ -1779,10 +1785,12 @@ 276E5E591CDB57AA003FF4B4 /* PlusBlockStartState.h in Headers */, 276E5D931CDB57AA003FF4B4 /* AtomTransition.h in Headers */, 276E5F521CDB57AA003FF4B4 /* LexerInterpreter.h in Headers */, + 9B25DCA62910249100DF9703 /* FlatHashMap.h in Headers */, 276E5F311CDB57AA003FF4B4 /* FailedPredicateException.h in Headers */, 276E5E321CDB57AA003FF4B4 /* LookaheadEventInfo.h in Headers */, 276E5F0D1CDB57AA003FF4B4 /* DFA.h in Headers */, 276E606F1CDB57AA003FF4B4 /* Vocabulary.h in Headers */, + 9B25DCAE291025B700DF9703 /* ATNStateType.h in Headers */, 276E60541CDB57AA003FF4B4 /* Trees.h in Headers */, 276E5FB51CDB57AA003FF4B4 /* BitSet.h in Headers */, 276E5F9A1CDB57AA003FF4B4 /* ProxyErrorListener.h in Headers */, @@ -1816,6 +1824,7 @@ 276E5ED11CDB57AA003FF4B4 /* WildcardTransition.h in Headers */, 276E600F1CDB57AA003FF4B4 /* Chunk.h in Headers */, 276E5FBB1CDB57AA003FF4B4 /* CPPUtils.h in Headers */, + 9B25DCC72910278000DF9703 /* PredictionContextMergeCacheOptions.h in Headers */, 276E5EE31CDB57AA003FF4B4 /* BufferedTokenStream.h in Headers */, 276E5DB11CDB57AA003FF4B4 /* ContextSensitivityInfo.h in Headers */, 276E5E021CDB57AA003FF4B4 /* LexerIndexedCustomAction.h in Headers */, @@ -1826,6 +1835,7 @@ 276E60211CDB57AA003FF4B4 /* 
ParseTreePatternMatcher.h in Headers */, 276E5D631CDB57AA003FF4B4 /* ATNConfig.h in Headers */, 27DB44D41D0463DB007E790B /* XPathTokenAnywhereElement.h in Headers */, + 9B25DCE7291028BC00DF9703 /* Casts.h in Headers */, 27DB44D81D0463DB007E790B /* XPathWildcardAnywhereElement.h in Headers */, 276E5E4D1CDB57AA003FF4B4 /* ParseInfo.h in Headers */, 276E5F881CDB57AA003FF4B4 /* Parser.h in Headers */, @@ -1834,6 +1844,7 @@ 276E5E6B1CDB57AA003FF4B4 /* PredicateEvalInfo.h in Headers */, 276E5EEF1CDB57AA003FF4B4 /* CommonToken.h in Headers */, 270C67F31CDB4F1E00116E17 /* antlrcpp_ios.h in Headers */, + 9B25DCDA2910282B00DF9703 /* TransitionType.h in Headers */, 276E60391CDB57AA003FF4B4 /* TokenTagToken.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; @@ -1854,22 +1865,25 @@ 276E5FE11CDB57AA003FF4B4 /* TokenStream.h in Headers */, 276E5D6E1CDB57AA003FF4B4 /* ATNDeserializationOptions.h in Headers */, 276E5EDC1CDB57AA003FF4B4 /* BaseErrorListener.h in Headers */, + 9B25DCBA2910278000DF9703 /* PredictionContextCache.h in Headers */, 276E5DB61CDB57AA003FF4B4 /* DecisionEventInfo.h in Headers */, 276E5E2B1CDB57AA003FF4B4 /* LL1Analyzer.h in Headers */, 27DB44BA1D0463DA007E790B /* XPathElement.h in Headers */, - 276E5D7A1CDB57AA003FF4B4 /* ATNSerializer.h in Headers */, 27C375881EA1059C00B5883C /* InterpreterDataReader.h in Headers */, 276E5EAC1CDB57AA003FF4B4 /* SingletonPredictionContext.h in Headers */, 276E5E191CDB57AA003FF4B4 /* LexerPushModeAction.h in Headers */, 276E5ECA1CDB57AA003FF4B4 /* Transition.h in Headers */, + 9B25DCBD2910278000DF9703 /* PredictionContextMergeCache.h in Headers */, 276E5EA01CDB57AA003FF4B4 /* SemanticContext.h in Headers */, 276E5F5D1CDB57AA003FF4B4 /* ListTokenSource.h in Headers */, + 9B25DCE22910287000DF9703 /* Synchronization.h in Headers */, 276E5F8D1CDB57AA003FF4B4 /* ParserInterpreter.h in Headers */, 27D414561DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.h in Headers */, 276E5DDD1CDB57AA003FF4B4 /* LexerActionExecutor.h in 
Headers */, 276E5F4B1CDB57AA003FF4B4 /* Lexer.h in Headers */, 276E5F631CDB57AA003FF4B4 /* Interval.h in Headers */, 276E5DA41CDB57AA003FF4B4 /* BlockEndState.h in Headers */, + 9B25DCA92910252800DF9703 /* Version.h in Headers */, 27DB44C21D0463DA007E790B /* XPathTokenAnywhereElement.h in Headers */, 276E5E821CDB57AA003FF4B4 /* ProfilingATNSimulator.h in Headers */, 27DB44C41D0463DA007E790B /* XPathTokenElement.h in Headers */, @@ -1877,6 +1891,8 @@ 276E5E9A1CDB57AA003FF4B4 /* RuleTransition.h in Headers */, 27DB44B81D0463DA007E790B /* XPath.h in Headers */, 276E60021CDB57AA003FF4B4 /* ParseTreeProperty.h in Headers */, + 9B25DCC02910278000DF9703 /* PredictionContextType.h in Headers */, + 9B25DCEC291028D000DF9703 /* Unicode.h in Headers */, 276E5D8C1CDB57AA003FF4B4 /* ATNType.h in Headers */, 276E5FFC1CDB57AA003FF4B4 /* ParseTreeListener.h in Headers */, 276E5D9E1CDB57AA003FF4B4 /* BasicState.h in Headers */, @@ -1915,26 +1931,26 @@ 276E5E461CDB57AA003FF4B4 /* OrderedATNConfigSet.h in Headers */, 276E5DF51CDB57AA003FF4B4 /* LexerChannelAction.h in Headers */, 276E5FB11CDB57AA003FF4B4 /* Arrays.h in Headers */, + 9B25DCA22910249100DF9703 /* FlatHashSet.h in Headers */, 276E5F811CDB57AA003FF4B4 /* NoViableAltException.h in Headers */, 276E5DE91CDB57AA003FF4B4 /* LexerATNConfig.h in Headers */, 276E60471CDB57AA003FF4B4 /* TerminalNodeImpl.h in Headers */, 276E5FF31CDB57AA003FF4B4 /* ErrorNodeImpl.h in Headers */, 276E5EC41CDB57AA003FF4B4 /* TokensStartState.h in Headers */, - 276E5DC81CDB57AA003FF4B4 /* EmptyPredictionContext.h in Headers */, - 276E5D441CDB57AA003FF4B4 /* AbstractPredicateTransition.h in Headers */, 276E5F2A1CDB57AA003FF4B4 /* Exceptions.h in Headers */, 27DB44C61D0463DA007E790B /* XPathWildcardAnywhereElement.h in Headers */, + 9B25DCE6291028BC00DF9703 /* Casts.h in Headers */, 276E5F241CDB57AA003FF4B4 /* DiagnosticErrorListener.h in Headers */, 276E5E131CDB57AA003FF4B4 /* LexerPopModeAction.h in Headers */, 276E5ED61CDB57AA003FF4B4 /* 
BailErrorStrategy.h in Headers */, 276E5DCE1CDB57AA003FF4B4 /* EpsilonTransition.h in Headers */, 276E5FBD1CDB57AA003FF4B4 /* Declarations.h in Headers */, 276E600B1CDB57AA003FF4B4 /* ParseTreeWalker.h in Headers */, + 9B25DCCE291027EE00DF9703 /* SemanticContextType.h in Headers */, 276E5E761CDB57AA003FF4B4 /* PredictionContext.h in Headers */, 276E60141CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */, 276E5F571CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, 276E5D801CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, - 276E5FC31CDB57AA003FF4B4 /* guid.h in Headers */, 276E602C1CDB57AA003FF4B4 /* TagChunk.h in Headers */, 276E5E941CDB57AA003FF4B4 /* RuleStopState.h in Headers */, 276E5F751CDB57AA003FF4B4 /* Predicate.h in Headers */, @@ -1946,6 +1962,7 @@ 276E5E581CDB57AA003FF4B4 /* PlusBlockStartState.h in Headers */, 276E5D921CDB57AA003FF4B4 /* AtomTransition.h in Headers */, 276E5F511CDB57AA003FF4B4 /* LexerInterpreter.h in Headers */, + 9B25DCB1291026DE00DF9703 /* ParserATNSimulatorOptions.h in Headers */, 276E5F301CDB57AA003FF4B4 /* FailedPredicateException.h in Headers */, 276E5E311CDB57AA003FF4B4 /* LookaheadEventInfo.h in Headers */, 276E5F0C1CDB57AA003FF4B4 /* DFA.h in Headers */, @@ -1954,18 +1971,23 @@ 276E5FB41CDB57AA003FF4B4 /* BitSet.h in Headers */, 276E5F991CDB57AA003FF4B4 /* ProxyErrorListener.h in Headers */, 276E5E401CDB57AA003FF4B4 /* NotSetTransition.h in Headers */, + 9B25DCD92910282B00DF9703 /* TransitionType.h in Headers */, + 9B25DCD1291027EE00DF9703 /* SerializedATNView.h in Headers */, 276E5E881CDB57AA003FF4B4 /* RangeTransition.h in Headers */, 276E601A1CDB57AA003FF4B4 /* ParseTreePattern.h in Headers */, 276E5DFB1CDB57AA003FF4B4 /* LexerCustomAction.h in Headers */, 276E5FE71CDB57AA003FF4B4 /* TokenStreamRewriter.h in Headers */, + 9B25DCC62910278000DF9703 /* PredictionContextMergeCacheOptions.h in Headers */, 276E5DEF1CDB57AA003FF4B4 /* LexerATNSimulator.h in Headers */, 276E5DD41CDB57AA003FF4B4 /* ErrorInfo.h in Headers 
*/, 276E5E251CDB57AA003FF4B4 /* LexerTypeAction.h in Headers */, 276E5DE31CDB57AA003FF4B4 /* LexerActionType.h in Headers */, 276E5D501CDB57AA003FF4B4 /* AmbiguityInfo.h in Headers */, 276E5E701CDB57AA003FF4B4 /* PredicateTransition.h in Headers */, + 9B25DCEF291028D000DF9703 /* Utf8.h in Headers */, 276E5EE81CDB57AA003FF4B4 /* CharStream.h in Headers */, 276E60051CDB57AA003FF4B4 /* ParseTreeVisitor.h in Headers */, + 9B25DCAD291025B700DF9703 /* ATNStateType.h in Headers */, 276E5D561CDB57AA003FF4B4 /* ArrayPredictionContext.h in Headers */, 276E5E521CDB57AA003FF4B4 /* ParserATNSimulator.h in Headers */, 2794D8571CE7821B00FADD0F /* antlr4-common.h in Headers */, @@ -1995,6 +2017,7 @@ 276E5F871CDB57AA003FF4B4 /* Parser.h in Headers */, 276E5DBC1CDB57AA003FF4B4 /* DecisionInfo.h in Headers */, 276E5DC21CDB57AA003FF4B4 /* DecisionState.h in Headers */, + 9B25DCA52910249100DF9703 /* FlatHashMap.h in Headers */, 276E5E6A1CDB57AA003FF4B4 /* PredicateEvalInfo.h in Headers */, 276E5EEE1CDB57AA003FF4B4 /* CommonToken.h in Headers */, 276E60381CDB57AA003FF4B4 /* TokenTagToken.h in Headers */, @@ -2010,6 +2033,7 @@ 276E60311CDB57AA003FF4B4 /* TextChunk.h in Headers */, 276E5F411CDB57AA003FF4B4 /* IntStream.h in Headers */, 276E5D5B1CDB57AA003FF4B4 /* ATN.h in Headers */, + 9B25DCA82910252800DF9703 /* Version.h in Headers */, 276E605E1CDB57AA003FF4B4 /* UnbufferedCharStream.h in Headers */, 276E5DD61CDB57AA003FF4B4 /* LexerAction.h in Headers */, 27DB44A41D045537007E790B /* XPathRuleAnywhereElement.h in Headers */, @@ -2017,11 +2041,11 @@ 27AC52D01CE773A80093AAAB /* antlr4-runtime.h in Headers */, 276E5DA61CDB57AA003FF4B4 /* BlockStartState.h in Headers */, 276E5FE01CDB57AA003FF4B4 /* TokenStream.h in Headers */, + 9B25DCBC2910278000DF9703 /* PredictionContextMergeCache.h in Headers */, 276E5D6D1CDB57AA003FF4B4 /* ATNDeserializationOptions.h in Headers */, 276E5EDB1CDB57AA003FF4B4 /* BaseErrorListener.h in Headers */, 276E5DB51CDB57AA003FF4B4 /* DecisionEventInfo.h in Headers 
*/, 276E5E2A1CDB57AA003FF4B4 /* LL1Analyzer.h in Headers */, - 276E5D791CDB57AA003FF4B4 /* ATNSerializer.h in Headers */, 276E5EAB1CDB57AA003FF4B4 /* SingletonPredictionContext.h in Headers */, 276E5E181CDB57AA003FF4B4 /* LexerPushModeAction.h in Headers */, 276E5EC91CDB57AA003FF4B4 /* Transition.h in Headers */, @@ -2050,6 +2074,7 @@ 276E5E1E1CDB57AA003FF4B4 /* LexerSkipAction.h in Headers */, 276E5E361CDB57AA003FF4B4 /* LoopEndState.h in Headers */, 276E5D671CDB57AA003FF4B4 /* ATNConfigSet.h in Headers */, + 9B25DCEB291028D000DF9703 /* Unicode.h in Headers */, 276E5D371CDB57AA003FF4B4 /* ANTLRFileStream.h in Headers */, 27DB44B41D0463CC007E790B /* XPathLexer.h in Headers */, 276E5D2E1CDB57AA003FF4B4 /* ANTLRErrorListener.h in Headers */, @@ -2059,9 +2084,12 @@ 276E5F171CDB57AA003FF4B4 /* DFAState.h in Headers */, 276E5FA41CDB57AA003FF4B4 /* Recognizer.h in Headers */, 276E60731CDB57AA003FF4B4 /* WritableToken.h in Headers */, + 9B25DCD0291027EE00DF9703 /* SerializedATNView.h in Headers */, 276E5D3D1CDB57AA003FF4B4 /* ANTLRInputStream.h in Headers */, 276E5FCE1CDB57AA003FF4B4 /* Token.h in Headers */, + 9B25DCBF2910278000DF9703 /* PredictionContextType.h in Headers */, 276E60401CDB57AA003FF4B4 /* TerminalNode.h in Headers */, + 9B25DCCD291027EE00DF9703 /* SemanticContextType.h in Headers */, 276E5D731CDB57AA003FF4B4 /* ATNDeserializer.h in Headers */, 276E5D851CDB57AA003FF4B4 /* ATNState.h in Headers */, 276E5E7B1CDB57AA003FF4B4 /* PredictionMode.h in Headers */, @@ -2074,12 +2102,12 @@ 276E5DF41CDB57AA003FF4B4 /* LexerChannelAction.h in Headers */, 276E5FB01CDB57AA003FF4B4 /* Arrays.h in Headers */, 276E5F801CDB57AA003FF4B4 /* NoViableAltException.h in Headers */, + 9B25DCA12910249100DF9703 /* FlatHashSet.h in Headers */, 276E5DE81CDB57AA003FF4B4 /* LexerATNConfig.h in Headers */, 276E60461CDB57AA003FF4B4 /* TerminalNodeImpl.h in Headers */, + 9B25DCB92910278000DF9703 /* PredictionContextCache.h in Headers */, 276E5FF21CDB57AA003FF4B4 /* ErrorNodeImpl.h in 
Headers */, 276E5EC31CDB57AA003FF4B4 /* TokensStartState.h in Headers */, - 276E5DC71CDB57AA003FF4B4 /* EmptyPredictionContext.h in Headers */, - 276E5D431CDB57AA003FF4B4 /* AbstractPredicateTransition.h in Headers */, 276E5F291CDB57AA003FF4B4 /* Exceptions.h in Headers */, 276E5F231CDB57AA003FF4B4 /* DiagnosticErrorListener.h in Headers */, 27DB449E1D045537007E790B /* XPath.h in Headers */, @@ -2091,13 +2119,15 @@ 276E5E751CDB57AA003FF4B4 /* PredictionContext.h in Headers */, 276E60131CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */, 276E5F561CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, + 9B25DCEE291028D000DF9703 /* Utf8.h in Headers */, 276E5D7F1CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, - 276E5FC21CDB57AA003FF4B4 /* guid.h in Headers */, 276E602B1CDB57AA003FF4B4 /* TagChunk.h in Headers */, 276E5E931CDB57AA003FF4B4 /* RuleStopState.h in Headers */, 276E5F741CDB57AA003FF4B4 /* Predicate.h in Headers */, 276E5F921CDB57AA003FF4B4 /* ParserRuleContext.h in Headers */, + 9B25DCE12910287000DF9703 /* Synchronization.h in Headers */, 276E5FEC1CDB57AA003FF4B4 /* ErrorNode.h in Headers */, + 9B25DCB0291026DE00DF9703 /* ParserATNSimulatorOptions.h in Headers */, 276E5EB71CDB57AA003FF4B4 /* StarLoopbackState.h in Headers */, 276E5E5D1CDB57AA003FF4B4 /* PlusLoopbackState.h in Headers */, 276E5E061CDB57AA003FF4B4 /* LexerModeAction.h in Headers */, @@ -2106,10 +2136,12 @@ 276E5F501CDB57AA003FF4B4 /* LexerInterpreter.h in Headers */, 27DB44AE1D045537007E790B /* XPathWildcardElement.h in Headers */, 276E5F2F1CDB57AA003FF4B4 /* FailedPredicateException.h in Headers */, + 9B25DCA42910249100DF9703 /* FlatHashMap.h in Headers */, 276E5E301CDB57AA003FF4B4 /* LookaheadEventInfo.h in Headers */, 276E5F0B1CDB57AA003FF4B4 /* DFA.h in Headers */, 276E606D1CDB57AA003FF4B4 /* Vocabulary.h in Headers */, 276E60521CDB57AA003FF4B4 /* Trees.h in Headers */, + 9B25DCAC291025B700DF9703 /* ATNStateType.h in Headers */, 276E5FB31CDB57AA003FF4B4 /* BitSet.h in Headers */, 
27DB44AA1D045537007E790B /* XPathTokenElement.h in Headers */, 276E5F981CDB57AA003FF4B4 /* ProxyErrorListener.h in Headers */, @@ -2143,6 +2175,7 @@ 276E600D1CDB57AA003FF4B4 /* Chunk.h in Headers */, 276E5FB91CDB57AA003FF4B4 /* CPPUtils.h in Headers */, 276E5EE11CDB57AA003FF4B4 /* BufferedTokenStream.h in Headers */, + 9B25DCC52910278000DF9703 /* PredictionContextMergeCacheOptions.h in Headers */, 276E5DAF1CDB57AA003FF4B4 /* ContextSensitivityInfo.h in Headers */, 276E5E001CDB57AA003FF4B4 /* LexerIndexedCustomAction.h in Headers */, 27DB44A81D045537007E790B /* XPathTokenAnywhereElement.h in Headers */, @@ -2153,6 +2186,7 @@ 276E5F6E1CDB57AA003FF4B4 /* MurmurHash.h in Headers */, 276E601F1CDB57AA003FF4B4 /* ParseTreePatternMatcher.h in Headers */, 276E5D611CDB57AA003FF4B4 /* ATNConfig.h in Headers */, + 9B25DCE5291028BC00DF9703 /* Casts.h in Headers */, 27DB44A21D045537007E790B /* XPathLexerErrorListener.h in Headers */, 276E5E4B1CDB57AA003FF4B4 /* ParseInfo.h in Headers */, 276E5F861CDB57AA003FF4B4 /* Parser.h in Headers */, @@ -2161,6 +2195,7 @@ 276E5DC11CDB57AA003FF4B4 /* DecisionState.h in Headers */, 276E5E691CDB57AA003FF4B4 /* PredicateEvalInfo.h in Headers */, 276E5EED1CDB57AA003FF4B4 /* CommonToken.h in Headers */, + 9B25DCD82910282B00DF9703 /* TransitionType.h in Headers */, 276E60371CDB57AA003FF4B4 /* TokenTagToken.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; @@ -2226,7 +2261,7 @@ 37D727A21867AF1E007B6D10 /* Project object */ = { isa = PBXProject; attributes = { - LastUpgradeCheck = 1030; + LastUpgradeCheck = 1240; ORGANIZATIONNAME = ANTLR; TargetAttributes = { 270C67EF1CDB4F1E00116E17 = { @@ -2238,7 +2273,7 @@ }; }; buildConfigurationList = 37D727A51867AF1E007B6D10 /* Build configuration list for PBXProject "antlrcpp" */; - compatibilityVersion = "Xcode 3.2"; + compatibilityVersion = "Xcode 12.0"; developmentRegion = en; hasScannedForEncodings = 0; knownRegions = ( @@ -2279,18 +2314,16 @@ 276E5D541CDB57AA003FF4B4 /* 
ArrayPredictionContext.cpp in Sources */, 276E5F0A1CDB57AA003FF4B4 /* DFA.cpp in Sources */, 276E5E231CDB57AA003FF4B4 /* LexerTypeAction.cpp in Sources */, - 276E5EC21CDB57AA003FF4B4 /* TokensStartState.cpp in Sources */, 276E5DB41CDB57AA003FF4B4 /* DecisionEventInfo.cpp in Sources */, 276E60451CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD21CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, 276E5F551CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */, - 276E5E561CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 27C375861EA1059C00B5883C /* InterpreterDataReader.cpp in Sources */, 276E5E1D1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, - 276E5EBC1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, 276E5D721CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, 2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */, + 9B25DCE02910287000DF9703 /* Synchronization.cpp in Sources */, 276E5E2F1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFF1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, 276E60511CDB57AA003FF4B4 /* Trees.cpp in Sources */, @@ -2312,20 +2345,16 @@ 276E5E741CDB57AA003FF4B4 /* PredictionContext.cpp in Sources */, 27DB44CB1D0463DB007E790B /* XPathElement.cpp in Sources */, 276E5E171CDB57AA003FF4B4 /* LexerPushModeAction.cpp in Sources */, - 276E5DA21CDB57AA003FF4B4 /* BlockEndState.cpp in Sources */, 276E5EF21CDB57AA003FF4B4 /* CommonTokenFactory.cpp in Sources */, 276E5DF31CDB57AA003FF4B4 /* LexerChannelAction.cpp in Sources */, - 276E5E921CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */, 276E60631CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DDB1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, - 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */, 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9E1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 276E5EC81CDB57AA003FF4B4 
/* Transition.cpp in Sources */, 276E601E1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */, 276E5F221CDB57AA003FF4B4 /* DiagnosticErrorListener.cpp in Sources */, 276E5D481CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, - 276E5DC61CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED41CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, 2793DCAC1F08095F00A84290 /* Token.cpp in Sources */, @@ -2335,12 +2364,12 @@ 27DB44D51D0463DB007E790B /* XPathTokenElement.cpp in Sources */, 27DB44D11D0463DB007E790B /* XPathRuleElement.cpp in Sources */, 276E5DED1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, - 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606C1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1C1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60181CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, 276E5DE71CDB57AA003FF4B4 /* LexerATNConfig.cpp in Sources */, 27B36AC81DACE7AF0069C868 /* RuleContextWithAltNum.cpp in Sources */, + 9B25DCC42910278000DF9703 /* PredictionContextCache.cpp in Sources */, 276E5F101CDB57AA003FF4B4 /* DFASerializer.cpp in Sources */, 276E5F2E1CDB57AA003FF4B4 /* FailedPredicateException.cpp in Sources */, 27D414541DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.cpp in Sources */, @@ -2350,14 +2379,12 @@ 276E60091CDB57AA003FF4B4 /* ParseTreeWalker.cpp in Sources */, 27DB44CD1D0463DB007E790B /* XPathLexerErrorListener.cpp in Sources */, 276E5F9D1CDB57AA003FF4B4 /* RecognitionException.cpp in Sources */, - 276E5E8C1CDB57AA003FF4B4 /* RuleStartState.cpp in Sources */, 276E5EA41CDB57AA003FF4B4 /* SetTransition.cpp in Sources */, 276E5D841CDB57AA003FF4B4 /* ATNState.cpp in Sources */, 276E60241CDB57AA003FF4B4 /* RuleTagToken.cpp in Sources */, 276E5E501CDB57AA003FF4B4 /* ParserATNSimulator.cpp in Sources */, 276E602A1CDB57AA003FF4B4 /* TagChunk.cpp in Sources */, 276E5F7F1CDB57AA003FF4B4 /* 
NoViableAltException.cpp in Sources */, - 276E5D781CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F051CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAE1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, 2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, @@ -2367,8 +2394,6 @@ 276E5ECE1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E861CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, 276E5D7E1CDB57AA003FF4B4 /* ATNSimulator.cpp in Sources */, - 276E5D9C1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, - 276E5FC11CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E801CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, 2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F401CDB57AA003FF4B4 /* IntStream.cpp in Sources */, @@ -2377,7 +2402,6 @@ 276E5FDF1CDB57AA003FF4B4 /* TokenStream.cpp in Sources */, 276E5FF11CDB57AA003FF4B4 /* ErrorNodeImpl.cpp in Sources */, 27DB44D71D0463DB007E790B /* XPathWildcardAnywhereElement.cpp in Sources */, - 276E5D961CDB57AA003FF4B4 /* BasicBlockStartState.cpp in Sources */, 276E5E4A1CDB57AA003FF4B4 /* ParseInfo.cpp in Sources */, 276E5E3E1CDB57AA003FF4B4 /* NotSetTransition.cpp in Sources */, 27DB44B31D0463CC007E790B /* XPathLexer.cpp in Sources */, @@ -2389,15 +2413,15 @@ 276E5D5A1CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE61CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 276E5EE01CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, + 9B25DCCA2910278000DF9703 /* PredictionContextMergeCache.cpp in Sources */, 276E5F041CDB57AA003FF4B4 /* DefaultErrorStrategy.cpp in Sources */, - 276E5D421CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */, - 276E5E5C1CDB57AA003FF4B4 /* PlusLoopbackState.cpp in Sources */, - 276E5E351CDB57AA003FF4B4 /* LoopEndState.cpp in Sources */, 276E5FE51CDB57AA003FF4B4 /* TokenStreamRewriter.cpp in Sources */, + 9B25DCD72910282B00DF9703 /* TransitionType.cpp in Sources */, 276E5FA91CDB57AA003FF4B4 /* 
RuleContext.cpp in Sources */, 276E5D601CDB57AA003FF4B4 /* ATNConfig.cpp in Sources */, 276E5EFE1CDB57AA003FF4B4 /* ConsoleErrorListener.cpp in Sources */, 276E5EAA1CDB57AA003FF4B4 /* SingletonPredictionContext.cpp in Sources */, + 9B25DCF3291028D000DF9703 /* Utf8.cpp in Sources */, 276E5E681CDB57AA003FF4B4 /* PredicateEvalInfo.cpp in Sources */, 276E5F281CDB57AA003FF4B4 /* Exceptions.cpp in Sources */, 276E5F851CDB57AA003FF4B4 /* Parser.cpp in Sources */, @@ -2405,7 +2429,6 @@ 276E5E981CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF81CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, 2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */, - 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60121CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E21DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEC1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, @@ -2416,7 +2439,6 @@ 276E5DF91CDB57AA003FF4B4 /* LexerCustomAction.cpp in Sources */, 276E5F4F1CDB57AA003FF4B4 /* LexerInterpreter.cpp in Sources */, 276E5E291CDB57AA003FF4B4 /* LL1Analyzer.cpp in Sources */, - 276E5EB01CDB57AA003FF4B4 /* StarBlockStartState.cpp in Sources */, 27DB44D31D0463DB007E790B /* XPathTokenAnywhereElement.cpp in Sources */, 276E5FB81CDB57AA003FF4B4 /* CPPUtils.cpp in Sources */, ); @@ -2433,18 +2455,16 @@ 276E5D531CDB57AA003FF4B4 /* ArrayPredictionContext.cpp in Sources */, 276E5F091CDB57AA003FF4B4 /* DFA.cpp in Sources */, 276E5E221CDB57AA003FF4B4 /* LexerTypeAction.cpp in Sources */, - 276E5EC11CDB57AA003FF4B4 /* TokensStartState.cpp in Sources */, 276E5DB31CDB57AA003FF4B4 /* DecisionEventInfo.cpp in Sources */, 276E60441CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD11CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, 276E5F541CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */, - 276E5E551CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 
27C375851EA1059C00B5883C /* InterpreterDataReader.cpp in Sources */, 276E5E1C1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, - 276E5EBB1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, 276E5D711CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, 2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */, + 9B25DCDF2910287000DF9703 /* Synchronization.cpp in Sources */, 276E5E2E1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFE1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, 276E60501CDB57AA003FF4B4 /* Trees.cpp in Sources */, @@ -2466,20 +2486,16 @@ 276E5E731CDB57AA003FF4B4 /* PredictionContext.cpp in Sources */, 27DB44B91D0463DA007E790B /* XPathElement.cpp in Sources */, 276E5E161CDB57AA003FF4B4 /* LexerPushModeAction.cpp in Sources */, - 276E5DA11CDB57AA003FF4B4 /* BlockEndState.cpp in Sources */, 276E5EF11CDB57AA003FF4B4 /* CommonTokenFactory.cpp in Sources */, 276E5DF21CDB57AA003FF4B4 /* LexerChannelAction.cpp in Sources */, - 276E5E911CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */, 276E60621CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DDA1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, - 2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */, 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9D1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 276E5EC71CDB57AA003FF4B4 /* Transition.cpp in Sources */, 276E601D1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */, 276E5F211CDB57AA003FF4B4 /* DiagnosticErrorListener.cpp in Sources */, 276E5D471CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, - 276E5DC51CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED31CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, 2793DCAB1F08095F00A84290 /* Token.cpp in Sources */, @@ -2489,12 +2505,12 @@ 27DB44C31D0463DA007E790B /* XPathTokenElement.cpp in Sources */, 27DB44BF1D0463DA007E790B /* 
XPathRuleElement.cpp in Sources */, 276E5DEC1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, - 2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606B1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1B1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60171CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, 276E5DE61CDB57AA003FF4B4 /* LexerATNConfig.cpp in Sources */, 27B36AC71DACE7AF0069C868 /* RuleContextWithAltNum.cpp in Sources */, + 9B25DCC32910278000DF9703 /* PredictionContextCache.cpp in Sources */, 276E5F0F1CDB57AA003FF4B4 /* DFASerializer.cpp in Sources */, 276E5F2D1CDB57AA003FF4B4 /* FailedPredicateException.cpp in Sources */, 27D414531DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.cpp in Sources */, @@ -2504,14 +2520,12 @@ 276E60081CDB57AA003FF4B4 /* ParseTreeWalker.cpp in Sources */, 27DB44BB1D0463DA007E790B /* XPathLexerErrorListener.cpp in Sources */, 276E5F9C1CDB57AA003FF4B4 /* RecognitionException.cpp in Sources */, - 276E5E8B1CDB57AA003FF4B4 /* RuleStartState.cpp in Sources */, 276E5EA31CDB57AA003FF4B4 /* SetTransition.cpp in Sources */, 276E5D831CDB57AA003FF4B4 /* ATNState.cpp in Sources */, 276E60231CDB57AA003FF4B4 /* RuleTagToken.cpp in Sources */, 276E5E4F1CDB57AA003FF4B4 /* ParserATNSimulator.cpp in Sources */, 276E60291CDB57AA003FF4B4 /* TagChunk.cpp in Sources */, 276E5F7E1CDB57AA003FF4B4 /* NoViableAltException.cpp in Sources */, - 276E5D771CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F041CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAD1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, 2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, @@ -2521,8 +2535,6 @@ 276E5ECD1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E851CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, 276E5D7D1CDB57AA003FF4B4 /* ATNSimulator.cpp in Sources */, - 276E5D9B1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, - 276E5FC01CDB57AA003FF4B4 /* guid.cpp in 
Sources */, 276E5E7F1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, 2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F3F1CDB57AA003FF4B4 /* IntStream.cpp in Sources */, @@ -2531,7 +2543,6 @@ 276E5FDE1CDB57AA003FF4B4 /* TokenStream.cpp in Sources */, 276E5FF01CDB57AA003FF4B4 /* ErrorNodeImpl.cpp in Sources */, 27DB44C51D0463DA007E790B /* XPathWildcardAnywhereElement.cpp in Sources */, - 276E5D951CDB57AA003FF4B4 /* BasicBlockStartState.cpp in Sources */, 276E5E491CDB57AA003FF4B4 /* ParseInfo.cpp in Sources */, 276E5E3D1CDB57AA003FF4B4 /* NotSetTransition.cpp in Sources */, 27DB44B21D0463CC007E790B /* XPathLexer.cpp in Sources */, @@ -2543,15 +2554,15 @@ 276E5D591CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE51CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 276E5EDF1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, + 9B25DCC92910278000DF9703 /* PredictionContextMergeCache.cpp in Sources */, 276E5F031CDB57AA003FF4B4 /* DefaultErrorStrategy.cpp in Sources */, - 276E5D411CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */, - 276E5E5B1CDB57AA003FF4B4 /* PlusLoopbackState.cpp in Sources */, - 276E5E341CDB57AA003FF4B4 /* LoopEndState.cpp in Sources */, 276E5FE41CDB57AA003FF4B4 /* TokenStreamRewriter.cpp in Sources */, + 9B25DCD62910282B00DF9703 /* TransitionType.cpp in Sources */, 276E5FA81CDB57AA003FF4B4 /* RuleContext.cpp in Sources */, 276E5D5F1CDB57AA003FF4B4 /* ATNConfig.cpp in Sources */, 276E5EFD1CDB57AA003FF4B4 /* ConsoleErrorListener.cpp in Sources */, 276E5EA91CDB57AA003FF4B4 /* SingletonPredictionContext.cpp in Sources */, + 9B25DCF2291028D000DF9703 /* Utf8.cpp in Sources */, 276E5E671CDB57AA003FF4B4 /* PredicateEvalInfo.cpp in Sources */, 276E5F271CDB57AA003FF4B4 /* Exceptions.cpp in Sources */, 276E5F841CDB57AA003FF4B4 /* Parser.cpp in Sources */, @@ -2559,7 +2570,6 @@ 276E5E971CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF71CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, 
2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */, - 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60111CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E11DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEB1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, @@ -2570,7 +2580,6 @@ 276E5DF81CDB57AA003FF4B4 /* LexerCustomAction.cpp in Sources */, 276E5F4E1CDB57AA003FF4B4 /* LexerInterpreter.cpp in Sources */, 276E5E281CDB57AA003FF4B4 /* LL1Analyzer.cpp in Sources */, - 276E5EAF1CDB57AA003FF4B4 /* StarBlockStartState.cpp in Sources */, 27DB44C11D0463DA007E790B /* XPathTokenAnywhereElement.cpp in Sources */, 276E5FB71CDB57AA003FF4B4 /* CPPUtils.cpp in Sources */, ); @@ -2588,17 +2597,15 @@ 276E5F081CDB57AA003FF4B4 /* DFA.cpp in Sources */, 276E5E211CDB57AA003FF4B4 /* LexerTypeAction.cpp in Sources */, 27DB449F1D045537007E790B /* XPathElement.cpp in Sources */, - 276E5EC01CDB57AA003FF4B4 /* TokensStartState.cpp in Sources */, 276E5DB21CDB57AA003FF4B4 /* DecisionEventInfo.cpp in Sources */, 276E60431CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD01CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */, 276E5F531CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, 27C375841EA1059C00B5883C /* InterpreterDataReader.cpp in Sources */, - 276E5E541CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 276E5E1B1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, - 276E5EBA1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, 2793DC891F08087500A84290 /* Chunk.cpp in Sources */, + 9B25DCDE2910287000DF9703 /* Synchronization.cpp in Sources */, 276E5D701CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, 276E5E2D1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFD1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, @@ -2619,14 +2626,11 @@ 276E5F321CDB57AA003FF4B4 /* InputMismatchException.cpp in Sources */, 276E5E721CDB57AA003FF4B4 
/* PredictionContext.cpp in Sources */, 276E5E151CDB57AA003FF4B4 /* LexerPushModeAction.cpp in Sources */, - 276E5DA01CDB57AA003FF4B4 /* BlockEndState.cpp in Sources */, 276E5EF01CDB57AA003FF4B4 /* CommonTokenFactory.cpp in Sources */, 276E5DF11CDB57AA003FF4B4 /* LexerChannelAction.cpp in Sources */, - 276E5E901CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */, 276E60611CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DD91CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, 27DB449D1D045537007E790B /* XPath.cpp in Sources */, - 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */, 2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9C1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 27DB44AD1D045537007E790B /* XPathWildcardElement.cpp in Sources */, @@ -2637,18 +2641,17 @@ 276E5D461CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, 2793DCAA1F08095F00A84290 /* Token.cpp in Sources */, - 276E5DC41CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED21CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, 276E5FA11CDB57AA003FF4B4 /* Recognizer.cpp in Sources */, 276E5D6A1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */, 276E60341CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */, 276E5DEB1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, - 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606A1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1A1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60161CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, 276E5DE51CDB57AA003FF4B4 /* LexerATNConfig.cpp in Sources */, 27B36AC61DACE7AF0069C868 /* RuleContextWithAltNum.cpp in Sources */, + 9B25DCC22910278000DF9703 /* PredictionContextCache.cpp in Sources */, 276E5F0E1CDB57AA003FF4B4 /* DFASerializer.cpp in Sources */, 276E5F2C1CDB57AA003FF4B4 /* FailedPredicateException.cpp in Sources */, 
27D414521DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.cpp in Sources */, @@ -2658,14 +2661,12 @@ 276E5F141CDB57AA003FF4B4 /* DFAState.cpp in Sources */, 276E60071CDB57AA003FF4B4 /* ParseTreeWalker.cpp in Sources */, 276E5F9B1CDB57AA003FF4B4 /* RecognitionException.cpp in Sources */, - 276E5E8A1CDB57AA003FF4B4 /* RuleStartState.cpp in Sources */, 276E5EA21CDB57AA003FF4B4 /* SetTransition.cpp in Sources */, 276E5D821CDB57AA003FF4B4 /* ATNState.cpp in Sources */, 276E60221CDB57AA003FF4B4 /* RuleTagToken.cpp in Sources */, 276E5E4E1CDB57AA003FF4B4 /* ParserATNSimulator.cpp in Sources */, 276E60281CDB57AA003FF4B4 /* TagChunk.cpp in Sources */, 276E5F7D1CDB57AA003FF4B4 /* NoViableAltException.cpp in Sources */, - 276E5D761CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F031CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAC1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, 2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, @@ -2675,8 +2676,6 @@ 276E5ECC1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E841CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, 276E5D7C1CDB57AA003FF4B4 /* ATNSimulator.cpp in Sources */, - 276E5D9A1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, - 276E5FBF1CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E7E1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, 2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F3E1CDB57AA003FF4B4 /* IntStream.cpp in Sources */, @@ -2684,7 +2683,6 @@ 276E5F6B1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */, 276E5FDD1CDB57AA003FF4B4 /* TokenStream.cpp in Sources */, 276E5FEF1CDB57AA003FF4B4 /* ErrorNodeImpl.cpp in Sources */, - 276E5D941CDB57AA003FF4B4 /* BasicBlockStartState.cpp in Sources */, 276E5E481CDB57AA003FF4B4 /* ParseInfo.cpp in Sources */, 276E5E3C1CDB57AA003FF4B4 /* NotSetTransition.cpp in Sources */, 276E602E1CDB57AA003FF4B4 /* TextChunk.cpp in Sources */, @@ -2696,16 +2694,16 @@ 2793DC8D1F08088F00A84290 /* 
ParseTreeListener.cpp in Sources */, 276E5EDE1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, 276E5F021CDB57AA003FF4B4 /* DefaultErrorStrategy.cpp in Sources */, - 276E5D401CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */, - 276E5E5A1CDB57AA003FF4B4 /* PlusLoopbackState.cpp in Sources */, - 276E5E331CDB57AA003FF4B4 /* LoopEndState.cpp in Sources */, 276E5FE31CDB57AA003FF4B4 /* TokenStreamRewriter.cpp in Sources */, + 9B25DCC82910278000DF9703 /* PredictionContextMergeCache.cpp in Sources */, 27DB44A11D045537007E790B /* XPathLexerErrorListener.cpp in Sources */, 276E5FA71CDB57AA003FF4B4 /* RuleContext.cpp in Sources */, + 9B25DCD52910282B00DF9703 /* TransitionType.cpp in Sources */, 27DB44B11D0463CC007E790B /* XPathLexer.cpp in Sources */, 276E5D5E1CDB57AA003FF4B4 /* ATNConfig.cpp in Sources */, 276E5EFC1CDB57AA003FF4B4 /* ConsoleErrorListener.cpp in Sources */, 276E5EA81CDB57AA003FF4B4 /* SingletonPredictionContext.cpp in Sources */, + 9B25DCF1291028D000DF9703 /* Utf8.cpp in Sources */, 276E5E661CDB57AA003FF4B4 /* PredicateEvalInfo.cpp in Sources */, 276E5F261CDB57AA003FF4B4 /* Exceptions.cpp in Sources */, 276E5F831CDB57AA003FF4B4 /* Parser.cpp in Sources */, @@ -2713,7 +2711,6 @@ 276E5E961CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF61CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, 2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */, - 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60101CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E01DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEA1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, @@ -2724,7 +2721,6 @@ 276E5DF71CDB57AA003FF4B4 /* LexerCustomAction.cpp in Sources */, 276E5F4D1CDB57AA003FF4B4 /* LexerInterpreter.cpp in Sources */, 276E5E271CDB57AA003FF4B4 /* LL1Analyzer.cpp in Sources */, - 276E5EAE1CDB57AA003FF4B4 /* StarBlockStartState.cpp in Sources */, 27DB44A91D045537007E790B /* XPathTokenElement.cpp in Sources */, 
276E5FB61CDB57AA003FF4B4 /* CPPUtils.cpp in Sources */, ); @@ -2737,6 +2733,7 @@ isa = XCBuildConfiguration; buildSettings = { CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_MODULES = YES; CLANG_WARN_UNREACHABLE_CODE = YES; "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; @@ -2751,8 +2748,12 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; INFOPLIST_FILE = "antlrcpp-ios/Info.plist"; INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; - IPHONEOS_DEPLOYMENT_TARGET = 9.3; - LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); MTL_ENABLE_DEBUG_INFO = YES; PRODUCT_BUNDLE_IDENTIFIER = "org.antlr.v4.runtime.antlrcpp-ios"; PRODUCT_NAME = "$(TARGET_NAME)"; @@ -2768,6 +2769,7 @@ isa = XCBuildConfiguration; buildSettings = { CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_MODULES = YES; CLANG_WARN_UNREACHABLE_CODE = YES; "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; @@ -2782,8 +2784,12 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; INFOPLIST_FILE = "antlrcpp-ios/Info.plist"; INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; - IPHONEOS_DEPLOYMENT_TARGET = 9.3; - LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); MTL_ENABLE_DEBUG_INFO = NO; PRODUCT_BUNDLE_IDENTIFIER = "org.antlr.v4.runtime.antlrcpp-ios"; PRODUCT_NAME = "$(TARGET_NAME)"; @@ -2799,6 +2805,7 @@ 37C1471F1B4D5A04008EDDDB /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_MODULES = YES; CLANG_WARN_UNREACHABLE_CODE = YES; COMBINE_HIDPI_IMAGES = YES; @@ -2821,6 +2828,7 @@ 37C147201B4D5A04008EDDDB /* Release */ = 
{ isa = XCBuildConfiguration; buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_MODULES = YES; CLANG_WARN_UNREACHABLE_CODE = YES; COMBINE_HIDPI_IMAGES = YES; @@ -2841,8 +2849,7 @@ buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; - CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; - CLANG_CXX_LIBRARY = "libc++"; + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_OBJC_ARC = YES; CLANG_WARN_ASSIGN_ENUM = YES; CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; @@ -2857,6 +2864,7 @@ CLANG_WARN_INT_CONVERSION = YES; CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; CLANG_WARN_STRICT_PROTOTYPES = YES; CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES; @@ -2889,8 +2897,12 @@ GCC_WARN_UNUSED_LABEL = YES; GCC_WARN_UNUSED_PARAMETER = YES; GCC_WARN_UNUSED_VARIABLE = YES; - HEADER_SEARCH_PATHS = src/; - MACOSX_DEPLOYMENT_TARGET = 10.9; + HEADER_SEARCH_PATHS = ( + src/, + thirdparty/utfcpp/source/, + thirdparty/utfcpp/source/utf8/, + ); + MACOSX_DEPLOYMENT_TARGET = 11.1; ONLY_ACTIVE_ARCH = YES; SDKROOT = macosx; }; @@ -2901,8 +2913,7 @@ buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; - CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; - CLANG_CXX_LIBRARY = "libc++"; + CLANG_CXX_LANGUAGE_STANDARD = "c++17"; CLANG_ENABLE_OBJC_ARC = YES; CLANG_WARN_ASSIGN_ENUM = YES; CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; @@ -2917,6 +2928,7 @@ CLANG_WARN_INT_CONVERSION = YES; CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; CLANG_WARN_STRICT_PROTOTYPES = YES; CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES; @@ -2945,8 +2957,12 @@ GCC_WARN_UNUSED_LABEL = YES; GCC_WARN_UNUSED_PARAMETER = YES; GCC_WARN_UNUSED_VARIABLE = YES; - HEADER_SEARCH_PATHS = 
src/; - MACOSX_DEPLOYMENT_TARGET = 10.9; + HEADER_SEARCH_PATHS = ( + src/, + thirdparty/utfcpp/source/, + thirdparty/utfcpp/source/utf8/, + ); + MACOSX_DEPLOYMENT_TARGET = 11.1; SDKROOT = macosx; }; name = Release; diff --git a/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme b/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme index dc8e3432a7..701bbf3838 100644 --- a/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme +++ b/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme @@ -1,6 +1,6 @@ - - - - - - - - + + + ANTLR4.Runtime.cpp.vs$vs$.noarch + $version$$pre$ + ANTLR4 Runtime c++ vs$vs$ $link$ + Terence Parr & Contributors + true + BSL-1.0 + image\antlr4.jpg + https://www.antlr.org/ + ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build and walk parse trees.. 
+ See project URL + Copyright 2014-2022 + + + + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.targets b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.targets new file mode 100644 index 0000000000..d74dcbe5fa --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.targets @@ -0,0 +1,8 @@ + + + + + $(MSBuildThisFileDirectory)inc;%(AdditionalIncludeDirectories) + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.nuspec b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.nuspec new file mode 100644 index 0000000000..3481b2a743 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.nuspec @@ -0,0 +1,30 @@ + + + + ANTLR4.Runtime.cpp.vs$vs$.shared + $version$$pre$ + ANTLR4 Runtime c++ vs$vs$ shared + Terence Parr & Contributors + true + BSL-1.0 + image\antlr4.jpg + https://www.antlr.org/ + ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build and walk parse trees.. 
+ releaseNotes + Copyright 2006-2022 + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.props b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.props new file mode 100644 index 0000000000..9d6b576541 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.props @@ -0,0 +1,21 @@ + + + + $(MSBuildThisFileDirectory)x86\dbg + + + $(MSBuildThisFileDirectory)x86\dbg + + + $(MSBuildThisFileDirectory)x64\dbg + + + $(MSBuildThisFileDirectory)x86\rel + + + $(MSBuildThisFileDirectory)x86\rel + + + $(MSBuildThisFileDirectory)x64\rel + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.targets b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.targets new file mode 100644 index 0000000000..c3733357f5 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.targets @@ -0,0 +1,44 @@ + + + + + antlr4-runtime.lib;%(AdditionalDependencies) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.nuspec 
b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.nuspec new file mode 100644 index 0000000000..d5c7b7ae7d --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.nuspec @@ -0,0 +1,29 @@ + + + + ANTLR4.Runtime.cpp.vs$vs$.static + $version$$pre$ + ANTLR4 Runtime c++ vs$vs$ static + Terence Parr & Contributors + true + BSL-1.0 + image\antlr4.jpg + https://www.antlr.org/ + ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build and walk parse trees.. + releaseNotes + Copyright 2006-2022 + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.targets b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.targets new file mode 100644 index 0000000000..05a51c7b3c --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.targets @@ -0,0 +1,44 @@ + + + + + antlr4-runtime.lib;%(AdditionalDependencies) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + 
$(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + diff --git a/runtime/Cpp/runtime/nuget/antlr4.jpg b/runtime/Cpp/runtime/nuget/antlr4.jpg new file mode 100644 index 0000000000..04be6f240d Binary files /dev/null and b/runtime/Cpp/runtime/nuget/antlr4.jpg differ diff --git a/runtime/Cpp/runtime/nuget/pack.cmd b/runtime/Cpp/runtime/nuget/pack.cmd new file mode 100644 index 0000000000..8eb70375c7 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/pack.cmd @@ -0,0 +1,93 @@ +echo off +rem echo Usage: +rem echo ------ +rem echo pack vsvers version [pre] // pack 2019 4.9.1 -beta +rem echo ------ +setlocal enableextensions enabledelayedexpansion + +if "%1"=="" goto usage +if "%2"=="" goto usage +set PRE=%3 +set PLATFORM=Win32 + +rem -version ^^[16.0^^,17.0^^) +set VS_VERSION=vs%1 +rem should be set "VSWHERE='%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe -property installationPath -version ^[16.0^,17.0^)'" +if %VS_VERSION%==vs2019 ( + set "VSWHERE='C:\PROGRA~2\"Microsoft Visual Studio"\Installer\vswhere.exe -latest -property installationPath -version ^[16.0^,17.0^)'" +) else ( +if %VS_VERSION%==vs2022 ( + set "VSWHERE='C:\PROGRA~2\"Microsoft Visual Studio"\Installer\vswhere.exe -latest -property installationPath -version ^[17.0^,18.0^)'" +) +) +for /f " delims=" %%a in (%VSWHERE%) do @set "VSCOMNTOOLS=%%a" + +echo ============= %VSCOMNTOOLS% ============= + +if %VS_VERSION%==vs2019 ( + set VS_VARSALL=..\..\VC\Auxiliary\Build\vcvarsall.bat + set "VS160COMNTOOLS=%VSCOMNTOOLS%\Common7\Tools\" +) else ( + if %VS_VERSION%==vs2022 ( + set VS_VARSALL=..\..\VC\Auxiliary\Build\vcvarsall.bat + set "VS170COMNTOOLS=%VSCOMNTOOLS%\Common7\Tools\" + ) else ( + set VS_VARSALL=..\..\VC\vcvarsall.bat + ) +) + +if not defined VCINSTALLDIR ( + if %VS_VERSION%==vs2019 ( + if %PLATFORM%==x64 ( + call "%VS160COMNTOOLS%%VS_VARSALL%" x86_amd64 8.1 + ) else ( + call "%VS160COMNTOOLS%%VS_VARSALL%" x86 8.1 + ) + ) else ( + if %VS_VERSION%==vs2022 ( + if 
%PLATFORM%==x64 ( + call "%VS170COMNTOOLS%%VS_VARSALL%" x86_amd64 8.1 + ) else ( + call "%VS170COMNTOOLS%%VS_VARSALL%" x86 8.1 + ) + ) + ) +) + +if not defined VSINSTALLDIR ( + echo Error: No Visual cpp environment found. + echo Please run this script from a Visual Studio Command Prompt + echo or run "%%VSnnCOMNTOOLS%%\vsvars32.bat" first. + goto :buildfailed +) + + +pushd ..\ +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Debug DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Release DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Debug Static" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Release Static" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Debug DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Release DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Debug Static" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Release Static" +popd + +del *nupkg +echo nuget pack ANTLR4.Runtime.cpp.noarch.nuspec -p vs=%1 -p version=%2 -p pre=%pre% +call nuget pack ANTLR4.Runtime.cpp.noarch.nuspec -p vs=%1 -p version=%2 -p pre=%pre% +echo nuget pack ANTLR4.Runtime.cpp.shared.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% +call nuget pack ANTLR4.Runtime.cpp.shared.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% +echo nuget pack ANTLR4.Runtime.cpp.static.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% +call nuget pack ANTLR4.Runtime.cpp.static.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% + +goto exit +:usage +echo Usage: +echo ------ +echo "pack vsvers version [pre]" // pack 2019 4.9.1 -beta +echo ------ +:exit +:buildfailed +endlocal +rem echo on \ No newline at end of file diff --git a/runtime/Cpp/runtime/src/ANTLRErrorListener.h 
b/runtime/Cpp/runtime/src/ANTLRErrorListener.h old mode 100755 new mode 100644 index d6efad1d9e..162b780d9e --- a/runtime/Cpp/runtime/src/ANTLRErrorListener.h +++ b/runtime/Cpp/runtime/src/ANTLRErrorListener.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "Token.h" #include "RecognitionException.h" namespace antlrcpp { diff --git a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h old mode 100755 new mode 100644 index a3eecd14c4..368eddbede --- a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h +++ b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h @@ -5,9 +5,12 @@ #pragma once +#include +#include "antlr4-common.h" #include "Token.h" namespace antlr4 { + class Parser; /// /// The interface for defining strategies to deal with syntax errors encountered diff --git a/runtime/Cpp/runtime/src/ANTLRFileStream.cpp b/runtime/Cpp/runtime/src/ANTLRFileStream.cpp old mode 100755 new mode 100644 index 64b0b110a9..853a3d08bb --- a/runtime/Cpp/runtime/src/ANTLRFileStream.cpp +++ b/runtime/Cpp/runtime/src/ANTLRFileStream.cpp @@ -1,30 +1,21 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ -#include "support/StringUtils.h" - +#include +#include "ANTLRInputStream.h" #include "ANTLRFileStream.h" using namespace antlr4; -ANTLRFileStream::ANTLRFileStream(const std::string &fileName) { - _fileName = fileName; - loadFromFile(fileName); -} - void ANTLRFileStream::loadFromFile(const std::string &fileName) { _fileName = fileName; if (_fileName.empty()) { return; } -#ifdef _MSC_VER - std::ifstream stream(antlrcpp::s2ws(fileName), std::ios::binary); -#else std::ifstream stream(fileName, std::ios::binary); -#endif ANTLRInputStream::load(stream); } diff --git a/runtime/Cpp/runtime/src/ANTLRFileStream.h b/runtime/Cpp/runtime/src/ANTLRFileStream.h old mode 100755 new mode 100644 index 10c8550fe0..cae3140aab --- a/runtime/Cpp/runtime/src/ANTLRFileStream.h +++ b/runtime/Cpp/runtime/src/ANTLRFileStream.h @@ -1,10 +1,13 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ #pragma once +#include +#include +#include "antlr4-common.h" #include "ANTLRInputStream.h" namespace antlr4 { @@ -13,15 +16,18 @@ namespace antlr4 { /// when you construct the object (or call load()). // TODO: this class needs testing. class ANTLR4CPP_PUBLIC ANTLRFileStream : public ANTLRInputStream { - protected: - std::string _fileName; // UTF-8 encoded file name. - public: - // Assumes a file name encoded in UTF-8 and file content in the same encoding (with or w/o BOM). - ANTLRFileStream(const std::string &fileName); + ANTLRFileStream() = default; + ANTLRFileStream(const std::string &) = delete; + ANTLRFileStream(const char *data, size_t length) = delete; + ANTLRFileStream(std::istream &stream) = delete; + // Assumes a file name encoded in UTF-8 and file content in the same encoding (with or w/o BOM). 
virtual void loadFromFile(const std::string &fileName); - virtual std::string getSourceName() const override; + std::string getSourceName() const override; + + private: + std::string _fileName; // UTF-8 encoded file name. }; } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/ANTLRInputStream.cpp b/runtime/Cpp/runtime/src/ANTLRInputStream.cpp old mode 100755 new mode 100644 index a5e21618eb..a630dbed7d --- a/runtime/Cpp/runtime/src/ANTLRInputStream.cpp +++ b/runtime/Cpp/runtime/src/ANTLRInputStream.cpp @@ -1,13 +1,20 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include + #include "Exceptions.h" +#include "antlr4-common.h" #include "misc/Interval.h" #include "IntStream.h" -#include "support/StringUtils.h" +#include "support/Utf8.h" #include "support/CPPUtils.h" #include "ANTLRInputStream.h" @@ -17,38 +24,53 @@ using namespace antlrcpp; using misc::Interval; -ANTLRInputStream::ANTLRInputStream(const std::string &input) { +ANTLRInputStream::ANTLRInputStream() { InitializeInstanceFields(); - load(input); } -ANTLRInputStream::ANTLRInputStream(const char data_[], size_t numberOfActualCharsInArray) - : ANTLRInputStream(std::string(data_, numberOfActualCharsInArray)) { +ANTLRInputStream::ANTLRInputStream(std::string_view input): ANTLRInputStream() { + load(input.data(), input.length()); } -ANTLRInputStream::ANTLRInputStream(std::istream &stream) { - InitializeInstanceFields(); +ANTLRInputStream::ANTLRInputStream(const char *data, size_t length) { + load(data, length); +} + +ANTLRInputStream::ANTLRInputStream(std::istream &stream): ANTLRInputStream() { load(stream); } -void ANTLRInputStream::load(const std::string &input) { +void ANTLRInputStream::load(const std::string &input, bool lenient) { + 
load(input.data(), input.size(), lenient); +} + +void ANTLRInputStream::load(const char *data, size_t length, bool lenient) { // Remove the UTF-8 BOM if present. - const char bom[4] = "\xef\xbb\xbf"; - if (input.compare(0, 3, bom, 3) == 0) - _data = antlrcpp::utf8_to_utf32(input.data() + 3, input.data() + input.size()); - else - _data = antlrcpp::utf8_to_utf32(input.data(), input.data() + input.size()); + const char *bom = "\xef\xbb\xbf"; + if (length >= 3 && strncmp(data, bom, 3) == 0) { + data += 3; + length -= 3; + } + if (lenient) { + _data = Utf8::lenientDecode(std::string_view(data, length)); + } else { + auto maybe_utf32 = Utf8::strictDecode(std::string_view(data, length)); + if (!maybe_utf32.has_value()) { + throw IllegalArgumentException("UTF-8 string contains an illegal byte sequence"); + } + _data = std::move(maybe_utf32).value(); + } p = 0; } -void ANTLRInputStream::load(std::istream &stream) { +void ANTLRInputStream::load(std::istream &stream, bool lenient) { if (!stream.good() || stream.eof()) // No fail, bad or EOF. 
return; _data.clear(); std::string s((std::istreambuf_iterator(stream)), std::istreambuf_iterator()); - load(s); + load(s.data(), s.length(), lenient); } void ANTLRInputStream::reset() { @@ -136,7 +158,11 @@ std::string ANTLRInputStream::getText(const Interval &interval) { return ""; } - return antlrcpp::utf32_to_utf8(_data.substr(start, count)); + auto maybeUtf8 = Utf8::strictEncode(std::u32string_view(_data).substr(start, count)); + if (!maybeUtf8.has_value()) { + throw IllegalArgumentException("Input stream contains invalid Unicode code points"); + } + return std::move(maybeUtf8).value(); } std::string ANTLRInputStream::getSourceName() const { @@ -147,7 +173,11 @@ std::string ANTLRInputStream::getSourceName() const { } std::string ANTLRInputStream::toString() const { - return antlrcpp::utf32_to_utf8(_data); + auto maybeUtf8 = Utf8::strictEncode(_data); + if (!maybeUtf8.has_value()) { + throw IllegalArgumentException("Input stream contains invalid Unicode code points"); + } + return std::move(maybeUtf8).value(); } void ANTLRInputStream::InitializeInstanceFields() { diff --git a/runtime/Cpp/runtime/src/ANTLRInputStream.h b/runtime/Cpp/runtime/src/ANTLRInputStream.h old mode 100755 new mode 100644 index e9850504d8..f7011dac83 --- a/runtime/Cpp/runtime/src/ANTLRInputStream.h +++ b/runtime/Cpp/runtime/src/ANTLRInputStream.h @@ -1,10 +1,16 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ #pragma once +#include +#include +#include + +#include "antlr4-common.h" +#include "misc/Interval.h" #include "CharStream.h" namespace antlr4 { @@ -16,7 +22,7 @@ namespace antlr4 { protected: /// The data being scanned. 
// UTF-32 - UTF32String _data; + std::u32string _data; /// 0..n-1 index into string of next char size_t p; @@ -25,19 +31,27 @@ namespace antlr4 { /// What is name or source of this char stream? std::string name; - ANTLRInputStream(const std::string &input = ""); - ANTLRInputStream(const char data_[], size_t numberOfActualCharsInArray); + ANTLRInputStream(); + + ANTLRInputStream(std::string_view input); + + ANTLRInputStream(const char *data, size_t length); ANTLRInputStream(std::istream &stream); - virtual void load(const std::string &input); - virtual void load(std::istream &stream); + virtual void load(const std::string &input, bool lenient); + virtual void load(const char *data, size_t length, bool lenient); + virtual void load(std::istream &stream, bool lenient); + + virtual void load(const std::string &input) { load(input, false); } + virtual void load(const char *data, size_t length) { load(data, length, false); } + virtual void load(std::istream &stream) { load(stream, false); } /// Reset the stream so that it's in the same state it was /// when the object was created *except* the data array is not /// touched. virtual void reset(); - virtual void consume() override; - virtual size_t LA(ssize_t i) override; + void consume() override; + size_t LA(ssize_t i) override; virtual size_t LT(ssize_t i); /// @@ -45,22 +59,22 @@ namespace antlr4 { /// last symbol has been read. The index is the index of char to /// be returned from LA(1). /// - virtual size_t index() override; - virtual size_t size() override; + size_t index() override; + size_t size() override; /// /// mark/release do nothing; we have entire buffer - virtual ssize_t mark() override; - virtual void release(ssize_t marker) override; + ssize_t mark() override; + void release(ssize_t marker) override; /// /// consume() ahead until p==index; can't just set p=index as we must /// update line and charPositionInLine. 
If we seek backwards, just set p /// - virtual void seek(size_t index) override; - virtual std::string getText(const misc::Interval &interval) override; - virtual std::string getSourceName() const override; - virtual std::string toString() const override; + void seek(size_t index) override; + std::string getText(const misc::Interval &interval) override; + std::string getSourceName() const override; + std::string toString() const override; private: void InitializeInstanceFields(); diff --git a/runtime/Cpp/runtime/src/BailErrorStrategy.cpp b/runtime/Cpp/runtime/src/BailErrorStrategy.cpp old mode 100755 new mode 100644 index 5fbc011611..ee9503e954 --- a/runtime/Cpp/runtime/src/BailErrorStrategy.cpp +++ b/runtime/Cpp/runtime/src/BailErrorStrategy.cpp @@ -3,7 +3,9 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "Exceptions.h" +#include "Token.h" #include "ParserRuleContext.h" #include "InputMismatchException.h" #include "Parser.h" diff --git a/runtime/Cpp/runtime/src/BailErrorStrategy.h b/runtime/Cpp/runtime/src/BailErrorStrategy.h old mode 100755 new mode 100644 index 2a8c36f9ed..696dc3346a --- a/runtime/Cpp/runtime/src/BailErrorStrategy.h +++ b/runtime/Cpp/runtime/src/BailErrorStrategy.h @@ -5,6 +5,9 @@ #pragma once +#include +#include "antlr4-common.h" +#include "Token.h" #include "DefaultErrorStrategy.h" namespace antlr4 { @@ -45,15 +48,15 @@ namespace antlr4 { /// original . /// public: - virtual void recover(Parser *recognizer, std::exception_ptr e) override; + void recover(Parser *recognizer, std::exception_ptr e) override; /// Make sure we don't attempt to recover inline; if the parser /// successfully recovers, it won't throw an exception. - virtual Token* recoverInline(Parser *recognizer) override; + Token* recoverInline(Parser *recognizer) override; /// /// Make sure we don't attempt to recover from problems in subrules. 
- virtual void sync(Parser *recognizer) override; + void sync(Parser *recognizer) override; }; } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/BaseErrorListener.cpp b/runtime/Cpp/runtime/src/BaseErrorListener.cpp old mode 100755 new mode 100644 index c035f09f0f..321f57931b --- a/runtime/Cpp/runtime/src/BaseErrorListener.cpp +++ b/runtime/Cpp/runtime/src/BaseErrorListener.cpp @@ -3,7 +3,11 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "BaseErrorListener.h" +#include "Token.h" #include "RecognitionException.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/BaseErrorListener.h b/runtime/Cpp/runtime/src/BaseErrorListener.h old mode 100755 new mode 100644 index aad2e5d755..749ed910e0 --- a/runtime/Cpp/runtime/src/BaseErrorListener.h +++ b/runtime/Cpp/runtime/src/BaseErrorListener.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "Token.h" #include "ANTLRErrorListener.h" namespace antlrcpp { @@ -20,16 +25,16 @@ namespace antlr4 { */ class ANTLR4CPP_PUBLIC BaseErrorListener : public ANTLRErrorListener { - virtual void syntaxError(Recognizer *recognizer, Token * offendingSymbol, size_t line, size_t charPositionInLine, + void syntaxError(Recognizer *recognizer, Token * offendingSymbol, size_t line, size_t charPositionInLine, const std::string &msg, std::exception_ptr e) override; - virtual void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, + void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) override; - virtual void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, + void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, const antlrcpp::BitSet 
&conflictingAlts, atn::ATNConfigSet *configs) override; - virtual void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, + void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, size_t prediction, atn::ATNConfigSet *configs) override; }; diff --git a/runtime/Cpp/runtime/src/BufferedTokenStream.cpp b/runtime/Cpp/runtime/src/BufferedTokenStream.cpp old mode 100755 new mode 100644 index 241dfe5c47..716a9a4142 --- a/runtime/Cpp/runtime/src/BufferedTokenStream.cpp +++ b/runtime/Cpp/runtime/src/BufferedTokenStream.cpp @@ -3,7 +3,15 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include +#include #include "WritableToken.h" +#include "Token.h" +#include "antlr4-common.h" #include "Lexer.h" #include "RuleContext.h" #include "misc/Interval.h" diff --git a/runtime/Cpp/runtime/src/BufferedTokenStream.h b/runtime/Cpp/runtime/src/BufferedTokenStream.h old mode 100755 new mode 100644 index fab74d24c2..5d3cdbf7ec --- a/runtime/Cpp/runtime/src/BufferedTokenStream.h +++ b/runtime/Cpp/runtime/src/BufferedTokenStream.h @@ -5,6 +5,13 @@ #pragma once +#include +#include +#include +#include +#include "antlr4-common.h" +#include "misc/Interval.h" +#include "Token.h" #include "TokenStream.h" namespace antlr4 { @@ -28,24 +35,24 @@ namespace antlr4 { BufferedTokenStream& operator = (const BufferedTokenStream& other) = delete; - virtual TokenSource* getTokenSource() const override; - virtual size_t index() override; - virtual ssize_t mark() override; + TokenSource* getTokenSource() const override; + size_t index() override; + ssize_t mark() override; - virtual void release(ssize_t marker) override; + void release(ssize_t marker) override; virtual void reset(); - virtual void seek(size_t index) override; + void seek(size_t index) override; - virtual size_t size() override; - virtual void consume() override; + size_t size() 
override; + void consume() override; - virtual Token* get(size_t i) const override; + Token* get(size_t i) const override; /// Get all tokens from start..stop inclusively. virtual std::vector get(size_t start, size_t stop); - virtual size_t LA(ssize_t i) override; - virtual Token* LT(ssize_t k) override; + size_t LA(ssize_t i) override; + Token* LT(ssize_t k) override; /// Reset this token stream by setting its token source. virtual void setTokenSource(TokenSource *tokenSource); @@ -85,11 +92,11 @@ namespace antlr4 { ///
    virtual std::vector getHiddenTokensToLeft(size_t tokenIndex); - virtual std::string getSourceName() const override; - virtual std::string getText() override; - virtual std::string getText(const misc::Interval &interval) override; - virtual std::string getText(RuleContext *ctx) override; - virtual std::string getText(Token *start, Token *stop) override; + std::string getSourceName() const override; + std::string getText() override; + std::string getText(const misc::Interval &interval) override; + std::string getText(RuleContext *ctx) override; + std::string getText(Token *start, Token *stop) override; /// Get all tokens from lexer until EOF. virtual void fill(); diff --git a/runtime/Cpp/runtime/src/CharStream.h b/runtime/Cpp/runtime/src/CharStream.h old mode 100755 new mode 100644 index 5f2a3408d7..01ced8176f --- a/runtime/Cpp/runtime/src/CharStream.h +++ b/runtime/Cpp/runtime/src/CharStream.h @@ -5,7 +5,9 @@ #pragma once +#include #include "IntStream.h" +#include "antlr4-common.h" #include "misc/Interval.h" namespace antlr4 { @@ -13,7 +15,7 @@ namespace antlr4 { /// A source of characters for an ANTLR lexer. class ANTLR4CPP_PUBLIC CharStream : public IntStream { public: - virtual ~CharStream(); + ~CharStream() override; /// This method returns the text for a range of characters within this input /// stream. This method is guaranteed to not throw an exception if the diff --git a/runtime/Cpp/runtime/src/CommonToken.cpp b/runtime/Cpp/runtime/src/CommonToken.cpp old mode 100755 new mode 100644 index 200a6af94f..db1f0bcf30 --- a/runtime/Cpp/runtime/src/CommonToken.cpp +++ b/runtime/Cpp/runtime/src/CommonToken.cpp @@ -3,15 +3,21 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include #include "TokenSource.h" +#include "Token.h" +#include "antlr4-common.h" #include "CharStream.h" #include "Recognizer.h" #include "Vocabulary.h" #include "misc/Interval.h" -#include "support/StringUtils.h" #include "support/CPPUtils.h" +#include "support/StringUtils.h" #include "CommonToken.h" @@ -165,9 +171,7 @@ std::string CommonToken::toString(Recognizer *r) const { } std::string txt = getText(); if (!txt.empty()) { - antlrcpp::replaceAll(txt, "\n", "\\n"); - antlrcpp::replaceAll(txt, "\r", "\\r"); - antlrcpp::replaceAll(txt, "\t", "\\t"); + txt = antlrcpp::escapeWhitespace(txt); } else { txt = ""; } diff --git a/runtime/Cpp/runtime/src/CommonToken.h b/runtime/Cpp/runtime/src/CommonToken.h old mode 100755 new mode 100644 index fdaab14de7..9e5a5c0c5b --- a/runtime/Cpp/runtime/src/CommonToken.h +++ b/runtime/Cpp/runtime/src/CommonToken.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "Token.h" #include "WritableToken.h" namespace antlr4 { @@ -111,7 +116,7 @@ namespace antlr4 { */ CommonToken(Token *oldToken); - virtual size_t getType() const override; + size_t getType() const override; /** * Explicitly set the text for this token. If {code text} is not @@ -122,33 +127,33 @@ namespace antlr4 { * should be obtained from the input along with the start and stop indexes * of the token. 
*/ - virtual void setText(const std::string &text) override; - virtual std::string getText() const override; + void setText(const std::string &text) override; + std::string getText() const override; - virtual void setLine(size_t line) override; - virtual size_t getLine() const override; + void setLine(size_t line) override; + size_t getLine() const override; - virtual size_t getCharPositionInLine() const override; - virtual void setCharPositionInLine(size_t charPositionInLine) override; + size_t getCharPositionInLine() const override; + void setCharPositionInLine(size_t charPositionInLine) override; - virtual size_t getChannel() const override; - virtual void setChannel(size_t channel) override; + size_t getChannel() const override; + void setChannel(size_t channel) override; - virtual void setType(size_t type) override; + void setType(size_t type) override; - virtual size_t getStartIndex() const override; + size_t getStartIndex() const override; virtual void setStartIndex(size_t start); - virtual size_t getStopIndex() const override; + size_t getStopIndex() const override; virtual void setStopIndex(size_t stop); - virtual size_t getTokenIndex() const override; - virtual void setTokenIndex(size_t index) override; + size_t getTokenIndex() const override; + void setTokenIndex(size_t index) override; - virtual TokenSource *getTokenSource() const override; - virtual CharStream *getInputStream() const override; + TokenSource *getTokenSource() const override; + CharStream *getInputStream() const override; - virtual std::string toString() const override; + std::string toString() const override; virtual std::string toString(Recognizer *r) const; private: diff --git a/runtime/Cpp/runtime/src/CommonTokenFactory.cpp b/runtime/Cpp/runtime/src/CommonTokenFactory.cpp old mode 100755 new mode 100644 index b04d68fbc2..6a4cc5ae28 --- a/runtime/Cpp/runtime/src/CommonTokenFactory.cpp +++ b/runtime/Cpp/runtime/src/CommonTokenFactory.cpp @@ -3,6 +3,10 @@ * can be found in the 
LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "misc/Interval.h" #include "CommonToken.h" #include "CharStream.h" diff --git a/runtime/Cpp/runtime/src/CommonTokenFactory.h b/runtime/Cpp/runtime/src/CommonTokenFactory.h old mode 100755 new mode 100644 index 129e9fc257..9e69f6ddaa --- a/runtime/Cpp/runtime/src/CommonTokenFactory.h +++ b/runtime/Cpp/runtime/src/CommonTokenFactory.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include +#include "antlr4-common.h" #include "TokenFactory.h" namespace antlr4 { @@ -65,10 +70,10 @@ namespace antlr4 { */ CommonTokenFactory(); - virtual std::unique_ptr create(std::pair source, size_t type, + std::unique_ptr create(std::pair source, size_t type, const std::string &text, size_t channel, size_t start, size_t stop, size_t line, size_t charPositionInLine) override; - virtual std::unique_ptr create(size_t type, const std::string &text) override; + std::unique_ptr create(size_t type, const std::string &text) override; }; } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/CommonTokenStream.cpp b/runtime/Cpp/runtime/src/CommonTokenStream.cpp old mode 100755 new mode 100644 index 7834296ae2..2be55c6494 --- a/runtime/Cpp/runtime/src/CommonTokenStream.cpp +++ b/runtime/Cpp/runtime/src/CommonTokenStream.cpp @@ -3,8 +3,10 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "Token.h" +#include "antlr4-common.h" #include "CommonTokenStream.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/CommonTokenStream.h b/runtime/Cpp/runtime/src/CommonTokenStream.h old mode 100755 new mode 100644 index 628a986f7b..a68f33e972 --- a/runtime/Cpp/runtime/src/CommonTokenStream.h +++ b/runtime/Cpp/runtime/src/CommonTokenStream.h @@ -5,6 +5,9 @@ #pragma once +#include +#include "antlr4-common.h" +#include "Token.h" #include "BufferedTokenStream.h" namespace antlr4 { @@ -55,7 +58,7 @@ namespace antlr4 { */ CommonTokenStream(TokenSource *tokenSource, size_t channel); - virtual Token* LT(ssize_t k) override; + Token* LT(ssize_t k) override; /// Count EOF just once. virtual int getNumberOfOnChannelTokens(); @@ -70,9 +73,9 @@ namespace antlr4 { */ size_t channel; - virtual ssize_t adjustSeekIndex(size_t i) override; + ssize_t adjustSeekIndex(size_t i) override; - virtual Token* LB(size_t k) override; + Token* LB(size_t k) override; }; diff --git a/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp b/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp old mode 100755 new mode 100644 index c7925e4270..0cbf125b58 --- a/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp +++ b/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp @@ -3,6 +3,10 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include "Token.h" #include "ConsoleErrorListener.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/ConsoleErrorListener.h b/runtime/Cpp/runtime/src/ConsoleErrorListener.h old mode 100755 new mode 100644 index 65c6f8c1eb..5cf46565d5 --- a/runtime/Cpp/runtime/src/ConsoleErrorListener.h +++ b/runtime/Cpp/runtime/src/ConsoleErrorListener.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "Token.h" #include "BaseErrorListener.h" namespace antlr4 { @@ -28,7 +33,7 @@ namespace antlr4 { * line line:charPositionInLine msg * */ - virtual void syntaxError(Recognizer *recognizer, Token * offendingSymbol, size_t line, size_t charPositionInLine, + void syntaxError(Recognizer *recognizer, Token * offendingSymbol, size_t line, size_t charPositionInLine, const std::string &msg, std::exception_ptr e) override; }; diff --git a/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp b/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp old mode 100755 new mode 100644 index e0c9426391..69f855c4f4 --- a/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp +++ b/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp @@ -3,7 +3,13 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "NoViableAltException.h" +#include "atn/ATNStateType.h" +#include "Token.h" +#include "antlr4-common.h" #include "misc/IntervalSet.h" #include "atn/ParserATNSimulator.h" #include "InputMismatchException.h" @@ -12,10 +18,11 @@ #include "atn/RuleTransition.h" #include "atn/ATN.h" #include "atn/ATNState.h" +#include "support/StringUtils.h" +#include "support/Casts.h" #include "Parser.h" #include "CommonToken.h" #include "Vocabulary.h" -#include "support/StringUtils.h" #include "DefaultErrorStrategy.h" @@ -106,10 +113,10 @@ void DefaultErrorStrategy::sync(Parser *recognizer) { } switch (s->getStateType()) { - case atn::ATNState::BLOCK_START: - case atn::ATNState::STAR_BLOCK_START: - case atn::ATNState::PLUS_BLOCK_START: - case atn::ATNState::STAR_LOOP_ENTRY: + case atn::ATNStateType::BLOCK_START: + case atn::ATNStateType::STAR_BLOCK_START: + case atn::ATNStateType::PLUS_BLOCK_START: + case atn::ATNStateType::STAR_LOOP_ENTRY: // report error and recover if possible if (singleTokenDeletion(recognizer) != nullptr) { return; @@ -117,8 +124,8 @@ void DefaultErrorStrategy::sync(Parser *recognizer) { throw InputMismatchException(recognizer); - case atn::ATNState::PLUS_LOOP_BACK: - case atn::ATNState::STAR_LOOP_BACK: { + case atn::ATNStateType::PLUS_LOOP_BACK: + case atn::ATNStateType::STAR_LOOP_BACK: { reportUnwantedToken(recognizer); misc::IntervalSet expecting = recognizer->getExpectedTokens(); misc::IntervalSet whatFollowsLoopIterationOrRule = expecting.Or(getErrorRecoverySet(recognizer)); @@ -292,11 +299,13 @@ size_t DefaultErrorStrategy::getSymbolType(Token *symbol) { } std::string DefaultErrorStrategy::escapeWSAndQuote(const std::string &s) const { - std::string result = s; - antlrcpp::replaceAll(result, "\n", "\\n"); - antlrcpp::replaceAll(result, "\r","\\r"); - antlrcpp::replaceAll(result, "\t","\\t"); - return "'" + result + "'"; + std::string result; + result.reserve(s.size() + 2); + result.push_back('\''); + 
antlrcpp::escapeWhitespace(result, s); + result.push_back('\''); + result.shrink_to_fit(); + return result; } misc::IntervalSet DefaultErrorStrategy::getErrorRecoverySet(Parser *recognizer) { @@ -306,7 +315,7 @@ misc::IntervalSet DefaultErrorStrategy::getErrorRecoverySet(Parser *recognizer) while (ctx->invokingState != ATNState::INVALID_STATE_NUMBER) { // compute what follows who invoked us atn::ATNState *invokingState = atn.states[ctx->invokingState]; - atn::RuleTransition *rt = dynamic_cast(invokingState->transitions[0]); + const atn::RuleTransition *rt = downCast(invokingState->transitions[0].get()); misc::IntervalSet follow = atn.nextTokens(rt->followState); recoverSet.addAll(follow); diff --git a/runtime/Cpp/runtime/src/DefaultErrorStrategy.h b/runtime/Cpp/runtime/src/DefaultErrorStrategy.h old mode 100755 new mode 100644 index 47dabb8cda..5fbe8144b5 --- a/runtime/Cpp/runtime/src/DefaultErrorStrategy.h +++ b/runtime/Cpp/runtime/src/DefaultErrorStrategy.h @@ -5,7 +5,14 @@ #pragma once +#include +#include +#include +#include +#include #include "ANTLRErrorStrategy.h" +#include "Token.h" +#include "antlr4-common.h" #include "misc/IntervalSet.h" namespace antlr4 { @@ -18,7 +25,7 @@ namespace antlr4 { public: DefaultErrorStrategy(); DefaultErrorStrategy(DefaultErrorStrategy const& other) = delete; - virtual ~DefaultErrorStrategy(); + ~DefaultErrorStrategy() override; DefaultErrorStrategy& operator = (DefaultErrorStrategy const& other) = delete; @@ -49,7 +56,7 @@ namespace antlr4 { /// ensure that the handler is not in error recovery mode. ///
    public: - virtual void reset(Parser *recognizer) override; + void reset(Parser *recognizer) override; /// /// This method is called to enter error recovery mode when a recognition @@ -63,7 +70,7 @@ namespace antlr4 { /// {@inheritDoc} /// public: - virtual bool inErrorRecoveryMode(Parser *recognizer) override; + bool inErrorRecoveryMode(Parser *recognizer) override; /// /// This method is called to leave error recovery mode after recovering from @@ -79,7 +86,7 @@ namespace antlr4 { /// The default implementation simply calls . /// public: - virtual void reportMatch(Parser *recognizer) override; + void reportMatch(Parser *recognizer) override; /// {@inheritDoc} ///

    @@ -98,7 +105,7 @@ namespace antlr4 { ///

  • All other types: calls to report /// the exception
  • /// - virtual void reportError(Parser *recognizer, const RecognitionException &e) override; + void reportError(Parser *recognizer, const RecognitionException &e) override; /// /// {@inheritDoc} @@ -107,7 +114,7 @@ namespace antlr4 { /// until we find one in the resynchronization set--loosely the set of tokens /// that can follow the current rule. /// - virtual void recover(Parser *recognizer, std::exception_ptr e) override; + void recover(Parser *recognizer, std::exception_ptr e) override; /** * The default implementation of {@link ANTLRErrorStrategy#sync} makes sure @@ -155,7 +162,7 @@ namespace antlr4 { * some reason speed is suffering for you, you can turn off this * functionality by simply overriding this method as a blank { }.

    */ - virtual void sync(Parser *recognizer) override; + void sync(Parser *recognizer) override; /// /// This is called by when the exception is a @@ -278,7 +285,7 @@ namespace antlr4 { * is in the set of tokens that can follow the {@code ')'} token reference * in rule {@code atom}. It can assume that you forgot the {@code ')'}. */ - virtual Token* recoverInline(Parser *recognizer) override; + Token* recoverInline(Parser *recognizer) override; /// /// This method implements the single-token insertion inline error recovery diff --git a/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp b/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp old mode 100755 new mode 100644 index ea2eaa0d8c..1b2ae99b74 --- a/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp +++ b/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp @@ -3,7 +3,11 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "atn/PredictionContext.h" +#include "antlr4-common.h" #include "atn/ATNConfig.h" #include "atn/ATNConfigSet.h" #include "Parser.h" diff --git a/runtime/Cpp/runtime/src/DiagnosticErrorListener.h b/runtime/Cpp/runtime/src/DiagnosticErrorListener.h old mode 100755 new mode 100644 index 8419fdcfbd..809d79e2d4 --- a/runtime/Cpp/runtime/src/DiagnosticErrorListener.h +++ b/runtime/Cpp/runtime/src/DiagnosticErrorListener.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "BaseErrorListener.h" namespace antlr4 { @@ -52,13 +55,13 @@ namespace antlr4 { /// {@code false} to report all ambiguities. 
DiagnosticErrorListener(bool exactOnly); - virtual void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, + void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) override; - virtual void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, + void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) override; - virtual void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, + void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, size_t prediction, atn::ATNConfigSet *configs) override; protected: diff --git a/runtime/Cpp/runtime/src/Exceptions.cpp b/runtime/Cpp/runtime/src/Exceptions.cpp index b6a7b06c65..2915a26636 100644 --- a/runtime/Cpp/runtime/src/Exceptions.cpp +++ b/runtime/Cpp/runtime/src/Exceptions.cpp @@ -3,6 +3,8 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "Exceptions.h" using namespace antlr4; @@ -10,7 +12,7 @@ using namespace antlr4; RuntimeException::RuntimeException(const std::string &msg) : std::exception(), _message(msg) { } -const char* RuntimeException::what() const NOEXCEPT { +const char* RuntimeException::what() const noexcept { return _message.c_str(); } @@ -19,7 +21,7 @@ const char* RuntimeException::what() const NOEXCEPT { IOException::IOException(const std::string &msg) : std::exception(), _message(msg) { } -const char* IOException::what() const NOEXCEPT { +const char* IOException::what() const noexcept { return _message.c_str(); } diff --git a/runtime/Cpp/runtime/src/Exceptions.h b/runtime/Cpp/runtime/src/Exceptions.h index d57b26a83d..2c7743d70e 100644 --- a/runtime/Cpp/runtime/src/Exceptions.h +++ b/runtime/Cpp/runtime/src/Exceptions.h @@ -5,6 +5,8 @@ #pragma once +#include +#include #include "antlr4-common.h" namespace antlr4 { @@ -16,14 +18,14 @@ namespace antlr4 { public: RuntimeException(const std::string &msg = ""); - virtual const char* what() const NOEXCEPT override; + const char* what() const noexcept override; }; class ANTLR4CPP_PUBLIC IllegalStateException : public RuntimeException { public: IllegalStateException(const std::string &msg = "") : RuntimeException(msg) {} IllegalStateException(IllegalStateException const&) = default; - ~IllegalStateException(); + ~IllegalStateException() override; IllegalStateException& operator=(IllegalStateException const&) = default; }; @@ -31,7 +33,7 @@ namespace antlr4 { public: IllegalArgumentException(IllegalArgumentException const&) = default; IllegalArgumentException(const std::string &msg = "") : RuntimeException(msg) {} - ~IllegalArgumentException(); + ~IllegalArgumentException() override; IllegalArgumentException& operator=(IllegalArgumentException const&) = default; }; @@ -39,7 +41,7 @@ namespace antlr4 { public: NullPointerException(const std::string &msg = "") : RuntimeException(msg) {} 
NullPointerException(NullPointerException const&) = default; - ~NullPointerException(); + ~NullPointerException() override; NullPointerException& operator=(NullPointerException const&) = default; }; @@ -47,7 +49,7 @@ namespace antlr4 { public: IndexOutOfBoundsException(const std::string &msg = "") : RuntimeException(msg) {} IndexOutOfBoundsException(IndexOutOfBoundsException const&) = default; - ~IndexOutOfBoundsException(); + ~IndexOutOfBoundsException() override; IndexOutOfBoundsException& operator=(IndexOutOfBoundsException const&) = default; }; @@ -55,7 +57,7 @@ namespace antlr4 { public: UnsupportedOperationException(const std::string &msg = "") : RuntimeException(msg) {} UnsupportedOperationException(UnsupportedOperationException const&) = default; - ~UnsupportedOperationException(); + ~UnsupportedOperationException() override; UnsupportedOperationException& operator=(UnsupportedOperationException const&) = default; }; @@ -64,7 +66,7 @@ namespace antlr4 { public: EmptyStackException(const std::string &msg = "") : RuntimeException(msg) {} EmptyStackException(EmptyStackException const&) = default; - ~EmptyStackException(); + ~EmptyStackException() override; EmptyStackException& operator=(EmptyStackException const&) = default; }; @@ -77,14 +79,14 @@ namespace antlr4 { public: IOException(const std::string &msg = ""); - virtual const char* what() const NOEXCEPT override; + const char* what() const noexcept override; }; class ANTLR4CPP_PUBLIC CancellationException : public IllegalStateException { public: CancellationException(const std::string &msg = "") : IllegalStateException(msg) {} CancellationException(CancellationException const&) = default; - ~CancellationException(); + ~CancellationException() override; CancellationException& operator=(CancellationException const&) = default; }; @@ -92,7 +94,7 @@ namespace antlr4 { public: ParseCancellationException(const std::string &msg = "") : CancellationException(msg) {} 
ParseCancellationException(ParseCancellationException const&) = default; - ~ParseCancellationException(); + ~ParseCancellationException() override; ParseCancellationException& operator=(ParseCancellationException const&) = default; }; diff --git a/runtime/Cpp/runtime/src/FailedPredicateException.cpp b/runtime/Cpp/runtime/src/FailedPredicateException.cpp old mode 100755 new mode 100644 index 3ec7b27f84..1313bcd265 --- a/runtime/Cpp/runtime/src/FailedPredicateException.cpp +++ b/runtime/Cpp/runtime/src/FailedPredicateException.cpp @@ -3,11 +3,14 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "atn/ParserATNSimulator.h" +#include "atn/TransitionType.h" #include "Parser.h" #include "atn/PredicateTransition.h" #include "atn/ATN.h" #include "atn/ATNState.h" +#include "support/Casts.h" #include "support/CPPUtils.h" #include "FailedPredicateException.h" @@ -26,10 +29,10 @@ FailedPredicateException::FailedPredicateException(Parser *recognizer, const std recognizer->getInputStream(), recognizer->getContext(), recognizer->getCurrentToken()) { atn::ATNState *s = recognizer->getInterpreter()->atn.states[recognizer->getState()]; - atn::Transition *transition = s->transitions[0]; - if (is(transition)) { - _ruleIndex = static_cast(transition)->ruleIndex; - _predicateIndex = static_cast(transition)->predIndex; + const atn::Transition *transition = s->transitions[0].get(); + if (transition->getTransitionType() == atn::TransitionType::PREDICATE) { + _ruleIndex = downCast(*transition).getRuleIndex(); + _predicateIndex = downCast(*transition).getPredIndex(); } else { _ruleIndex = 0; _predicateIndex = 0; diff --git a/runtime/Cpp/runtime/src/FailedPredicateException.h b/runtime/Cpp/runtime/src/FailedPredicateException.h old mode 100755 new mode 100644 index 16e37f7ddb..0be6d89d00 --- a/runtime/Cpp/runtime/src/FailedPredicateException.h +++ b/runtime/Cpp/runtime/src/FailedPredicateException.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include 
"antlr4-common.h" #include "RecognitionException.h" namespace antlr4 { @@ -15,7 +18,7 @@ namespace antlr4 { /// prediction. class ANTLR4CPP_PUBLIC FailedPredicateException : public RecognitionException { public: - FailedPredicateException(Parser *recognizer); + explicit FailedPredicateException(Parser *recognizer); FailedPredicateException(Parser *recognizer, const std::string &predicate); FailedPredicateException(Parser *recognizer, const std::string &predicate, const std::string &message); diff --git a/runtime/Cpp/runtime/src/FlatHashMap.h b/runtime/Cpp/runtime/src/FlatHashMap.h new file mode 100644 index 0000000000..ab4c6cc102 --- /dev/null +++ b/runtime/Cpp/runtime/src/FlatHashMap.h @@ -0,0 +1,58 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "antlr4-common.h" + +#if ANTLR4CPP_USING_ABSEIL +#include "absl/container/flat_hash_map.h" +#else +#include +#include +#endif + +// By default ANTLRv4 uses containers provided by the C++ standard library. In most deployments this +// is fine, however in some using custom containers may be preferred. This header allows that by +// optionally supporting some alternative implementations and allowing for more easier patching of +// other alternatives. + +namespace antlr4 { + +#if ANTLR4CPP_USING_ABSEIL + template ::hasher, + typename Equal = typename absl::flat_hash_map::key_equal, + typename Allocator = typename absl::flat_hash_map::allocator_type> + using FlatHashMap = absl::flat_hash_map; +#else + template , + typename Equal = std::equal_to, + typename Allocator = std::allocator>> + using FlatHashMap = std::unordered_map; +#endif + +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/FlatHashSet.h b/runtime/Cpp/runtime/src/FlatHashSet.h new file mode 100644 index 0000000000..2ee6705e58 --- /dev/null +++ b/runtime/Cpp/runtime/src/FlatHashSet.h @@ -0,0 +1,57 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "antlr4-common.h" + +#if ANTLR4CPP_USING_ABSEIL +#include "absl/container/flat_hash_set.h" +#else +#include +#endif + +// By default ANTLRv4 uses containers provided by the C++ standard library. In most deployments this +// is fine, however in some using custom containers may be preferred. This header allows that by +// optionally supporting some alternative implementations and allowing for more easier patching of +// other alternatives. 
+ +namespace antlr4 { + +#if ANTLR4CPP_USING_ABSEIL + template ::hasher, + typename Equal = typename absl::flat_hash_set::key_equal, + typename Allocator = typename absl::flat_hash_set::allocator_type> + using FlatHashSet = absl::flat_hash_set; +#else + template , + typename Equal = std::equal_to, + typename Allocator = std::allocator> + using FlatHashSet = std::unordered_set; +#endif + +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/InputMismatchException.h b/runtime/Cpp/runtime/src/InputMismatchException.h old mode 100755 new mode 100644 index 051a2a4153..5f9b723552 --- a/runtime/Cpp/runtime/src/InputMismatchException.h +++ b/runtime/Cpp/runtime/src/InputMismatchException.h @@ -5,6 +5,7 @@ #pragma once +#include "antlr4-common.h" #include "RecognitionException.h" namespace antlr4 { @@ -17,7 +18,7 @@ namespace antlr4 { public: InputMismatchException(Parser *recognizer); InputMismatchException(InputMismatchException const&) = default; - ~InputMismatchException(); + ~InputMismatchException() override; InputMismatchException& operator=(InputMismatchException const&) = default; }; diff --git a/runtime/Cpp/runtime/src/IntStream.cpp b/runtime/Cpp/runtime/src/IntStream.cpp old mode 100755 new mode 100644 index 5408ae50f6..468e901fc3 --- a/runtime/Cpp/runtime/src/IntStream.cpp +++ b/runtime/Cpp/runtime/src/IntStream.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "IntStream.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/IntStream.h b/runtime/Cpp/runtime/src/IntStream.h old mode 100755 new mode 100644 index 9932a9722d..267274bf55 --- a/runtime/Cpp/runtime/src/IntStream.h +++ b/runtime/Cpp/runtime/src/IntStream.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include #include "antlr4-common.h" namespace antlr4 { @@ -27,7 +30,7 @@ namespace antlr4 { /// class ANTLR4CPP_PUBLIC IntStream { public: - static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013 + static constexpr size_t EOF = std::numeric_limits::max(); /// The value returned by when the end of the stream is /// reached. diff --git a/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp b/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp old mode 100755 new mode 100644 index f6dbdd3cba..1343c16fd2 --- a/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp +++ b/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "InterpreterRuleContext.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/InterpreterRuleContext.h b/runtime/Cpp/runtime/src/InterpreterRuleContext.h old mode 100755 new mode 100644 index cb6973e40d..5ef4c4ecdc --- a/runtime/Cpp/runtime/src/InterpreterRuleContext.h +++ b/runtime/Cpp/runtime/src/InterpreterRuleContext.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "ParserRuleContext.h" namespace antlr4 { @@ -35,7 +37,7 @@ namespace antlr4 { */ InterpreterRuleContext(ParserRuleContext *parent, size_t invokingStateNumber, size_t ruleIndex); - virtual size_t getRuleIndex() const override; + size_t getRuleIndex() const override; protected: /** This is the backing field for {@link #getRuleIndex}. 
*/ diff --git a/runtime/Cpp/runtime/src/Lexer.cpp b/runtime/Cpp/runtime/src/Lexer.cpp old mode 100755 new mode 100644 index 6cb8cd004d..f30ce765f3 --- a/runtime/Cpp/runtime/src/Lexer.cpp +++ b/runtime/Cpp/runtime/src/Lexer.cpp @@ -3,7 +3,14 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include #include "atn/LexerATNSimulator.h" +#include "Token.h" +#include "antlr4-common.h" #include "Exceptions.h" #include "misc/Interval.h" #include "CommonTokenFactory.h" @@ -11,7 +18,6 @@ #include "ANTLRErrorListener.h" #include "support/CPPUtils.h" #include "CommonToken.h" -#include "support/StringUtils.h" #include "Lexer.h" diff --git a/runtime/Cpp/runtime/src/Lexer.h b/runtime/Cpp/runtime/src/Lexer.h old mode 100755 new mode 100644 index 16ebd57c92..6e31eb902d --- a/runtime/Cpp/runtime/src/Lexer.h +++ b/runtime/Cpp/runtime/src/Lexer.h @@ -5,7 +5,13 @@ #pragma once +#include +#include +#include +#include +#include #include "Recognizer.h" +#include "antlr4-common.h" #include "TokenSource.h" #include "CharStream.h" #include "Token.h" @@ -18,14 +24,14 @@ namespace antlr4 { /// of speed. 
class ANTLR4CPP_PUBLIC Lexer : public Recognizer, public TokenSource { public: - static const size_t DEFAULT_MODE = 0; - static const size_t MORE = static_cast(-2); - static const size_t SKIP = static_cast(-3); + static constexpr size_t DEFAULT_MODE = 0; + static constexpr size_t MORE = std::numeric_limits::max() - 1; + static constexpr size_t SKIP = std::numeric_limits::max() - 2; - static const size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL; - static const size_t HIDDEN = Token::HIDDEN_CHANNEL; - static const size_t MIN_CHAR_VALUE = 0; - static const size_t MAX_CHAR_VALUE = 0x10FFFF; + static constexpr size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL; + static constexpr size_t HIDDEN = Token::HIDDEN_CHANNEL; + static constexpr size_t MIN_CHAR_VALUE = 0; + static constexpr size_t MAX_CHAR_VALUE = 0x10FFFF; CharStream *_input; // Pure reference, usually from statically allocated instance. @@ -77,12 +83,12 @@ namespace antlr4 { Lexer(); Lexer(CharStream *input); - virtual ~Lexer() {} + ~Lexer() override {} virtual void reset(); /// Return a token from this source; i.e., match a token on the char stream. - virtual std::unique_ptr nextToken() override; + std::unique_ptr nextToken() override; /// Instruct the lexer to skip creating a token for current lexer rule /// and look for another token. nextToken() knows to keep looking when @@ -100,14 +106,14 @@ namespace antlr4 { this->_factory = factory; } - virtual TokenFactory* getTokenFactory() override; + TokenFactory* getTokenFactory() override; /// Set the char stream and reset the lexer - virtual void setInputStream(IntStream *input) override; + void setInputStream(IntStream *input) override; - virtual std::string getSourceName() override; + std::string getSourceName() override; - virtual CharStream* getInputStream() override; + CharStream* getInputStream() override; /// By default does not support multiple emits per nextToken invocation /// for efficiency reasons. 
Subclasses can override this method, nextToken, @@ -124,9 +130,9 @@ namespace antlr4 { virtual Token* emitEOF(); - virtual size_t getLine() const override; + size_t getLine() const override; - virtual size_t getCharPositionInLine() override; + size_t getCharPositionInLine() override; virtual void setLine(size_t line); diff --git a/runtime/Cpp/runtime/src/LexerInterpreter.cpp b/runtime/Cpp/runtime/src/LexerInterpreter.cpp old mode 100755 new mode 100644 index c05bbb0390..5e18bcf148 --- a/runtime/Cpp/runtime/src/LexerInterpreter.cpp +++ b/runtime/Cpp/runtime/src/LexerInterpreter.cpp @@ -3,10 +3,12 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "atn/ATNType.h" #include "atn/LexerATNSimulator.h" #include "dfa/DFA.h" -#include "atn/EmptyPredictionContext.h" #include "Exceptions.h" #include "Vocabulary.h" @@ -14,12 +16,6 @@ using namespace antlr4; -LexerInterpreter::LexerInterpreter(const std::string &grammarFileName, const std::vector &tokenNames, - const std::vector &ruleNames, const std::vector &channelNames, const std::vector &modeNames, - const atn::ATN &atn, CharStream *input) - : LexerInterpreter(grammarFileName, dfa::Vocabulary::fromTokenNames(tokenNames), ruleNames, channelNames, modeNames, atn, input) { -} - LexerInterpreter::LexerInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary, const std::vector &ruleNames, const std::vector &channelNames, const std::vector &modeNames, const atn::ATN &atn, CharStream *input) @@ -31,10 +27,6 @@ LexerInterpreter::LexerInterpreter(const std::string &grammarFileName, const dfa throw IllegalArgumentException("The ATN must be a lexer ATN."); } - for (size_t i = 0; i < atn.maxTokenType; i++) { - _tokenNames.push_back(vocabulary.getDisplayName(i)); - } - for (size_t i = 0; i < atn.getNumberOfDecisions(); ++i) { _decisionToDFA.push_back(dfa::DFA(_atn.getDecisionState(i), i)); } @@ -54,10 +46,6 @@ std::string 
LexerInterpreter::getGrammarFileName() const { return _grammarFileName; } -const std::vector& LexerInterpreter::getTokenNames() const { - return _tokenNames; -} - const std::vector& LexerInterpreter::getRuleNames() const { return _ruleNames; } diff --git a/runtime/Cpp/runtime/src/LexerInterpreter.h b/runtime/Cpp/runtime/src/LexerInterpreter.h old mode 100755 new mode 100644 index e8707e9472..234c1da25b --- a/runtime/Cpp/runtime/src/LexerInterpreter.h +++ b/runtime/Cpp/runtime/src/LexerInterpreter.h @@ -5,39 +5,36 @@ #pragma once +#include +#include #include "Lexer.h" +#include "antlr4-common.h" #include "atn/PredictionContext.h" +#include "atn/PredictionContextCache.h" #include "Vocabulary.h" namespace antlr4 { class ANTLR4CPP_PUBLIC LexerInterpreter : public Lexer { public: - // @deprecated - LexerInterpreter(const std::string &grammarFileName, const std::vector &tokenNames, - const std::vector &ruleNames, const std::vector &channelNames, - const std::vector &modeNames, const atn::ATN &atn, CharStream *input); LexerInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary, const std::vector &ruleNames, const std::vector &channelNames, const std::vector &modeNames, const atn::ATN &atn, CharStream *input); - ~LexerInterpreter(); + ~LexerInterpreter() override; - virtual const atn::ATN& getATN() const override; - virtual std::string getGrammarFileName() const override; - virtual const std::vector& getTokenNames() const override; - virtual const std::vector& getRuleNames() const override; - virtual const std::vector& getChannelNames() const override; - virtual const std::vector& getModeNames() const override; + const atn::ATN& getATN() const override; + std::string getGrammarFileName() const override; + const std::vector& getRuleNames() const override; + const std::vector& getChannelNames() const override; + const std::vector& getModeNames() const override; - virtual const dfa::Vocabulary& getVocabulary() const override; + const 
dfa::Vocabulary& getVocabulary() const override; protected: const std::string _grammarFileName; const atn::ATN &_atn; - // @deprecated - std::vector _tokenNames; const std::vector &_ruleNames; const std::vector &_channelNames; const std::vector &_modeNames; diff --git a/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp b/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp old mode 100755 new mode 100644 index cc4fdcfd90..f118a8a7f9 --- a/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp +++ b/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp @@ -3,6 +3,8 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include #include "misc/Interval.h" #include "support/CPPUtils.h" #include "CharStream.h" diff --git a/runtime/Cpp/runtime/src/LexerNoViableAltException.h b/runtime/Cpp/runtime/src/LexerNoViableAltException.h old mode 100755 new mode 100644 index bc827e803e..fe74f53051 --- a/runtime/Cpp/runtime/src/LexerNoViableAltException.h +++ b/runtime/Cpp/runtime/src/LexerNoViableAltException.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "RecognitionException.h" +#include "antlr4-common.h" #include "atn/ATNConfigSet.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/ListTokenSource.cpp b/runtime/Cpp/runtime/src/ListTokenSource.cpp old mode 100755 new mode 100644 index ec93cb9b6a..22361271ea --- a/runtime/Cpp/runtime/src/ListTokenSource.cpp +++ b/runtime/Cpp/runtime/src/ListTokenSource.cpp @@ -3,7 +3,12 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include #include "Token.h" +#include "antlr4-common.h" #include "CommonToken.h" #include "CharStream.h" diff --git a/runtime/Cpp/runtime/src/ListTokenSource.h b/runtime/Cpp/runtime/src/ListTokenSource.h old mode 100755 new mode 100644 index 70cba93638..0960def4d5 --- a/runtime/Cpp/runtime/src/ListTokenSource.h +++ b/runtime/Cpp/runtime/src/ListTokenSource.h @@ -5,7 +5,13 @@ #pragma once +#include +#include +#include +#include #include "TokenSource.h" +#include "Token.h" +#include "antlr4-common.h" #include "CommonTokenFactory.h" namespace antlr4 { @@ -68,18 +74,18 @@ namespace antlr4 { /// if {@code tokens} is {@code null} ListTokenSource(std::vector> tokens_, const std::string &sourceName_); - virtual size_t getCharPositionInLine() override; - virtual std::unique_ptr nextToken() override; - virtual size_t getLine() const override; - virtual CharStream* getInputStream() override; - virtual std::string getSourceName() override; + size_t getCharPositionInLine() override; + std::unique_ptr nextToken() override; + size_t getLine() const override; + CharStream* getInputStream() override; + std::string getSourceName() override; template void setTokenFactory(TokenFactory *factory) { this->_factory = factory; } - virtual TokenFactory* getTokenFactory() override; + TokenFactory* getTokenFactory() override; private: void InitializeInstanceFields(); diff --git a/runtime/Cpp/runtime/src/NoViableAltException.cpp b/runtime/Cpp/runtime/src/NoViableAltException.cpp old mode 100755 new mode 100644 index 273c208c74..86dd06f609 --- a/runtime/Cpp/runtime/src/NoViableAltException.cpp +++ b/runtime/Cpp/runtime/src/NoViableAltException.cpp @@ -5,6 +5,8 @@ #include "Parser.h" +#include "Token.h" +#include "antlr4-common.h" #include "NoViableAltException.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/NoViableAltException.h b/runtime/Cpp/runtime/src/NoViableAltException.h old mode 100755 new mode 100644 index b15039d0cb..c81103feba --- 
a/runtime/Cpp/runtime/src/NoViableAltException.h +++ b/runtime/Cpp/runtime/src/NoViableAltException.h @@ -6,6 +6,7 @@ #pragma once #include "RecognitionException.h" +#include "antlr4-common.h" #include "Token.h" #include "atn/ATNConfigSet.h" @@ -20,7 +21,7 @@ namespace antlr4 { NoViableAltException(Parser *recognizer); // LL(1) error NoViableAltException(Parser *recognizer, TokenStream *input,Token *startToken, Token *offendingToken, atn::ATNConfigSet *deadEndConfigs, ParserRuleContext *ctx, bool deleteConfigs); - ~NoViableAltException(); + ~NoViableAltException() override; virtual Token* getStartToken() const; virtual atn::ATNConfigSet* getDeadEndConfigs() const; diff --git a/runtime/Cpp/runtime/src/Parser.cpp b/runtime/Cpp/runtime/src/Parser.cpp old mode 100755 new mode 100644 index 8e70c9bbc5..ac50325e4f --- a/runtime/Cpp/runtime/src/Parser.cpp +++ b/runtime/Cpp/runtime/src/Parser.cpp @@ -3,7 +3,15 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include #include "atn/ATNDeserializationOptions.h" +#include "ANTLRErrorStrategy.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "tree/pattern/ParseTreePatternMatcher.h" #include "dfa/DFA.h" #include "ParserRuleContext.h" @@ -20,6 +28,7 @@ #include "Exceptions.h" #include "ANTLRErrorListener.h" #include "tree/pattern/ParseTreePattern.h" +#include "internal/Synchronization.h" #include "atn/ProfilingATNSimulator.h" #include "atn/ParseInfo.h" @@ -28,10 +37,26 @@ using namespace antlr4; using namespace antlr4::atn; - +using namespace antlr4::internal; using namespace antlrcpp; -std::map, atn::ATN> Parser::bypassAltsAtnCache; +namespace { + +struct BypassAltsAtnCache final { + std::shared_mutex mutex; + /// This field maps from the serialized ATN string to the deserialized with + /// bypass alternatives. 
+ /// + /// + std::map, std::unique_ptr, std::less<>> map; +}; + +BypassAltsAtnCache* getBypassAltsAtnCache() { + static BypassAltsAtnCache* const instance = new BypassAltsAtnCache(); + return instance; +} + +} Parser::TraceListener::TraceListener(Parser *outerInstance_) : outerInstance(outerInstance_) { } @@ -190,7 +215,7 @@ void Parser::removeParseListeners() { } void Parser::triggerEnterRuleEvent() { - for (auto listener : _parseListeners) { + for (auto *listener : _parseListeners) { listener->enterEveryRule(_ctx); _ctx->enterRule(listener); } @@ -212,27 +237,32 @@ TokenFactory* Parser::getTokenFactory() { return _input->getTokenSource()->getTokenFactory(); } - const atn::ATN& Parser::getATNWithBypassAlts() { - std::vector serializedAtn = getSerializedATN(); + auto serializedAtn = getSerializedATN(); if (serializedAtn.empty()) { throw UnsupportedOperationException("The current parser does not support an ATN with bypass alternatives."); } - - std::lock_guard lck(_mutex); - // XXX: using the entire serialized ATN as key into the map is a big resource waste. // How large can that thing become? 
- if (bypassAltsAtnCache.find(serializedAtn) == bypassAltsAtnCache.end()) + auto *cache = getBypassAltsAtnCache(); { - atn::ATNDeserializationOptions deserializationOptions; - deserializationOptions.setGenerateRuleBypassTransitions(true); - - atn::ATNDeserializer deserializer(deserializationOptions); - bypassAltsAtnCache[serializedAtn] = deserializer.deserialize(serializedAtn); + std::shared_lock lock(cache->mutex); + auto existing = cache->map.find(serializedAtn); + if (existing != cache->map.end()) { + return *existing->second; + } } - return bypassAltsAtnCache[serializedAtn]; + std::unique_lock lock(cache->mutex); + auto existing = cache->map.find(serializedAtn); + if (existing != cache->map.end()) { + return *existing->second; + } + atn::ATNDeserializationOptions deserializationOptions; + deserializationOptions.setGenerateRuleBypassTransitions(true); + atn::ATNDeserializer deserializer(deserializationOptions); + auto atn = deserializer.deserialize(serializedAtn); + return *cache->map.insert(std::make_pair(std::vector(serializedAtn.begin(), serializedAtn.end()), std::move(atn))).first->second; } tree::pattern::ParseTreePattern Parser::compileParseTreePattern(const std::string &pattern, int patternRuleIndex) { @@ -307,14 +337,14 @@ Token* Parser::consume() { tree::ErrorNode *node = createErrorNode(o); _ctx->addChild(node); if (_parseListeners.size() > 0) { - for (auto listener : _parseListeners) { + for (auto *listener : _parseListeners) { listener->visitErrorNode(node); } } } else { tree::TerminalNode *node = _ctx->addChild(createTerminalNode(o)); if (_parseListeners.size() > 0) { - for (auto listener : _parseListeners) { + for (auto *listener : _parseListeners) { listener->visitTerminal(node); } } @@ -328,8 +358,7 @@ void Parser::addContextToParseTree() { if (_ctx->parent == nullptr) return; - ParserRuleContext *parent = dynamic_cast(_ctx->parent); - parent->addChild(_ctx); + downCast(_ctx->parent)->addChild(_ctx); } void Parser::enterRule(ParserRuleContext 
*localctx, size_t state, size_t /*ruleIndex*/) { @@ -357,7 +386,7 @@ void Parser::exitRule() { triggerExitRuleEvent(); } setState(_ctx->invokingState); - _ctx = dynamic_cast(_ctx->parent); + _ctx = downCast(_ctx->parent); } void Parser::enterOuterAlt(ParserRuleContext *localctx, size_t altNum) { @@ -367,7 +396,7 @@ void Parser::enterOuterAlt(ParserRuleContext *localctx, size_t altNum) { // that is previous child of parse tree if (_buildParseTrees && _ctx != localctx) { if (_ctx->parent != nullptr) { - ParserRuleContext *parent = dynamic_cast(_ctx->parent); + ParserRuleContext *parent = downCast(_ctx->parent); parent->removeLastChild(); parent->addChild(localctx); } @@ -423,7 +452,7 @@ void Parser::unrollRecursionContexts(ParserRuleContext *parentctx) { if (_parseListeners.size() > 0) { while (_ctx != parentctx) { triggerExitRuleEvent(); - _ctx = dynamic_cast(_ctx->parent); + _ctx = downCast(_ctx->parent); } } else { _ctx = parentctx; @@ -446,7 +475,7 @@ ParserRuleContext* Parser::getInvokingContext(size_t ruleIndex) { } if (p->parent == nullptr) break; - p = dynamic_cast(p->parent); + p = downCast(p->parent); } return nullptr; } @@ -484,13 +513,13 @@ bool Parser::isExpectedToken(size_t symbol) { while (ctx && ctx->invokingState != ATNState::INVALID_STATE_NUMBER && following.contains(Token::EPSILON)) { atn::ATNState *invokingState = atn.states[ctx->invokingState]; - atn::RuleTransition *rt = static_cast(invokingState->transitions[0]); + const atn::RuleTransition *rt = static_cast(invokingState->transitions[0].get()); following = atn.nextTokens(rt->followState); if (following.contains(symbol)) { return true; } - ctx = dynamic_cast(ctx->parent); + ctx = downCast(ctx->parent); } if (following.contains(Token::EPSILON) && symbol == EOF) { @@ -543,9 +572,10 @@ std::vector Parser::getRuleInvocationStack(RuleContext *p) { } else { stack.push_back(ruleNames[ruleIndex]); } - if (p->parent == nullptr) + if (!RuleContext::is(run->parent)) { break; - run = 
dynamic_cast(run->parent); + } + run = downCast(run->parent); } return stack; } @@ -553,7 +583,7 @@ std::vector Parser::getRuleInvocationStack(RuleContext *p) { std::vector Parser::getDFAStrings() { atn::ParserATNSimulator *simulator = getInterpreter(); if (!simulator->decisionToDFA.empty()) { - std::lock_guard lck(_mutex); + UniqueLock lck(_mutex); std::vector s; for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) { @@ -568,7 +598,7 @@ std::vector Parser::getDFAStrings() { void Parser::dumpDFA() { atn::ParserATNSimulator *simulator = getInterpreter(); if (!simulator->decisionToDFA.empty()) { - std::lock_guard lck(_mutex); + UniqueLock lck(_mutex); bool seenOne = false; for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) { dfa::DFA &dfa = simulator->decisionToDFA[d]; @@ -589,12 +619,12 @@ std::string Parser::getSourceName() { } atn::ParseInfo Parser::getParseInfo() const { - atn::ProfilingATNSimulator *interp = getInterpreter(); - return atn::ParseInfo(interp); + atn::ParserATNSimulator *simulator = getInterpreter(); + return atn::ParseInfo(dynamic_cast(simulator)); } void Parser::setProfile(bool profile) { - atn::ParserATNSimulator *interp = getInterpreter(); + atn::ParserATNSimulator *interp = getInterpreter(); atn::PredictionMode saveMode = interp != nullptr ? 
interp->getPredictionMode() : atn::PredictionMode::LL; if (profile) { if (!is(interp)) { diff --git a/runtime/Cpp/runtime/src/Parser.h b/runtime/Cpp/runtime/src/Parser.h old mode 100755 new mode 100644 index 2cdd66689b..7eef920025 --- a/runtime/Cpp/runtime/src/Parser.h +++ b/runtime/Cpp/runtime/src/Parser.h @@ -5,7 +5,15 @@ #pragma once +#include +#include +#include +#include #include "Recognizer.h" +#include "misc/IntervalSet.h" +#include "ANTLRErrorStrategy.h" +#include "Token.h" +#include "antlr4-common.h" #include "tree/ParseTreeListener.h" #include "tree/ParseTree.h" #include "TokenStream.h" @@ -21,12 +29,12 @@ namespace antlr4 { class TraceListener : public tree::ParseTreeListener { public: TraceListener(Parser *outerInstance); - virtual ~TraceListener(); + ~TraceListener() override; - virtual void enterEveryRule(ParserRuleContext *ctx) override; - virtual void visitTerminal(tree::TerminalNode *node) override; - virtual void visitErrorNode(tree::ErrorNode *node) override; - virtual void exitEveryRule(ParserRuleContext *ctx) override; + void enterEveryRule(ParserRuleContext *ctx) override; + void visitTerminal(tree::TerminalNode *node) override; + void visitErrorNode(tree::ErrorNode *node) override; + void exitEveryRule(ParserRuleContext *ctx) override; private: Parser *const outerInstance; @@ -36,16 +44,16 @@ namespace antlr4 { public: static TrimToSizeListener INSTANCE; - virtual ~TrimToSizeListener(); + ~TrimToSizeListener() override; - virtual void enterEveryRule(ParserRuleContext *ctx) override; - virtual void visitTerminal(tree::TerminalNode *node) override; - virtual void visitErrorNode(tree::ErrorNode *node) override; - virtual void exitEveryRule(ParserRuleContext *ctx) override; + void enterEveryRule(ParserRuleContext *ctx) override; + void visitTerminal(tree::TerminalNode *node) override; + void visitErrorNode(tree::ErrorNode *node) override; + void exitEveryRule(ParserRuleContext *ctx) override; }; Parser(TokenStream *input); - virtual ~Parser(); + 
~Parser() override; /// reset the parser's state virtual void reset(); @@ -193,7 +201,7 @@ namespace antlr4 { /// virtual size_t getNumberOfSyntaxErrors(); - virtual TokenFactory* getTokenFactory() override; + TokenFactory* getTokenFactory() override; /// /// Tell our token source and error strategy about a new way to create tokens. @@ -229,7 +237,7 @@ namespace antlr4 { virtual Ref getErrorHandler(); virtual void setErrorHandler(Ref const& handler); - virtual IntStream* getInputStream() override; + IntStream* getInputStream() override; void setInputStream(IntStream *input) override; virtual TokenStream* getTokenStream(); @@ -297,7 +305,7 @@ namespace antlr4 { virtual ParserRuleContext* getInvokingContext(size_t ruleIndex); virtual ParserRuleContext* getContext(); virtual void setContext(ParserRuleContext *ctx); - virtual bool precpred(RuleContext *localctx, int precedence) override; + bool precpred(RuleContext *localctx, int precedence) override; virtual bool inContext(const std::string &context); /// @@ -448,12 +456,6 @@ namespace antlr4 { tree::ParseTreeTracker _tracker; private: - /// This field maps from the serialized ATN string to the deserialized with - /// bypass alternatives. - /// - /// - static std::map, atn::ATN> bypassAltsAtnCache; - /// When setTrace(true) is called, a reference to the /// TraceListener is stored here so it can be easily removed in a /// later call to setTrace(false). The listener itself is diff --git a/runtime/Cpp/runtime/src/ParserInterpreter.cpp b/runtime/Cpp/runtime/src/ParserInterpreter.cpp old mode 100755 new mode 100644 index 5d5d2ced77..f95960f5c0 --- a/runtime/Cpp/runtime/src/ParserInterpreter.cpp +++ b/runtime/Cpp/runtime/src/ParserInterpreter.cpp @@ -3,7 +3,15 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include #include "dfa/DFA.h" +#include "atn/ATNStateType.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/RuleStartState.h" #include "InterpreterRuleContext.h" #include "atn/ParserATNSimulator.h" @@ -26,6 +34,7 @@ #include "tree/ErrorNode.h" #include "support/CPPUtils.h" +#include "support/Casts.h" #include "ParserInterpreter.h" @@ -35,19 +44,10 @@ using namespace antlr4::misc; using namespace antlrcpp; -ParserInterpreter::ParserInterpreter(const std::string &grammarFileName, const std::vector& tokenNames, - const std::vector& ruleNames, const atn::ATN &atn, TokenStream *input) - : ParserInterpreter(grammarFileName, dfa::Vocabulary::fromTokenNames(tokenNames), ruleNames, atn, input) { -} - ParserInterpreter::ParserInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary, const std::vector &ruleNames, const atn::ATN &atn, TokenStream *input) : Parser(input), _grammarFileName(grammarFileName), _atn(atn), _ruleNames(ruleNames), _vocabulary(vocabulary) { - for (size_t i = 0; i < atn.maxTokenType; ++i) { - _tokenNames.push_back(vocabulary.getDisplayName(i)); - } - // init decision DFA for (size_t i = 0; i < atn.getNumberOfDecisions(); ++i) { atn::DecisionState *decisionState = atn.getDecisionState(i); @@ -72,10 +72,6 @@ const atn::ATN& ParserInterpreter::getATN() const { return _atn; } -const std::vector& ParserInterpreter::getTokenNames() const { - return _tokenNames; -} - const dfa::Vocabulary& ParserInterpreter::getVocabulary() const { return _vocabulary; } @@ -102,7 +98,7 @@ ParserRuleContext* ParserInterpreter::parse(size_t startRuleIndex) { while (true) { atn::ATNState *p = getATNState(); switch (p->getStateType()) { - case atn::ATNState::RULE_STOP : + case atn::ATNStateType::RULE_STOP : // pop; return from rule if (_ctx->isEmpty()) { if (startRuleStartState->isLeftRecursiveRule) { @@ -161,16 +157,16 @@ atn::ATNState* ParserInterpreter::getATNState() 
{ void ParserInterpreter::visitState(atn::ATNState *p) { size_t predictedAlt = 1; - if (is(p)) { - predictedAlt = visitDecisionState(dynamic_cast(p)); + if (DecisionState::is(p)) { + predictedAlt = visitDecisionState(downCast(p)); } - atn::Transition *transition = p->transitions[predictedAlt - 1]; - switch (transition->getSerializationType()) { - case atn::Transition::EPSILON: - if (p->getStateType() == ATNState::STAR_LOOP_ENTRY && - (dynamic_cast(p))->isPrecedenceDecision && - !is(transition->target)) { + const atn::Transition *transition = p->transitions[predictedAlt - 1].get(); + switch (transition->getTransitionType()) { + case atn::TransitionType::EPSILON: + if (p->getStateType() == ATNStateType::STAR_LOOP_ENTRY && + (downCast(p))->isPrecedenceDecision && + !LoopEndState::is(transition->target)) { // We are at the start of a left recursive rule's (...)* loop // and we're not taking the exit branch of loop. InterpreterRuleContext *localctx = createInterpreterRuleContext(_parentContextStack.top().first, @@ -179,56 +175,56 @@ void ParserInterpreter::visitState(atn::ATNState *p) { } break; - case atn::Transition::ATOM: - match(static_cast(static_cast(transition)->_label)); + case atn::TransitionType::ATOM: + match(static_cast(static_cast(transition)->_label)); break; - case atn::Transition::RANGE: - case atn::Transition::SET: - case atn::Transition::NOT_SET: + case atn::TransitionType::RANGE: + case atn::TransitionType::SET: + case atn::TransitionType::NOT_SET: if (!transition->matches(static_cast(_input->LA(1)), Token::MIN_USER_TOKEN_TYPE, Lexer::MAX_CHAR_VALUE)) { recoverInline(); } matchWildcard(); break; - case atn::Transition::WILDCARD: + case atn::TransitionType::WILDCARD: matchWildcard(); break; - case atn::Transition::RULE: + case atn::TransitionType::RULE: { atn::RuleStartState *ruleStartState = static_cast(transition->target); size_t ruleIndex = ruleStartState->ruleIndex; InterpreterRuleContext *newctx = createInterpreterRuleContext(_ctx, p->stateNumber, 
ruleIndex); if (ruleStartState->isLeftRecursiveRule) { - enterRecursionRule(newctx, ruleStartState->stateNumber, ruleIndex, static_cast(transition)->precedence); + enterRecursionRule(newctx, ruleStartState->stateNumber, ruleIndex, static_cast(transition)->precedence); } else { enterRule(newctx, transition->target->stateNumber, ruleIndex); } } break; - case atn::Transition::PREDICATE: + case atn::TransitionType::PREDICATE: { - atn::PredicateTransition *predicateTransition = static_cast(transition); - if (!sempred(_ctx, predicateTransition->ruleIndex, predicateTransition->predIndex)) { + const atn::PredicateTransition *predicateTransition = static_cast(transition); + if (!sempred(_ctx, predicateTransition->getRuleIndex(), predicateTransition->getPredIndex())) { throw FailedPredicateException(this); } } break; - case atn::Transition::ACTION: + case atn::TransitionType::ACTION: { - atn::ActionTransition *actionTransition = static_cast(transition); + const atn::ActionTransition *actionTransition = static_cast(transition); action(_ctx, actionTransition->ruleIndex, actionTransition->actionIndex); } break; - case atn::Transition::PRECEDENCE: + case atn::TransitionType::PRECEDENCE: { - if (!precpred(_ctx, static_cast(transition)->precedence)) { - throw FailedPredicateException(this, "precpred(_ctx, " + std::to_string(static_cast(transition)->precedence) + ")"); + if (!precpred(_ctx, static_cast(transition)->getPrecedence())) { + throw FailedPredicateException(this, "precpred(_ctx, " + std::to_string(static_cast(transition)->getPrecedence()) + ")"); } } break; @@ -272,7 +268,7 @@ void ParserInterpreter::visitRuleStopState(atn::ATNState *p) { exitRule(); } - atn::RuleTransition *ruleTransition = static_cast(_atn.states[getState()]->transitions[0]); + const atn::RuleTransition *ruleTransition = static_cast(_atn.states[getState()]->transitions[0].get()); setState(ruleTransition->followState->stateNumber); } diff --git a/runtime/Cpp/runtime/src/ParserInterpreter.h 
b/runtime/Cpp/runtime/src/ParserInterpreter.h old mode 100755 new mode 100644 index f25fc53741..ae57da7275 --- a/runtime/Cpp/runtime/src/ParserInterpreter.h +++ b/runtime/Cpp/runtime/src/ParserInterpreter.h @@ -5,10 +5,19 @@ #pragma once +#include +#include +#include +#include +#include #include "Parser.h" +#include "Token.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/ATN.h" #include "support/BitSet.h" #include "atn/PredictionContext.h" +#include "atn/PredictionContextCache.h" #include "Vocabulary.h" namespace antlr4 { @@ -29,29 +38,23 @@ namespace antlr4 { /// class ANTLR4CPP_PUBLIC ParserInterpreter : public Parser { public: - // @deprecated - ParserInterpreter(const std::string &grammarFileName, const std::vector& tokenNames, - const std::vector& ruleNames, const atn::ATN &atn, TokenStream *input); ParserInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary, const std::vector &ruleNames, const atn::ATN &atn, TokenStream *input); - ~ParserInterpreter(); + ~ParserInterpreter() override; - virtual void reset() override; + void reset() override; - virtual const atn::ATN& getATN() const override; + const atn::ATN& getATN() const override; - // @deprecated - virtual const std::vector& getTokenNames() const override; + const dfa::Vocabulary& getVocabulary() const override; - virtual const dfa::Vocabulary& getVocabulary() const override; - - virtual const std::vector& getRuleNames() const override; - virtual std::string getGrammarFileName() const override; + const std::vector& getRuleNames() const override; + std::string getGrammarFileName() const override; /// Begin parsing at startRuleIndex virtual ParserRuleContext* parse(size_t startRuleIndex); - virtual void enterRecursionRule(ParserRuleContext *localctx, size_t state, size_t ruleIndex, int precedence) override; + void enterRecursionRule(ParserRuleContext *localctx, size_t state, size_t ruleIndex, int precedence) override; /** Override this parser interpreters 
normal decision-making process @@ -110,7 +113,6 @@ namespace antlr4 { protected: const std::string _grammarFileName; - std::vector _tokenNames; const atn::ATN &_atn; std::vector _ruleNames; diff --git a/runtime/Cpp/runtime/src/ParserRuleContext.cpp b/runtime/Cpp/runtime/src/ParserRuleContext.cpp old mode 100755 new mode 100644 index cfbb5f885f..6f3f4de346 --- a/runtime/Cpp/runtime/src/ParserRuleContext.cpp +++ b/runtime/Cpp/runtime/src/ParserRuleContext.cpp @@ -3,12 +3,16 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "tree/TerminalNode.h" #include "tree/ErrorNode.h" #include "misc/Interval.h" #include "Parser.h" #include "Token.h" +#include "support/Casts.h" #include "support/CPPUtils.h" #include "ParserRuleContext.h" @@ -38,11 +42,10 @@ void ParserRuleContext::copyFrom(ParserRuleContext *ctx) { // copy any error nodes to alt label node if (!ctx->children.empty()) { - for (auto child : ctx->children) { - auto errorNode = dynamic_cast(child); - if (errorNode != nullptr) { - errorNode->setParent(this); - children.push_back(errorNode); + for (auto *child : ctx->children) { + if (ErrorNode::is(child)) { + downCast(child)->setParent(this); + children.push_back(child); } } @@ -76,39 +79,36 @@ void ParserRuleContext::removeLastChild() { } } -tree::TerminalNode* ParserRuleContext::getToken(size_t ttype, size_t i) { +tree::TerminalNode* ParserRuleContext::getToken(size_t ttype, size_t i) const { if (i >= children.size()) { return nullptr; } - size_t j = 0; // what token with ttype have we found? 
- for (auto o : children) { - if (is(o)) { - tree::TerminalNode *tnode = dynamic_cast(o); - Token *symbol = tnode->getSymbol(); + for (auto *child : children) { + if (TerminalNode::is(child)) { + tree::TerminalNode *typedChild = downCast(child); + Token *symbol = typedChild->getSymbol(); if (symbol->getType() == ttype) { if (j++ == i) { - return tnode; + return typedChild; } } } } - return nullptr; } -std::vector ParserRuleContext::getTokens(size_t ttype) { - std::vector tokens; - for (auto &o : children) { - if (is(o)) { - tree::TerminalNode *tnode = dynamic_cast(o); - Token *symbol = tnode->getSymbol(); +std::vector ParserRuleContext::getTokens(size_t ttype) const { + std::vector tokens; + for (auto *child : children) { + if (TerminalNode::is(child)) { + tree::TerminalNode *typedChild = downCast(child); + Token *symbol = typedChild->getSymbol(); if (symbol->getType() == ttype) { - tokens.push_back(tnode); + tokens.push_back(typedChild); } } } - return tokens; } @@ -123,11 +123,11 @@ misc::Interval ParserRuleContext::getSourceInterval() { return misc::Interval(start->getTokenIndex(), stop->getTokenIndex()); } -Token* ParserRuleContext::getStart() { +Token* ParserRuleContext::getStart() const { return start; } -Token* ParserRuleContext::getStop() { +Token* ParserRuleContext::getStop() const { return stop; } diff --git a/runtime/Cpp/runtime/src/ParserRuleContext.h b/runtime/Cpp/runtime/src/ParserRuleContext.h old mode 100755 new mode 100644 index e117c3b6be..c9c66bb684 --- a/runtime/Cpp/runtime/src/ParserRuleContext.h +++ b/runtime/Cpp/runtime/src/ParserRuleContext.h @@ -5,7 +5,14 @@ #pragma once +#include +#include +#include +#include #include "RuleContext.h" +#include "misc/Interval.h" +#include "Token.h" +#include "antlr4-common.h" #include "support/CPPUtils.h" namespace antlr4 { @@ -67,7 +74,6 @@ namespace antlr4 { ParserRuleContext(); ParserRuleContext(ParserRuleContext *parent, size_t invokingStateNumber); - virtual ~ParserRuleContext() {} /** COPY a ctx (I'm 
deliberately not using copy constructor) to avoid * confusion with creating node with parent. Does not copy children @@ -88,23 +94,22 @@ namespace antlr4 { /// Used by enterOuterAlt to toss out a RuleContext previously added as /// we entered a rule. If we have # label, we will need to remove /// generic ruleContext object. - virtual void removeLastChild(); + void removeLastChild(); - virtual tree::TerminalNode* getToken(size_t ttype, std::size_t i); + tree::TerminalNode* getToken(size_t ttype, std::size_t i) const; - virtual std::vector getTokens(size_t ttype); + std::vector getTokens(size_t ttype) const; template - T* getRuleContext(size_t i) { - if (children.empty()) { - return nullptr; - } - + T* getRuleContext(size_t i) const { + static_assert(std::is_base_of_v, "T must be derived from RuleContext"); size_t j = 0; // what element have we found with ctxType? - for (auto &child : children) { - if (antlrcpp::is(child)) { - if (j++ == i) { - return dynamic_cast(child); + for (auto *child : children) { + if (RuleContext::is(child)) { + if (auto *typedChild = dynamic_cast(child); typedChild != nullptr) { + if (j++ == i) { + return typedChild; + } } } } @@ -112,32 +117,34 @@ namespace antlr4 { } template - std::vector getRuleContexts() { - std::vector contexts; - for (auto child : children) { - if (antlrcpp::is(child)) { - contexts.push_back(dynamic_cast(child)); + std::vector getRuleContexts() const { + static_assert(std::is_base_of_v, "T must be derived from RuleContext"); + std::vector contexts; + for (auto *child : children) { + if (RuleContext::is(child)) { + if (auto *typedChild = dynamic_cast(child); typedChild != nullptr) { + contexts.push_back(typedChild); + } } } - return contexts; } - virtual misc::Interval getSourceInterval() override; + misc::Interval getSourceInterval() override; /** * Get the initial token in this context. 
* Note that the range from start to stop is inclusive, so for rules that do not consume anything * (for example, zero length or error productions) this token may exceed stop. */ - virtual Token *getStart(); + Token* getStart() const; /** * Get the final token in this context. * Note that the range from start to stop is inclusive, so for rules that do not consume anything * (for example, zero length or error productions) this token may precede start. */ - virtual Token *getStop(); + Token* getStop() const; /// /// Used for rule context info debugging during parse-time, not so much for ATN debugging diff --git a/runtime/Cpp/runtime/src/ProxyErrorListener.cpp b/runtime/Cpp/runtime/src/ProxyErrorListener.cpp old mode 100755 new mode 100644 index 4a961d7f8a..6985f53415 --- a/runtime/Cpp/runtime/src/ProxyErrorListener.cpp +++ b/runtime/Cpp/runtime/src/ProxyErrorListener.cpp @@ -3,6 +3,11 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include "Token.h" +#include "ANTLRErrorListener.h" #include "ProxyErrorListener.h" using namespace antlr4; @@ -26,28 +31,28 @@ void ProxyErrorListener::removeErrorListeners() { void ProxyErrorListener::syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line, size_t charPositionInLine, const std::string &msg, std::exception_ptr e) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { listener->syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e); } } void ProxyErrorListener::reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { listener->reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs); } } void ProxyErrorListener::reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t 
stopIndex, const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { listener->reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs); } } void ProxyErrorListener::reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, size_t prediction, atn::ATNConfigSet *configs) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { listener->reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs); } } diff --git a/runtime/Cpp/runtime/src/ProxyErrorListener.h b/runtime/Cpp/runtime/src/ProxyErrorListener.h old mode 100755 new mode 100644 index 6720f9714a..b8951cb599 --- a/runtime/Cpp/runtime/src/ProxyErrorListener.h +++ b/runtime/Cpp/runtime/src/ProxyErrorListener.h @@ -5,7 +5,12 @@ #pragma once +#include +#include +#include #include "ANTLRErrorListener.h" +#include "Token.h" +#include "antlr4-common.h" #include "Exceptions.h" namespace antlr4 { @@ -25,13 +30,13 @@ namespace antlr4 { void syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line, size_t charPositionInLine, const std::string &msg, std::exception_ptr e) override; - virtual void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, + void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) override; - virtual void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, + void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) override; - virtual void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t 
stopIndex, + void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, size_t prediction, atn::ATNConfigSet *configs) override; }; diff --git a/runtime/Cpp/runtime/src/RecognitionException.cpp b/runtime/Cpp/runtime/src/RecognitionException.cpp old mode 100755 new mode 100644 index 29c950819e..aae115d3d1 --- a/runtime/Cpp/runtime/src/RecognitionException.cpp +++ b/runtime/Cpp/runtime/src/RecognitionException.cpp @@ -3,9 +3,11 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "atn/ATN.h" +#include "Token.h" +#include "antlr4-common.h" #include "Recognizer.h" -#include "support/StringUtils.h" #include "ParserRuleContext.h" #include "misc/IntervalSet.h" diff --git a/runtime/Cpp/runtime/src/RecognitionException.h b/runtime/Cpp/runtime/src/RecognitionException.h old mode 100755 new mode 100644 index aa204f71e9..34c82fd9af --- a/runtime/Cpp/runtime/src/RecognitionException.h +++ b/runtime/Cpp/runtime/src/RecognitionException.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "misc/IntervalSet.h" +#include "Token.h" #include "Exceptions.h" namespace antlr4 { @@ -34,7 +39,7 @@ namespace antlr4 { RecognitionException(const std::string &message, Recognizer *recognizer, IntStream *input, ParserRuleContext *ctx, Token *offendingToken = nullptr); RecognitionException(RecognitionException const&) = default; - ~RecognitionException(); + ~RecognitionException() override; RecognitionException& operator=(RecognitionException const&) = default; /// Get the ATN state number the parser was in at the time the error diff --git a/runtime/Cpp/runtime/src/Recognizer.cpp b/runtime/Cpp/runtime/src/Recognizer.cpp old mode 100755 new mode 100644 index 257619ba17..a1a328c174 --- a/runtime/Cpp/runtime/src/Recognizer.cpp +++ b/runtime/Cpp/runtime/src/Recognizer.cpp @@ -3,14 +3,21 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include #include "ConsoleErrorListener.h" +#include "ANTLRErrorListener.h" +#include "atn/ATNState.h" #include "RecognitionException.h" #include "support/CPPUtils.h" -#include "support/StringUtils.h" #include "Token.h" #include "atn/ATN.h" #include "atn/ATNSimulator.h" #include "support/CPPUtils.h" +#include "support/StringUtils.h" #include "Vocabulary.h" @@ -18,8 +25,9 @@ using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::internal; -std::map> Recognizer::_tokenTypeMapCache; +std::map> Recognizer::_tokenTypeMapCache; std::map, std::map> Recognizer::_ruleIndexMapCache; Recognizer::Recognizer() { @@ -30,31 +38,26 @@ Recognizer::Recognizer() { Recognizer::~Recognizer() { } -dfa::Vocabulary const& Recognizer::getVocabulary() const { - static dfa::Vocabulary vocabulary = dfa::Vocabulary::fromTokenNames(getTokenNames()); - return vocabulary; -} - -std::map Recognizer::getTokenTypeMap() { +std::map Recognizer::getTokenTypeMap() { const dfa::Vocabulary& vocabulary = getVocabulary(); - std::lock_guard lck(_mutex); - std::map result; + UniqueLock lck(_mutex); + std::map result; auto iterator = _tokenTypeMapCache.find(&vocabulary); if (iterator != _tokenTypeMapCache.end()) { result = iterator->second; } else { for (size_t i = 0; i <= getATN().maxTokenType; ++i) { - std::string literalName = vocabulary.getLiteralName(i); + std::string_view literalName = vocabulary.getLiteralName(i); if (!literalName.empty()) { result[literalName] = i; } - std::string symbolicName = vocabulary.getSymbolicName(i); + std::string_view symbolicName = vocabulary.getSymbolicName(i); if (!symbolicName.empty()) { result[symbolicName] = i; } - } + } result["EOF"] = EOF; _tokenTypeMapCache[&vocabulary] = result; } @@ -68,7 +71,7 @@ std::map Recognizer::getRuleIndexMap() { throw "The current recognizer does not provide a list of rule names."; } - std::lock_guard lck(_mutex); + UniqueLock lck(_mutex); std::map result; auto iterator = 
_ruleIndexMapCache.find(ruleNames); if (iterator != _ruleIndexMapCache.end()) { @@ -80,8 +83,8 @@ std::map Recognizer::getRuleIndexMap() { return result; } -size_t Recognizer::getTokenType(const std::string &tokenName) { - const std::map &map = getTokenTypeMap(); +size_t Recognizer::getTokenType(std::string_view tokenName) { + const std::map &map = getTokenTypeMap(); auto iterator = map.find(tokenName); if (iterator == map.end()) return Token::INVALID_TYPE; @@ -118,11 +121,13 @@ std::string Recognizer::getTokenErrorDisplay(Token *t) { } } - antlrcpp::replaceAll(s, "\n", "\\n"); - antlrcpp::replaceAll(s, "\r","\\r"); - antlrcpp::replaceAll(s, "\t", "\\t"); - - return "'" + s + "'"; + std::string result; + result.reserve(s.size() + 2); + result.push_back('\''); + antlrcpp::escapeWhitespace(result, s); + result.push_back('\''); + result.shrink_to_fit(); + return result; } void Recognizer::addErrorListener(ANTLRErrorListener *listener) { @@ -152,14 +157,6 @@ bool Recognizer::precpred(RuleContext * /*localctx*/, int /*precedence*/) { void Recognizer::action(RuleContext * /*localctx*/, size_t /*ruleIndex*/, size_t /*actionIndex*/) { } -size_t Recognizer::getState() const { - return _stateNumber; -} - -void Recognizer::setState(size_t atnState) { - _stateNumber = atnState; -} - void Recognizer::InitializeInstanceFields() { _stateNumber = ATNState::INVALID_STATE_NUMBER; _interpreter = nullptr; diff --git a/runtime/Cpp/runtime/src/Recognizer.h b/runtime/Cpp/runtime/src/Recognizer.h old mode 100755 new mode 100644 index 1b16f5e913..aa9989d124 --- a/runtime/Cpp/runtime/src/Recognizer.h +++ b/runtime/Cpp/runtime/src/Recognizer.h @@ -5,13 +5,25 @@ #pragma once +#include +#include +#include +#include +#include +#include #include "ProxyErrorListener.h" +#include "ANTLRErrorListener.h" +#include "Token.h" +#include "antlr4-common.h" +#include "support/Casts.h" +#include "atn/SerializedATNView.h" +#include "internal/Synchronization.h" namespace antlr4 { class ANTLR4CPP_PUBLIC 
Recognizer { public: - static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013. + static constexpr size_t EOF = std::numeric_limits::max(); Recognizer(); Recognizer(Recognizer const&) = delete; @@ -19,13 +31,6 @@ namespace antlr4 { Recognizer& operator=(Recognizer const&) = delete; - /** Used to print out token names like ID during debugging and - * error reporting. The generated parsers implement a method - * that overrides this to point to their String[] tokenNames. - * - * @deprecated Use {@link #getVocabulary()} instead. - */ - virtual std::vector const& getTokenNames() const = 0; virtual std::vector const& getRuleNames() const = 0; /** @@ -34,14 +39,14 @@ namespace antlr4 { * @return A {@link Vocabulary} instance providing information about the * vocabulary used by the grammar. */ - virtual dfa::Vocabulary const& getVocabulary() const; + virtual dfa::Vocabulary const& getVocabulary() const = 0; /// /// Get a map from token names to token types. ///

    /// Used for XPath and tree pattern compilation. ///

    - virtual std::map getTokenTypeMap(); + virtual std::map getTokenTypeMap(); /// /// Get a map from rule names to rule indexes. @@ -50,7 +55,7 @@ namespace antlr4 { /// virtual std::map getRuleIndexMap(); - virtual size_t getTokenType(const std::string &tokenName); + virtual size_t getTokenType(std::string_view tokenName); /// /// If this recognizer was generated, it will have a serialized ATN @@ -59,7 +64,7 @@ namespace antlr4 { /// For interpreters, we don't know their serialized ATN despite having /// created the interpreter from it. /// - virtual const std::vector getSerializedATN() const { + virtual atn::SerializedATNView getSerializedATN() const { throw "there is no serialized ATN"; } @@ -73,7 +78,7 @@ namespace antlr4 { /// @returns The ATN interpreter used by the recognizer for prediction. template T* getInterpreter() const { - return dynamic_cast(_interpreter); + return antlrcpp::downCast(_interpreter); } /** @@ -119,7 +124,7 @@ namespace antlr4 { virtual void action(RuleContext *localctx, size_t ruleIndex, size_t actionIndex); - virtual size_t getState() const ; + size_t getState() const { return _stateNumber; } // Get the ATN used by the recognizer for prediction. virtual const atn::ATN& getATN() const = 0; @@ -132,7 +137,7 @@ namespace antlr4 { /// invoking rules. Combine this and we have complete ATN /// configuration information. ///
    - void setState(size_t atnState); + void setState(size_t atnState) { _stateNumber = atnState; } virtual IntStream* getInputStream() = 0; @@ -147,10 +152,10 @@ namespace antlr4 { atn::ATNSimulator *_interpreter; // Set and deleted in descendants (or the profiler). // Mutex to manage synchronized access for multithreading. - std::mutex _mutex; + internal::Mutex _mutex; private: - static std::map> _tokenTypeMapCache; + static std::map> _tokenTypeMapCache; static std::map, std::map> _ruleIndexMapCache; ProxyErrorListener _proxListener; // Manages a collection of listeners. diff --git a/runtime/Cpp/runtime/src/RuleContext.cpp b/runtime/Cpp/runtime/src/RuleContext.cpp old mode 100755 new mode 100644 index 467e5ec427..f8b6e9e049 --- a/runtime/Cpp/runtime/src/RuleContext.cpp +++ b/runtime/Cpp/runtime/src/RuleContext.cpp @@ -3,7 +3,13 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "tree/Trees.h" +#include "tree/ParseTreeType.h" +#include "antlr4-common.h" #include "misc/Interval.h" #include "Parser.h" #include "atn/ATN.h" @@ -14,12 +20,13 @@ using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::tree; -RuleContext::RuleContext() { +RuleContext::RuleContext() : ParseTree(ParseTreeType::RULE) { InitializeInstanceFields(); } -RuleContext::RuleContext(RuleContext *parent_, size_t invokingState_) { +RuleContext::RuleContext(RuleContext *parent_, size_t invokingState_) : ParseTree(ParseTreeType::RULE) { InitializeInstanceFields(); this->parent = parent_; this->invokingState = invokingState_; @@ -71,7 +78,7 @@ size_t RuleContext::getAltNumber() const { void RuleContext::setAltNumber(size_t /*altNumber*/) { } -antlrcpp::Any RuleContext::accept(tree::ParseTreeVisitor *visitor) { +std::any RuleContext::accept(tree::ParseTreeVisitor *visitor) { return visitor->visitChildren(this); } diff --git a/runtime/Cpp/runtime/src/RuleContext.h b/runtime/Cpp/runtime/src/RuleContext.h old mode 100755 new 
mode 100644 index 9ee0d2defd..6832bb2b34 --- a/runtime/Cpp/runtime/src/RuleContext.h +++ b/runtime/Cpp/runtime/src/RuleContext.h @@ -5,6 +5,12 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "tree/ParseTreeType.h" +#include "misc/Interval.h" #include "tree/ParseTree.h" namespace antlr4 { @@ -61,6 +67,10 @@ namespace antlr4 { */ class ANTLR4CPP_PUBLIC RuleContext : public tree::ParseTree { public: + static bool is(const tree::ParseTree &parseTree) { return parseTree.getTreeType() == tree::ParseTreeType::RULE; } + + static bool is(const tree::ParseTree *parseTree) { return parseTree != nullptr && is(*parseTree); } + /// What state invoked the rule associated with this context? /// The "return address" is the followState of invokingState /// If parent is null, this should be -1 and this context object represents the start rule. @@ -76,9 +86,9 @@ namespace antlr4 { // satisfy the ParseTree / SyntaxTree interface - virtual misc::Interval getSourceInterval() override; + misc::Interval getSourceInterval() override; - virtual std::string getText() override; + std::string getText() override; virtual size_t getRuleIndex() const; @@ -103,14 +113,14 @@ namespace antlr4 { */ virtual void setAltNumber(size_t altNumber); - virtual antlrcpp::Any accept(tree::ParseTreeVisitor *visitor) override; + std::any accept(tree::ParseTreeVisitor *visitor) override; /// /// Print out a whole tree, not just a node, in LISP format /// (root child1 .. childN). Print just a node if this is a leaf. /// We have to know the recognizer so we can get rule names. 
/// - virtual std::string toStringTree(Parser *recog, bool pretty = false) override; + std::string toStringTree(Parser *recog, bool pretty = false) override; /// /// Print out a whole tree, not just a node, in LISP format @@ -118,8 +128,8 @@ namespace antlr4 { /// virtual std::string toStringTree(std::vector &ruleNames, bool pretty = false); - virtual std::string toStringTree(bool pretty = false) override; - virtual std::string toString() override; + std::string toStringTree(bool pretty = false) override; + std::string toString() override; std::string toString(Recognizer *recog); std::string toString(const std::vector &ruleNames); @@ -128,8 +138,6 @@ namespace antlr4 { virtual std::string toString(const std::vector &ruleNames, RuleContext *stop); - bool operator == (const RuleContext &other) { return this == &other; } // Simple address comparison. - private: void InitializeInstanceFields(); }; diff --git a/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp b/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp old mode 100755 new mode 100644 index 250859fdc0..945326bbc7 --- a/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp +++ b/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "atn/ATN.h" #include "RuleContextWithAltNum.h" diff --git a/runtime/Cpp/runtime/src/RuleContextWithAltNum.h b/runtime/Cpp/runtime/src/RuleContextWithAltNum.h old mode 100755 new mode 100644 index 995d9aa7b1..5a38ef8b8c --- a/runtime/Cpp/runtime/src/RuleContextWithAltNum.h +++ b/runtime/Cpp/runtime/src/RuleContextWithAltNum.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "ParserRuleContext.h" namespace antlr4 { @@ -25,8 +27,8 @@ namespace antlr4 { RuleContextWithAltNum(); RuleContextWithAltNum(ParserRuleContext *parent, int invokingStateNumber); - virtual size_t getAltNumber() const override; - virtual void setAltNumber(size_t altNum) override; + size_t getAltNumber() const override; + void setAltNumber(size_t altNum) override; }; } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/RuntimeMetaData.cpp b/runtime/Cpp/runtime/src/RuntimeMetaData.cpp old mode 100755 new mode 100644 index 3ba7998065..2f26f6b459 --- a/runtime/Cpp/runtime/src/RuntimeMetaData.cpp +++ b/runtime/Cpp/runtime/src/RuntimeMetaData.cpp @@ -3,11 +3,14 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "RuntimeMetaData.h" +#include "Version.h" using namespace antlr4; -const std::string RuntimeMetaData::VERSION = "4.8"; +const std::string RuntimeMetaData::VERSION = ANTLRCPP_VERSION_STRING; std::string RuntimeMetaData::getRuntimeVersion() { return VERSION; diff --git a/runtime/Cpp/runtime/src/RuntimeMetaData.h b/runtime/Cpp/runtime/src/RuntimeMetaData.h old mode 100755 new mode 100644 index f178cfe9e8..33b8c08529 --- a/runtime/Cpp/runtime/src/RuntimeMetaData.h +++ b/runtime/Cpp/runtime/src/RuntimeMetaData.h @@ -5,6 +5,7 @@ #pragma once +#include #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/Token.h b/runtime/Cpp/runtime/src/Token.h old mode 100755 new mode 100644 index a7c1594ffd..eae8eccf92 --- a/runtime/Cpp/runtime/src/Token.h +++ b/runtime/Cpp/runtime/src/Token.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" #include "IntStream.h" namespace antlr4 { @@ -14,24 +18,24 @@ namespace antlr4 { /// we obtained this token. class ANTLR4CPP_PUBLIC Token { public: - static const size_t INVALID_TYPE = 0; + static constexpr size_t INVALID_TYPE = 0; /// During lookahead operations, this "token" signifies we hit rule end ATN state /// and did not follow it despite needing to. - static const size_t EPSILON = static_cast(-2); - static const size_t MIN_USER_TOKEN_TYPE = 1; - static const size_t EOF = IntStream::EOF; + static constexpr size_t EPSILON = std::numeric_limits::max() - 1; + static constexpr size_t MIN_USER_TOKEN_TYPE = 1; + static constexpr size_t EOF = IntStream::EOF; virtual ~Token(); /// All tokens go to the parser (unless skip() is called in that rule) /// on a particular "channel". The parser tunes to a particular channel /// so that whitespace etc... can go to the parser on a "hidden" channel. 
- static const size_t DEFAULT_CHANNEL = 0; + static constexpr size_t DEFAULT_CHANNEL = 0; /// Anything on different channel than DEFAULT_CHANNEL is not parsed /// by parser. - static const size_t HIDDEN_CHANNEL = 1; + static constexpr size_t HIDDEN_CHANNEL = 1; /** * This is the minimum constant value which can be assigned to a @@ -44,7 +48,7 @@ namespace antlr4 { * * @see Token#getChannel() */ - static const size_t MIN_USER_CHANNEL_VALUE = 2; + static constexpr size_t MIN_USER_CHANNEL_VALUE = 2; /// Get the text of the token. virtual std::string getText() const = 0; diff --git a/runtime/Cpp/runtime/src/TokenFactory.h b/runtime/Cpp/runtime/src/TokenFactory.h old mode 100755 new mode 100644 index e29335f5b7..4a9f0c58c9 --- a/runtime/Cpp/runtime/src/TokenFactory.h +++ b/runtime/Cpp/runtime/src/TokenFactory.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/TokenSource.h b/runtime/Cpp/runtime/src/TokenSource.h old mode 100755 new mode 100644 index a8ed34f85d..472c2767c8 --- a/runtime/Cpp/runtime/src/TokenSource.h +++ b/runtime/Cpp/runtime/src/TokenSource.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "Token.h" #include "TokenFactory.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/TokenStream.h b/runtime/Cpp/runtime/src/TokenStream.h old mode 100755 new mode 100644 index c7dd0d4033..37b5b49192 --- a/runtime/Cpp/runtime/src/TokenStream.h +++ b/runtime/Cpp/runtime/src/TokenStream.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "misc/Interval.h" +#include "Token.h" #include "IntStream.h" namespace antlr4 { @@ -22,7 +27,7 @@ namespace antlr4 { ///
    /// public: - virtual ~TokenStream(); + ~TokenStream() override; virtual Token* LT(ssize_t k) = 0; diff --git a/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp b/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp old mode 100755 new mode 100644 index df20ea9b9d..eab5e00eb3 --- a/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp +++ b/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp @@ -1,9 +1,14 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "Exceptions.h" +#include "antlr4-common.h" #include "misc/Interval.h" #include "Token.h" #include "TokenStream.h" @@ -93,8 +98,8 @@ TokenStreamRewriter::TokenStreamRewriter(TokenStream *tokens_) : tokens(tokens_) } TokenStreamRewriter::~TokenStreamRewriter() { - for (auto program : _programs) { - for (auto operation : program.second) { + for (const auto &program : _programs) { + for (auto *operation : program.second) { delete operation; } } @@ -313,6 +318,10 @@ std::string TokenStreamRewriter::getText(const std::string &programName, const I std::unordered_map TokenStreamRewriter::reduceToSingleOperationPerIndex( std::vector &rewrites) { + // Reset the instructionIndex + for (size_t i = 0; i < rewrites.size(); ++i) { + rewrites[i]->instructionIndex = i; + } // WALK REPLACES for (size_t i = 0; i < rewrites.size(); ++i) { @@ -323,27 +332,27 @@ std::unordered_map TokenStreamRe // Wipe prior inserts within range std::vector inserts = getKindOfOps(rewrites, i); - for (auto iop : inserts) { + for (auto *iop : inserts) { if (iop->index == rop->index) { // E.g., insert before 2, delete 2..2; update replace // text to include insert before, kill insert - delete rewrites[iop->instructionIndex]; - rewrites[iop->instructionIndex] = nullptr; rop->text = iop->text + 
(!rop->text.empty() ? rop->text : ""); + rewrites[iop->instructionIndex] = nullptr; + delete iop; } else if (iop->index > rop->index && iop->index <= rop->lastIndex) { // delete insert as it's a no-op. - delete rewrites[iop->instructionIndex]; rewrites[iop->instructionIndex] = nullptr; + delete iop; } } // Drop any prior replaces contained within std::vector prevReplaces = getKindOfOps(rewrites, i); - for (auto prevRop : prevReplaces) { + for (auto *prevRop : prevReplaces) { if (prevRop->index >= rop->index && prevRop->lastIndex <= rop->lastIndex) { // delete replace as it's a no-op. - delete rewrites[prevRop->instructionIndex]; rewrites[prevRop->instructionIndex] = nullptr; + delete prevRop; continue; } // throw exception unless disjoint or identical @@ -351,11 +360,10 @@ std::unordered_map TokenStreamRe // Delete special case of replace (text==null): // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) if (prevRop->text.empty() && rop->text.empty() && !disjoint) { - delete rewrites[prevRop->instructionIndex]; - rewrites[prevRop->instructionIndex] = nullptr; // kill first delete rop->index = std::min(prevRop->index, rop->index); rop->lastIndex = std::max(prevRop->lastIndex, rop->lastIndex); - std::cout << "new rop " << rop << std::endl; + rewrites[prevRop->instructionIndex] = nullptr; // kill first delete + delete prevRop; } else if (!disjoint) { throw IllegalArgumentException("replace op boundaries of " + rop->toString() + @@ -373,19 +381,19 @@ std::unordered_map TokenStreamRe // combine current insert with prior if any at same index std::vector prevInserts = getKindOfOps(rewrites, i); - for (auto prevIop : prevInserts) { + for (auto *prevIop : prevInserts) { if (prevIop->index == iop->index) { // combine objects // convert to strings...we're in process of toString'ing // whole token buffer so no lazy eval issue with any templates iop->text = catOpText(&iop->text, &prevIop->text); // delete redundant prior insert - delete 
rewrites[prevIop->instructionIndex]; rewrites[prevIop->instructionIndex] = nullptr; + delete prevIop; } } // look for replaces where iop.index is in range; error std::vector prevReplaces = getKindOfOps(rewrites, i); - for (auto rop : prevReplaces) { + for (auto *rop : prevReplaces) { if (iop->index == rop->index) { rop->text = catOpText(&iop->text, &rop->text); delete rewrites[i]; diff --git a/runtime/Cpp/runtime/src/TokenStreamRewriter.h b/runtime/Cpp/runtime/src/TokenStreamRewriter.h old mode 100755 new mode 100644 index 102a9e946e..c22919746d --- a/runtime/Cpp/runtime/src/TokenStreamRewriter.h +++ b/runtime/Cpp/runtime/src/TokenStreamRewriter.h @@ -5,6 +5,15 @@ #pragma once +#include +#include +#include +#include +#include +#include "Token.h" +#include "misc/Interval.h" +#include "antlr4-common.h" + namespace antlr4 { /** @@ -86,8 +95,8 @@ namespace antlr4 { class ANTLR4CPP_PUBLIC TokenStreamRewriter { public: static const std::string DEFAULT_PROGRAM_NAME; - static const size_t PROGRAM_INIT_SIZE = 100; - static const size_t MIN_TOKEN_INDEX = 0; + static constexpr size_t PROGRAM_INIT_SIZE = 100; + static constexpr size_t MIN_TOKEN_INDEX = 0; TokenStreamRewriter(TokenStream *tokens); virtual ~TokenStreamRewriter(); @@ -184,7 +193,7 @@ namespace antlr4 { public: InsertBeforeOp(TokenStreamRewriter *outerInstance, size_t index, const std::string& text); - virtual size_t execute(std::string *buf) override; + size_t execute(std::string *buf) override; }; class ReplaceOp : public RewriteOperation { @@ -195,8 +204,8 @@ namespace antlr4 { size_t lastIndex; ReplaceOp(TokenStreamRewriter *outerInstance, size_t from, size_t to, const std::string& text); - virtual size_t execute(std::string *buf) override; - virtual std::string toString() override; + size_t execute(std::string *buf) override; + std::string toString() override; private: void InitializeInstanceFields(); diff --git a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp 
b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp old mode 100755 new mode 100644 index 1f18d38431..8d8b9a04fb --- a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp +++ b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp @@ -3,9 +3,14 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "misc/Interval.h" +#include "antlr4-common.h" #include "Exceptions.h" -#include "support/StringUtils.h" +#include "support/Utf8.h" #include "UnbufferedCharStream.h" @@ -13,9 +18,8 @@ using namespace antlrcpp; using namespace antlr4; using namespace antlr4::misc; -UnbufferedCharStream::UnbufferedCharStream(std::wistream &input) : _input(input) { - InitializeInstanceFields(); - +UnbufferedCharStream::UnbufferedCharStream(std::wistream &input) + : _p(0), _numMarkers(0), _lastChar(0), _lastCharBufferStart(0), _currentCharIndex(0), _input(input) { // The vector's size is what used to be n in Java code. fill(1); // prime } @@ -74,9 +78,7 @@ size_t UnbufferedCharStream::fill(size_t n) { } char32_t UnbufferedCharStream::nextChar() { - wchar_t result = 0; - _input >> result; - return result; + return _input.get(); } void UnbufferedCharStream::add(char32_t c) { @@ -101,7 +103,7 @@ size_t UnbufferedCharStream::LA(ssize_t i) { return EOF; } - if (_data[static_cast(index)] == 0xFFFF) { + if (_data[static_cast(index)] == std::char_traits::eof()) { return EOF; } @@ -178,7 +180,7 @@ std::string UnbufferedCharStream::getSourceName() const { } std::string UnbufferedCharStream::getText(const misc::Interval &interval) { - if (interval.a < 0 || interval.b >= interval.a - 1) { + if (interval.a < 0 || interval.b < interval.a - 1) { throw IllegalArgumentException("invalid interval"); } @@ -195,17 +197,17 @@ std::string UnbufferedCharStream::getText(const misc::Interval &interval) { } // convert from absolute to local index size_t i = interval.a - bufferStartIndex; - return utf32_to_utf8(_data.substr(i, interval.length())); + auto maybeUtf8 = 
Utf8::strictEncode(std::u32string_view(_data).substr(i, interval.length())); + if (!maybeUtf8.has_value()) { + throw IllegalArgumentException("Unbuffered stream contains invalid Unicode code points"); + } + return std::move(maybeUtf8).value(); } -size_t UnbufferedCharStream::getBufferStartIndex() const { - return _currentCharIndex - _p; +std::string UnbufferedCharStream::toString() const { + throw UnsupportedOperationException("Unbuffered stream cannot be materialized to a string"); } -void UnbufferedCharStream::InitializeInstanceFields() { - _p = 0; - _numMarkers = 0; - _lastChar = 0; - _lastCharBufferStart = 0; - _currentCharIndex = 0; +size_t UnbufferedCharStream::getBufferStartIndex() const { + return _currentCharIndex - _p; } diff --git a/runtime/Cpp/runtime/src/UnbufferedCharStream.h b/runtime/Cpp/runtime/src/UnbufferedCharStream.h old mode 100755 new mode 100644 index 98cdcc6142..6720355eb8 --- a/runtime/Cpp/runtime/src/UnbufferedCharStream.h +++ b/runtime/Cpp/runtime/src/UnbufferedCharStream.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "misc/Interval.h" #include "CharStream.h" namespace antlr4 { @@ -18,10 +22,10 @@ namespace antlr4 { /// The name or source of this char stream. std::string name; - UnbufferedCharStream(std::wistream &input); + explicit UnbufferedCharStream(std::wistream &input); - virtual void consume() override; - virtual size_t LA(ssize_t i) override; + void consume() override; + size_t LA(ssize_t i) override; /// /// Return a marker that we can release later. @@ -30,35 +34,32 @@ namespace antlr4 { /// protection against misuse where {@code seek()} is called on a mark or /// {@code release()} is called in the wrong order. /// - virtual ssize_t mark() override; + ssize_t mark() override; /// /// Decrement number of markers, resetting buffer if we hit 0. 
/// - virtual void release(ssize_t marker) override; - virtual size_t index() override; + void release(ssize_t marker) override; + size_t index() override; /// /// Seek to absolute character index, which might not be in the current /// sliding window. Move {@code p} to {@code index-bufferStartIndex}. /// - virtual void seek(size_t index) override; - virtual size_t size() override; - virtual std::string getSourceName() const override; - virtual std::string getText(const misc::Interval &interval) override; + void seek(size_t index) override; + size_t size() override; + std::string getSourceName() const override; + std::string getText(const misc::Interval &interval) override; + + std::string toString() const override; protected: /// A moving window buffer of the data being scanned. While there's a marker, /// we keep adding to buffer. Otherwise, resets so /// we start filling at index 0 again. // UTF-32 encoded. -#if defined(_MSC_VER) && _MSC_VER == 1900 - i32string _data; // Custom type for VS 2015. - typedef __int32 storage_type; -#else std::u32string _data; typedef char32_t storage_type; -#endif /// /// 0..n-1 index into of next character. @@ -115,9 +116,6 @@ namespace antlr4 { virtual char32_t nextChar(); virtual void add(char32_t c); size_t getBufferStartIndex() const; - - private: - void InitializeInstanceFields(); }; } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp old mode 100755 new mode 100644 index 98e952a0a4..59cc171461 --- a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp +++ b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp @@ -3,7 +3,13 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include #include "Token.h" +#include "antlr4-common.h" #include "Exceptions.h" #include "assert.h" #include "TokenSource.h" diff --git a/runtime/Cpp/runtime/src/UnbufferedTokenStream.h b/runtime/Cpp/runtime/src/UnbufferedTokenStream.h old mode 100755 new mode 100644 index 244cc8d19d..9c9d174c01 --- a/runtime/Cpp/runtime/src/UnbufferedTokenStream.h +++ b/runtime/Cpp/runtime/src/UnbufferedTokenStream.h @@ -5,6 +5,13 @@ #pragma once +#include +#include +#include +#include +#include "antlr4-common.h" +#include "misc/Interval.h" +#include "Token.h" #include "TokenStream.h" namespace antlr4 { @@ -14,22 +21,22 @@ namespace antlr4 { UnbufferedTokenStream(TokenSource *tokenSource); UnbufferedTokenStream(TokenSource *tokenSource, int bufferSize); UnbufferedTokenStream(const UnbufferedTokenStream& other) = delete; - virtual ~UnbufferedTokenStream(); + ~UnbufferedTokenStream() override; UnbufferedTokenStream& operator = (const UnbufferedTokenStream& other) = delete; - virtual Token* get(size_t i) const override; - virtual Token* LT(ssize_t i) override; - virtual size_t LA(ssize_t i) override; + Token* get(size_t i) const override; + Token* LT(ssize_t i) override; + size_t LA(ssize_t i) override; - virtual TokenSource* getTokenSource() const override; + TokenSource* getTokenSource() const override; - virtual std::string getText(const misc::Interval &interval) override; - virtual std::string getText() override; - virtual std::string getText(RuleContext *ctx) override; - virtual std::string getText(Token *start, Token *stop) override; + std::string getText(const misc::Interval &interval) override; + std::string getText() override; + std::string getText(RuleContext *ctx) override; + std::string getText(Token *start, Token *stop) override; - virtual void consume() override; + void consume() override; /// /// Return a marker that we can release later. 
@@ -38,12 +45,12 @@ namespace antlr4 { /// protection against misuse where {@code seek()} is called on a mark or /// {@code release()} is called in the wrong order. /// - virtual ssize_t mark() override; - virtual void release(ssize_t marker) override; - virtual size_t index() override; - virtual void seek(size_t index) override; - virtual size_t size() override; - virtual std::string getSourceName() const override; + ssize_t mark() override; + void release(ssize_t marker) override; + size_t index() override; + void seek(size_t index) override; + size_t size() override; + std::string getSourceName() const override; protected: /// Make sure we have 'need' elements from current position p. Last valid diff --git a/runtime/Cpp/runtime/src/Version.h b/runtime/Cpp/runtime/src/Version.h new file mode 100644 index 0000000000..179111909b --- /dev/null +++ b/runtime/Cpp/runtime/src/Version.h @@ -0,0 +1,42 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "antlr4-common.h" + +#define ANTLRCPP_VERSION_MAJOR 4 +#define ANTLRCPP_VERSION_MINOR 13 +#define ANTLRCPP_VERSION_PATCH 2 + +#define ANTLRCPP_MAKE_VERSION(major, minor, patch) ((major) * 100000 + (minor) * 1000 + (patch)) + +#define ANTLRCPP_VERSION \ + ANTLRCPP_MAKE_VERSION(ANTLRCPP_VERSION_MAJOR, ANTLRCPP_VERSION_MINOR, ANTLRCPP_VERSION_PATCH) + +#define ANTLRCPP_VERSION_STRING \ + ANTLR4CPP_STRINGIFY(ANTLRCPP_VERSION_MAJOR) "." \ + ANTLR4CPP_STRINGIFY(ANTLRCPP_VERSION_MINOR) "." \ + ANTLR4CPP_STRINGIFY(ANTLRCPP_VERSION_PATCH) diff --git a/runtime/Cpp/runtime/src/Vocabulary.cpp b/runtime/Cpp/runtime/src/Vocabulary.cpp old mode 100755 new mode 100644 index dcfa45e4be..26e854a943 100644 --- a/runtime/Cpp/runtime/src/Vocabulary.cpp +++ b/runtime/Cpp/runtime/src/Vocabulary.cpp @@ -3,6 +3,11 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include #include "Token.h" #include "Vocabulary.h" @@ -11,58 +16,18 @@ using namespace antlr4::dfa; const Vocabulary Vocabulary::EMPTY_VOCABULARY; -Vocabulary::Vocabulary(const std::vector &literalNames, const std::vector &symbolicNames) -: Vocabulary(literalNames, symbolicNames, {}) { +Vocabulary::Vocabulary(std::vector literalNames, std::vector symbolicNames) +: Vocabulary(std::move(literalNames), std::move(symbolicNames), {}) { } -Vocabulary::Vocabulary(const std::vector &literalNames, - const std::vector &symbolicNames, const std::vector &displayNames) - : _literalNames(literalNames), _symbolicNames(symbolicNames), _displayNames(displayNames), +Vocabulary::Vocabulary(std::vector literalNames, + std::vector symbolicNames, std::vector displayNames) + : _literalNames(std::move(literalNames)), _symbolicNames(std::move(symbolicNames)), _displayNames(std::move(displayNames)), _maxTokenType(std::max(_displayNames.size(), std::max(_literalNames.size(), _symbolicNames.size())) - 1) { // See note here on -1 part: https://github.com/antlr/antlr4/pull/1146 } -Vocabulary::~Vocabulary() { -} - -Vocabulary Vocabulary::fromTokenNames(const std::vector &tokenNames) { - if (tokenNames.empty()) { - return EMPTY_VOCABULARY; - } - - std::vector literalNames = tokenNames; - std::vector symbolicNames = tokenNames; - std::locale locale; - for (size_t i = 0; i < tokenNames.size(); i++) { - std::string tokenName = tokenNames[i]; - if (tokenName == "") { - continue; - } - - if (!tokenName.empty()) { - char firstChar = tokenName[0]; - if (firstChar == '\'') { - symbolicNames[i] = ""; - continue; - } else if (std::isupper(firstChar, locale)) { - literalNames[i] = ""; - continue; - } - } - - // wasn't a literal or symbolic name - literalNames[i] = ""; - symbolicNames[i] = ""; - } - - return Vocabulary(literalNames, symbolicNames, tokenNames); -} - -size_t Vocabulary::getMaxTokenType() const { - return _maxTokenType; -} - -std::string 
Vocabulary::getLiteralName(size_t tokenType) const { +std::string_view Vocabulary::getLiteralName(size_t tokenType) const { if (tokenType < _literalNames.size()) { return _literalNames[tokenType]; } @@ -70,7 +35,7 @@ std::string Vocabulary::getLiteralName(size_t tokenType) const { return ""; } -std::string Vocabulary::getSymbolicName(size_t tokenType) const { +std::string_view Vocabulary::getSymbolicName(size_t tokenType) const { if (tokenType == Token::EOF) { return "EOF"; } @@ -84,20 +49,20 @@ std::string Vocabulary::getSymbolicName(size_t tokenType) const { std::string Vocabulary::getDisplayName(size_t tokenType) const { if (tokenType < _displayNames.size()) { - std::string displayName = _displayNames[tokenType]; + std::string_view displayName = _displayNames[tokenType]; if (!displayName.empty()) { - return displayName; + return std::string(displayName); } } - std::string literalName = getLiteralName(tokenType); + std::string_view literalName = getLiteralName(tokenType); if (!literalName.empty()) { - return literalName; + return std::string(literalName); } - std::string symbolicName = getSymbolicName(tokenType); + std::string_view symbolicName = getSymbolicName(tokenType); if (!symbolicName.empty()) { - return symbolicName; + return std::string(symbolicName); } return std::to_string(tokenType); diff --git a/runtime/Cpp/runtime/src/Vocabulary.h b/runtime/Cpp/runtime/src/Vocabulary.h old mode 100755 new mode 100644 index 7dbf85cd3a..9f2e9b99b2 --- a/runtime/Cpp/runtime/src/Vocabulary.h +++ b/runtime/Cpp/runtime/src/Vocabulary.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include #include "antlr4-common.h" namespace antlr4 { @@ -12,21 +16,20 @@ namespace dfa { /// This class provides a default implementation of the /// interface. - class ANTLR4CPP_PUBLIC Vocabulary { + class ANTLR4CPP_PUBLIC Vocabulary final { public: - Vocabulary(Vocabulary const&) = default; - virtual ~Vocabulary(); - /// Gets an empty instance. 
/// /// /// No literal or symbol names are assigned to token types, so /// returns the numeric value for all tokens /// except . - static const Vocabulary EMPTY_VOCABULARY; + [[deprecated("Use the default constructor of Vocabulary instead.")]] static const Vocabulary EMPTY_VOCABULARY; Vocabulary() {} + Vocabulary(const Vocabulary&) = default; + /// /// Constructs a new instance of from the specified /// literal and symbolic token names. @@ -38,7 +41,7 @@ namespace dfa { /// /// /// - Vocabulary(const std::vector &literalNames, const std::vector &symbolicNames); + Vocabulary(std::vector literalNames, std::vector symbolicNames); /// /// Constructs a new instance of from the specified @@ -56,29 +59,14 @@ namespace dfa { /// /// /// - Vocabulary(const std::vector &literalNames, const std::vector &symbolicNames, - const std::vector &displayNames); - - /// - /// Returns a instance from the specified set of token - /// names. This method acts as a compatibility layer for the single - /// {@code tokenNames} array generated by previous releases of ANTLR. - /// - /// The resulting vocabulary instance returns {@code null} for - /// and , and the - /// value from {@code tokenNames} for the display names. - /// - /// The token names, or {@code null} if no token names are - /// available. - /// A instance which uses {@code tokenNames} for - /// the display names of tokens. - static Vocabulary fromTokenNames(const std::vector &tokenNames); + Vocabulary(std::vector literalNames, std::vector symbolicNames, + std::vector displayNames); /// /// Returns the highest token type value. It can be used to iterate from /// zero to that number, inclusively, thus querying all stored entries. /// the highest token type value - virtual size_t getMaxTokenType() const; + constexpr size_t getMaxTokenType() const { return _maxTokenType; } /// /// Gets the string literal associated with a token type. 
The string returned @@ -115,7 +103,7 @@ namespace dfa { /// /// The string literal associated with the specified token type, or /// {@code null} if no string literal is associated with the type. - virtual std::string getLiteralName(size_t tokenType) const; + std::string_view getLiteralName(size_t tokenType) const; /// /// Gets the symbolic name associated with a token type. The string returned @@ -159,7 +147,7 @@ namespace dfa { /// /// The symbolic name associated with the specified token type, or /// {@code null} if no symbolic name is associated with the type. - virtual std::string getSymbolicName(size_t tokenType) const; + std::string_view getSymbolicName(size_t tokenType) const; /// /// Gets the display name of a token type. @@ -180,7 +168,7 @@ namespace dfa { /// /// The display name of the token type, for use in error reporting or /// other user-visible messages which reference specific token types. - virtual std::string getDisplayName(size_t tokenType) const; + std::string getDisplayName(size_t tokenType) const; private: std::vector const _literalNames; diff --git a/runtime/Cpp/runtime/src/WritableToken.h b/runtime/Cpp/runtime/src/WritableToken.h old mode 100755 new mode 100644 index 56bc9d0796..07a5181a46 --- a/runtime/Cpp/runtime/src/WritableToken.h +++ b/runtime/Cpp/runtime/src/WritableToken.h @@ -5,13 +5,16 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "Token.h" namespace antlr4 { class ANTLR4CPP_PUBLIC WritableToken : public Token { public: - virtual ~WritableToken(); + ~WritableToken() override; virtual void setText(const std::string &text) = 0; virtual void setType(size_t ttype) = 0; virtual void setLine(size_t line) = 0; diff --git a/runtime/Cpp/runtime/src/antlr4-common.h b/runtime/Cpp/runtime/src/antlr4-common.h index 25d890b3f7..d7f9a65fa1 100644 --- a/runtime/Cpp/runtime/src/antlr4-common.h +++ b/runtime/Cpp/runtime/src/antlr4-common.h @@ -6,36 +6,32 @@ #pragma once #include -#include +#include #include -#include 
-#include +#include +#include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include #include #include #include -#include -#include -#include #include #include #include +#include #include #include #include #include #include #include -#include -#include -#include -#include -#include // Defines for the Guid class and other platform dependent stuff. #ifdef _WIN32 @@ -49,25 +45,12 @@ #endif #endif - #define GUID_WINDOWS - #ifdef _WIN64 typedef __int64 ssize_t; #else typedef __int32 ssize_t; #endif - #if _MSC_VER >= 1900 && _MSC_VER < 2000 - // VS 2015 has a known bug when using std::codecvt_utf8 - // so we have to temporarily use __int32 instead. - // https://connect.microsoft.com/VisualStudio/feedback/details/1403302/unresolved-external-when-using-codecvt-utf8 - typedef std::basic_string<__int32> i32string; - - typedef i32string UTF32String; - #else - typedef std::u32string UTF32String; - #endif - #ifdef ANTLR4CPP_EXPORTS #define ANTLR4CPP_PUBLIC __declspec(dllexport) #else @@ -78,26 +61,13 @@ #endif #endif - #if defined(_MSC_VER) && !defined(__clang__) - // clang-cl should escape this to prevent [ignored-attributes]. - namespace std { - class ANTLR4CPP_PUBLIC exception; // Prevents warning C4275 from MSVC. 
- } // namespace std - #endif - #elif defined(__APPLE__) - typedef std::u32string UTF32String; - - #define GUID_CFUUID #if __GNUC__ >= 4 #define ANTLR4CPP_PUBLIC __attribute__ ((visibility ("default"))) #else #define ANTLR4CPP_PUBLIC #endif #else - typedef std::u32string UTF32String; - - #define GUID_LIBUUID #if __GNUC__ >= 6 #define ANTLR4CPP_PUBLIC __attribute__ ((visibility ("default"))) #else @@ -105,28 +75,22 @@ #endif #endif -#include "support/guid.h" -#include "support/Declarations.h" +#ifdef __has_builtin +#define ANTLR4CPP_HAVE_BUILTIN(x) __has_builtin(x) +#else +#define ANTLR4CPP_HAVE_BUILTIN(x) 0 +#endif -#if !defined(HAS_NOEXCEPT) - #if defined(__clang__) - #if __has_feature(cxx_noexcept) - #define HAS_NOEXCEPT - #endif - #else - #if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46 || \ - defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 - #define HAS_NOEXCEPT - #endif - #endif +#define ANTLR4CPP_INTERNAL_STRINGIFY(x) #x +#define ANTLR4CPP_STRINGIFY(x) ANTLR4CPP_INTERNAL_STRINGIFY(x) - #ifdef HAS_NOEXCEPT - #define NOEXCEPT noexcept - #else - #define NOEXCEPT - #endif +// We use everything from the C++ standard library by default. +#ifndef ANTLR4CPP_USING_ABSEIL +#define ANTLR4CPP_USING_ABSEIL 0 #endif +#include "support/Declarations.h" + // We have to undefine this symbol as ANTLR will use this name for own members and even // generated functions. Because EOF is a global macro we cannot use e.g. a namespace scope to disambiguate. #ifdef EOF diff --git a/runtime/Cpp/runtime/src/antlr4-runtime.h b/runtime/Cpp/runtime/src/antlr4-runtime.h index d58e4591e9..6774982e43 100644 --- a/runtime/Cpp/runtime/src/antlr4-runtime.h +++ b/runtime/Cpp/runtime/src/antlr4-runtime.h @@ -7,6 +7,8 @@ // This is the umbrella header for all ANTLR4 C++ runtime headers. 
+// IWYU pragma: begin_exports + #include "antlr4-common.h" #include "ANTLRErrorListener.h" @@ -49,6 +51,7 @@ #include "TokenStreamRewriter.h" #include "UnbufferedCharStream.h" #include "UnbufferedTokenStream.h" +#include "Version.h" #include "Vocabulary.h" #include "Vocabulary.h" #include "WritableToken.h" @@ -57,11 +60,9 @@ #include "atn/ATNConfigSet.h" #include "atn/ATNDeserializationOptions.h" #include "atn/ATNDeserializer.h" -#include "atn/ATNSerializer.h" #include "atn/ATNSimulator.h" #include "atn/ATNState.h" #include "atn/ATNType.h" -#include "atn/AbstractPredicateTransition.h" #include "atn/ActionTransition.h" #include "atn/AmbiguityInfo.h" #include "atn/ArrayPredictionContext.h" @@ -74,7 +75,6 @@ #include "atn/DecisionEventInfo.h" #include "atn/DecisionInfo.h" #include "atn/DecisionState.h" -#include "atn/EmptyPredictionContext.h" #include "atn/EpsilonTransition.h" #include "atn/ErrorInfo.h" #include "atn/LL1Analyzer.h" @@ -98,12 +98,16 @@ #include "atn/OrderedATNConfigSet.h" #include "atn/ParseInfo.h" #include "atn/ParserATNSimulator.h" +#include "atn/ParserATNSimulatorOptions.h" #include "atn/PlusBlockStartState.h" #include "atn/PlusLoopbackState.h" #include "atn/PrecedencePredicateTransition.h" #include "atn/PredicateEvalInfo.h" #include "atn/PredicateTransition.h" #include "atn/PredictionContext.h" +#include "atn/PredictionContextCache.h" +#include "atn/PredictionContextMergeCache.h" +#include "atn/PredictionContextMergeCacheOptions.h" #include "atn/PredictionMode.h" #include "atn/ProfilingATNSimulator.h" #include "atn/RangeTransition.h" @@ -111,6 +115,7 @@ #include "atn/RuleStopState.h" #include "atn/RuleTransition.h" #include "atn/SemanticContext.h" +#include "atn/SerializedATNView.h" #include "atn/SetTransition.h" #include "atn/SingletonPredictionContext.h" #include "atn/StarBlockStartState.h" @@ -131,9 +136,8 @@ #include "support/Any.h" #include "support/Arrays.h" #include "support/BitSet.h" +#include "support/Casts.h" #include 
"support/CPPUtils.h" -#include "support/StringUtils.h" -#include "support/guid.h" #include "tree/AbstractParseTreeVisitor.h" #include "tree/ErrorNode.h" #include "tree/ErrorNodeImpl.h" @@ -163,5 +167,6 @@ #include "tree/xpath/XPathTokenElement.h" #include "tree/xpath/XPathWildcardAnywhereElement.h" #include "tree/xpath/XPathWildcardElement.h" +#include "internal/Synchronization.h" - +// IWYU pragma: end_exports diff --git a/runtime/Cpp/runtime/src/atn/ATN.cpp b/runtime/Cpp/runtime/src/atn/ATN.cpp old mode 100755 new mode 100644 index 21924a27de..d58b1a144a --- a/runtime/Cpp/runtime/src/atn/ATN.cpp +++ b/runtime/Cpp/runtime/src/atn/ATN.cpp @@ -3,7 +3,10 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include #include "atn/LL1Analyzer.h" +#include "atn/ATNState.h" #include "Token.h" #include "atn/RuleTransition.h" #include "misc/IntervalSet.h" @@ -18,26 +21,12 @@ using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::internal; using namespace antlrcpp; -ATN::ATN() : ATN(ATNType::LEXER, 0) { -} +ATN::ATN() : ATN(ATNType::LEXER, 0) {} -ATN::ATN(ATN &&other) { - // All source vectors are implicitly cleared by the moves. 
- states = std::move(other.states); - decisionToState = std::move(other.decisionToState); - ruleToStartState = std::move(other.ruleToStartState); - ruleToStopState = std::move(other.ruleToStopState); - grammarType = std::move(other.grammarType); - maxTokenType = std::move(other.maxTokenType); - ruleToTokenType = std::move(other.ruleToTokenType); - lexerActions = std::move(other.lexerActions); - modeToStartState = std::move(other.modeToStartState); -} - -ATN::ATN(ATNType grammarType_, size_t maxTokenType_) : grammarType(grammarType_), maxTokenType(maxTokenType_) { -} +ATN::ATN(ATNType grammarType_, size_t maxTokenType_) : grammarType(grammarType_), maxTokenType(maxTokenType_) {} ATN::~ATN() { for (ATNState *state : states) { @@ -45,42 +34,6 @@ ATN::~ATN() { } } -/** - * Required to be defined (even though not used) as we have an explicit move assignment operator. - */ -ATN& ATN::operator = (ATN &other) NOEXCEPT { - states = other.states; - decisionToState = other.decisionToState; - ruleToStartState = other.ruleToStartState; - ruleToStopState = other.ruleToStopState; - grammarType = other.grammarType; - maxTokenType = other.maxTokenType; - ruleToTokenType = other.ruleToTokenType; - lexerActions = other.lexerActions; - modeToStartState = other.modeToStartState; - - return *this; -} - -/** - * Explicit move assignment operator to make this the preferred assignment. With implicit copy/move assignment - * operators it seems the copy operator is preferred causing trouble when releasing the allocated ATNState instances. - */ -ATN& ATN::operator = (ATN &&other) NOEXCEPT { - // All source vectors are implicitly cleared by the moves. 
- states = std::move(other.states); - decisionToState = std::move(other.decisionToState); - ruleToStartState = std::move(other.ruleToStartState); - ruleToStopState = std::move(other.ruleToStopState); - grammarType = std::move(other.grammarType); - maxTokenType = std::move(other.maxTokenType); - ruleToTokenType = std::move(other.ruleToTokenType); - lexerActions = std::move(other.lexerActions); - modeToStartState = std::move(other.modeToStartState); - - return *this; -} - misc::IntervalSet ATN::nextTokens(ATNState *s, RuleContext *ctx) const { LL1Analyzer analyzer(*this); return analyzer.LOOK(s, ctx); @@ -89,7 +42,7 @@ misc::IntervalSet ATN::nextTokens(ATNState *s, RuleContext *ctx) const { misc::IntervalSet const& ATN::nextTokens(ATNState *s) const { if (!s->_nextTokenUpdated) { - std::unique_lock lock { _mutex }; + UniqueLock lock(_mutex); if (!s->_nextTokenUpdated) { s->_nextTokenWithinRule = nextTokens(s, nullptr); s->_nextTokenUpdated = true; @@ -146,7 +99,7 @@ misc::IntervalSet ATN::getExpectedTokens(size_t stateNumber, RuleContext *contex expected.remove(Token::EPSILON); while (ctx && ctx->invokingState != ATNState::INVALID_STATE_NUMBER && following.contains(Token::EPSILON)) { ATNState *invokingState = states.at(ctx->invokingState); - RuleTransition *rt = static_cast(invokingState->transitions[0]); + const RuleTransition *rt = static_cast(invokingState->transitions[0].get()); following = nextTokens(rt->followState); expected.addAll(following); expected.remove(Token::EPSILON); @@ -183,7 +136,7 @@ std::string ATN::toString() const { ss << "states (" << states.size() << ") {" << std::endl; size_t index = 0; - for (auto state : states) { + for (auto *state : states) { if (state == nullptr) { ss << " " << index++ << ": nul" << std::endl; } else { @@ -193,7 +146,7 @@ std::string ATN::toString() const { } index = 0; - for (auto state : decisionToState) { + for (auto *state : decisionToState) { if (state == nullptr) { ss << " " << index++ << ": nul" << std::endl; } 
else { diff --git a/runtime/Cpp/runtime/src/atn/ATN.h b/runtime/Cpp/runtime/src/atn/ATN.h old mode 100755 new mode 100644 index 9c40cee306..0a9045b62f --- a/runtime/Cpp/runtime/src/atn/ATN.h +++ b/runtime/Cpp/runtime/src/atn/ATN.h @@ -5,20 +5,45 @@ #pragma once +#include +#include +#include #include "RuleContext.h" +#include "misc/IntervalSet.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" +#include "internal/Synchronization.h" + +// GCC generates a warning when forward-declaring ATN if ATN has already been +// declared due to the attributes added by ANTLR4CPP_PUBLIC. +// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=39159 +// Add constant that can be checked so forward-declarations can be omitted. +#define ANTLR4CPP_ATN_DECLARED namespace antlr4 { namespace atn { + class LexerATNSimulator; + class ParserATNSimulator; + class ANTLR4CPP_PUBLIC ATN { public: - static const size_t INVALID_ALT_NUMBER = 0; + static constexpr size_t INVALID_ALT_NUMBER = 0; /// Used for runtime deserialization of ATNs from strings. ATN(); - ATN(ATN &&other); + ATN(ATNType grammarType, size_t maxTokenType); - virtual ~ATN(); + + ATN(const ATN&) = delete; + + ATN(ATN&&) = delete; + + ~ATN(); + + ATN& operator=(const ATN&) = delete; + + ATN& operator=(ATN&&) = delete; std::vector states; @@ -50,37 +75,34 @@ namespace atn { /// For lexer ATNs, this is an array of {@link LexerAction} objects which may /// be referenced by action transitions in the ATN. - std::vector> lexerActions; + std::vector> lexerActions; std::vector modeToStartState; - ATN& operator = (ATN &other) NOEXCEPT; - ATN& operator = (ATN &&other) NOEXCEPT; - /// /// Compute the set of valid tokens that can occur starting in state {@code s}. /// If {@code ctx} is null, the set of tokens will not include what can follow /// the rule surrounding {@code s}. In other words, the set will be /// restricted to tokens reachable staying within {@code s}'s rule. 
/// - virtual misc::IntervalSet nextTokens(ATNState *s, RuleContext *ctx) const; + misc::IntervalSet nextTokens(ATNState *s, RuleContext *ctx) const; /// /// Compute the set of valid tokens that can occur starting in {@code s} and /// staying in same rule. is in set if we reach end of /// rule. /// - virtual misc::IntervalSet const& nextTokens(ATNState *s) const; + misc::IntervalSet const& nextTokens(ATNState *s) const; - virtual void addState(ATNState *state); + void addState(ATNState *state); - virtual void removeState(ATNState *state); + void removeState(ATNState *state); - virtual int defineDecisionState(DecisionState *s); + int defineDecisionState(DecisionState *s); - virtual DecisionState *getDecisionState(size_t decision) const; + DecisionState *getDecisionState(size_t decision) const; - virtual size_t getNumberOfDecisions() const; + size_t getNumberOfDecisions() const; /// /// Computes the set of input symbols which could follow ATN state number @@ -100,12 +122,17 @@ namespace atn { /// specified state in the specified context. /// if the ATN does not contain a state with /// number {@code stateNumber} - virtual misc::IntervalSet getExpectedTokens(size_t stateNumber, RuleContext *context) const; + misc::IntervalSet getExpectedTokens(size_t stateNumber, RuleContext *context) const; std::string toString() const; private: - mutable std::mutex _mutex; + friend class LexerATNSimulator; + friend class ParserATNSimulator; + + mutable internal::Mutex _mutex; + mutable internal::SharedMutex _stateMutex; + mutable internal::SharedMutex _edgeMutex; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/ATNConfig.cpp b/runtime/Cpp/runtime/src/atn/ATNConfig.cpp old mode 100755 new mode 100644 index a775ccbfa8..f3ce765fcc --- a/runtime/Cpp/runtime/src/atn/ATNConfig.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNConfig.cpp @@ -1,9 +1,15 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "misc/MurmurHash.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/PredictionContext.h" #include "SemanticContext.h" @@ -11,43 +17,40 @@ using namespace antlr4::atn; -const size_t ATNConfig::SUPPRESS_PRECEDENCE_FILTER = 0x40000000; +namespace { -ATNConfig::ATNConfig(ATNState *state_, size_t alt_, Ref const& context_) - : ATNConfig(state_, alt_, context_, SemanticContext::NONE) { -} +/** + * This field stores the bit mask for implementing the + * {@link #isPrecedenceFilterSuppressed} property as a bit within the + * existing {@link #reachesIntoOuterContext} field. + */ +inline constexpr size_t SUPPRESS_PRECEDENCE_FILTER = 0x40000000; -ATNConfig::ATNConfig(ATNState *state_, size_t alt_, Ref const& context_, Ref const& semanticContext_) - : state(state_), alt(alt_), context(context_), semanticContext(semanticContext_) { - reachesIntoOuterContext = 0; } -ATNConfig::ATNConfig(Ref const& c) : ATNConfig(c, c->state, c->context, c->semanticContext) { -} +ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref context) + : ATNConfig(state, alt, std::move(context), 0, SemanticContext::Empty::Instance) {} -ATNConfig::ATNConfig(Ref const& c, ATNState *state_) : ATNConfig(c, state_, c->context, c->semanticContext) { -} +ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref context, Ref semanticContext) + : ATNConfig(state, alt, std::move(context), 0, std::move(semanticContext)) {} -ATNConfig::ATNConfig(Ref const& c, ATNState *state, Ref const& semanticContext) - : ATNConfig(c, state, c->context, semanticContext) { -} +ATNConfig::ATNConfig(ATNConfig const& other, Ref semanticContext) + : ATNConfig(other.state, other.alt, other.context, other.reachesIntoOuterContext, std::move(semanticContext)) {} -ATNConfig::ATNConfig(Ref const& c, Ref const& semanticContext) - : 
ATNConfig(c, c->state, c->context, semanticContext) { -} +ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state) + : ATNConfig(state, other.alt, other.context, other.reachesIntoOuterContext, other.semanticContext) {} -ATNConfig::ATNConfig(Ref const& c, ATNState *state, Ref const& context) - : ATNConfig(c, state, context, c->semanticContext) { -} +ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state, Ref semanticContext) + : ATNConfig(state, other.alt, other.context, other.reachesIntoOuterContext, std::move(semanticContext)) {} -ATNConfig::ATNConfig(Ref const& c, ATNState *state, Ref const& context, - Ref const& semanticContext) - : state(state), alt(c->alt), context(context), reachesIntoOuterContext(c->reachesIntoOuterContext), - semanticContext(semanticContext) { -} +ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state, Ref context) + : ATNConfig(state, other.alt, std::move(context), other.reachesIntoOuterContext, other.semanticContext) {} -ATNConfig::~ATNConfig() { -} +ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state, Ref context, Ref semanticContext) + : ATNConfig(state, other.alt, std::move(context), other.reachesIntoOuterContext, std::move(semanticContext)) {} + +ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref context, size_t reachesIntoOuterContext, Ref semanticContext) + : state(state), alt(alt), context(std::move(context)), reachesIntoOuterContext(reachesIntoOuterContext), semanticContext(std::move(semanticContext)) {} size_t ATNConfig::hashCode() const { size_t hashCode = misc::MurmurHash::initialize(7); @@ -75,22 +78,18 @@ void ATNConfig::setPrecedenceFilterSuppressed(bool value) { } } -bool ATNConfig::operator == (const ATNConfig &other) const { +bool ATNConfig::operator==(const ATNConfig &other) const { return state->stateNumber == other.state->stateNumber && alt == other.alt && ((context == other.context) || (*context == *other.context)) && *semanticContext == *other.semanticContext && 
isPrecedenceFilterSuppressed() == other.isPrecedenceFilterSuppressed(); } -bool ATNConfig::operator != (const ATNConfig &other) const { - return !operator==(other); -} - -std::string ATNConfig::toString() { +std::string ATNConfig::toString() const { return toString(true); } -std::string ATNConfig::toString(bool showAlt) { +std::string ATNConfig::toString(bool showAlt) const { std::stringstream ss; ss << "("; @@ -101,13 +100,13 @@ std::string ATNConfig::toString(bool showAlt) { if (context) { ss << ",[" << context->toString() << "]"; } - if (semanticContext != nullptr && semanticContext != SemanticContext::NONE) { - ss << "," << semanticContext.get(); + if (semanticContext != nullptr && semanticContext != SemanticContext::Empty::Instance) { + ss << "," << semanticContext->toString(); } if (getOuterContextDepth() > 0) { ss << ",up=" << getOuterContextDepth(); } - ss << ')'; + ss << ")"; return ss.str(); } diff --git a/runtime/Cpp/runtime/src/atn/ATNConfig.h b/runtime/Cpp/runtime/src/atn/ATNConfig.h old mode 100755 new mode 100644 index a78b5c0c55..6484f5e213 --- a/runtime/Cpp/runtime/src/atn/ATNConfig.h +++ b/runtime/Cpp/runtime/src/atn/ATNConfig.h @@ -5,6 +5,16 @@ #pragma once +#include +#include +#include +#include +#include + +#include "antlr4-common.h" +#include "atn/ATNState.h" +#include "atn/SemanticContext.h" + namespace antlr4 { namespace atn { @@ -20,32 +30,39 @@ namespace atn { public: struct Hasher { + size_t operator()(Ref const& k) const { + return k->hashCode(); + } + size_t operator()(ATNConfig const& k) const { return k.hashCode(); } }; struct Comparer { + bool operator()(Ref const& lhs, Ref const& rhs) const { + return (lhs == rhs) || (*lhs == *rhs); + } + bool operator()(ATNConfig const& lhs, ATNConfig const& rhs) const { return (&lhs == &rhs) || (lhs == rhs); } }; - using Set = std::unordered_set, Hasher, Comparer>; /// The ATN state associated with this configuration. 
- ATNState * state; + ATNState *state = nullptr; /// What alt (or lexer rule) is predicted by this configuration. - const size_t alt; + const size_t alt = 0; /// The stack of invoking states leading to the rule/states associated /// with this config. We track only those contexts pushed during /// execution of the ATN simulator. /// /// Can be shared between multiple ANTConfig instances. - Ref context; + Ref context; /** * We cannot execute predicates dependent upon local context unless @@ -70,23 +87,25 @@ namespace atn { * {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are * completely unaffected by the change.

    */ - size_t reachesIntoOuterContext; + size_t reachesIntoOuterContext = 0; /// Can be shared between multiple ATNConfig instances. - Ref semanticContext; + Ref semanticContext; - ATNConfig(ATNState *state, size_t alt, Ref const& context); - ATNConfig(ATNState *state, size_t alt, Ref const& context, Ref const& semanticContext); + ATNConfig(ATNState *state, size_t alt, Ref context); + ATNConfig(ATNState *state, size_t alt, Ref context, Ref semanticContext); - ATNConfig(Ref const& c); // dup - ATNConfig(Ref const& c, ATNState *state); - ATNConfig(Ref const& c, ATNState *state, Ref const& semanticContext); - ATNConfig(Ref const& c, Ref const& semanticContext); - ATNConfig(Ref const& c, ATNState *state, Ref const& context); - ATNConfig(Ref const& c, ATNState *state, Ref const& context, Ref const& semanticContext); + ATNConfig(ATNConfig const& other, Ref semanticContext); + ATNConfig(ATNConfig const& other, ATNState *state); + ATNConfig(ATNConfig const& other, ATNState *state, Ref semanticContext); + ATNConfig(ATNConfig const& other, ATNState *state, Ref context); + ATNConfig(ATNConfig const& other, ATNState *state, Ref context, Ref semanticContext); ATNConfig(ATNConfig const&) = default; - virtual ~ATNConfig(); + + ATNConfig(ATNConfig&&) = default; + + virtual ~ATNConfig() = default; virtual size_t hashCode() const; @@ -95,26 +114,21 @@ namespace atn { * as it existed prior to the introduction of the * {@link #isPrecedenceFilterSuppressed} method. */ - size_t getOuterContextDepth() const ; + size_t getOuterContextDepth() const; bool isPrecedenceFilterSuppressed() const; void setPrecedenceFilterSuppressed(bool value); /// An ATN configuration is equal to another if both have /// the same state, they predict the same alternative, and /// syntactic/semantic contexts are the same. 
- bool operator == (const ATNConfig &other) const; - bool operator != (const ATNConfig &other) const; + bool operator==(const ATNConfig &other) const; + bool operator!=(const ATNConfig &other) const; - virtual std::string toString(); - std::string toString(bool showAlt); + virtual std::string toString() const; + std::string toString(bool showAlt) const; private: - /** - * This field stores the bit mask for implementing the - * {@link #isPrecedenceFilterSuppressed} property as a bit within the - * existing {@link #reachesIntoOuterContext} field. - */ - static const size_t SUPPRESS_PRECEDENCE_FILTER; + ATNConfig(ATNState *state, size_t alt, Ref context, size_t reachesIntoOuterContext, Ref semanticContext); }; } // namespace atn @@ -139,7 +153,7 @@ namespace std { size_t operator() (const std::vector> &vector) const { std::size_t seed = 0; - for (auto &config : vector) { + for (const auto &config : vector) { seed ^= config->hashCode() + 0x9e3779b9 + (seed << 6) + (seed >> 2); } return seed; diff --git a/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp b/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp old mode 100755 new mode 100644 index 7a480a480d..5334d7a992 --- a/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp @@ -1,9 +1,16 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include #include "atn/PredictionContext.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/ATNConfig.h" #include "atn/ATNSimulator.h" #include "Exceptions.h" @@ -15,40 +22,44 @@ using namespace antlr4::atn; using namespace antlrcpp; -ATNConfigSet::ATNConfigSet(bool fullCtx) : fullCtx(fullCtx) { - InitializeInstanceFields(); -} +namespace { -ATNConfigSet::ATNConfigSet(const Ref &old) : ATNConfigSet(old->fullCtx) { - addAll(old); - uniqueAlt = old->uniqueAlt; - conflictingAlts = old->conflictingAlts; - hasSemanticContext = old->hasSemanticContext; - dipsIntoOuterContext = old->dipsIntoOuterContext; } -ATNConfigSet::~ATNConfigSet() { +ATNConfigSet::ATNConfigSet() : ATNConfigSet(true) {} + +ATNConfigSet::ATNConfigSet(const ATNConfigSet &other) + : fullCtx(other.fullCtx), _configLookup(other._configLookup.bucket_count(), ATNConfigHasher{this}, ATNConfigComparer{this}) { + addAll(other); + uniqueAlt = other.uniqueAlt; + conflictingAlts = other.conflictingAlts; + hasSemanticContext = other.hasSemanticContext; + dipsIntoOuterContext = other.dipsIntoOuterContext; } +ATNConfigSet::ATNConfigSet(bool fullCtx) + : fullCtx(fullCtx), _configLookup(0, ATNConfigHasher{this}, ATNConfigComparer{this}) {} + bool ATNConfigSet::add(const Ref &config) { return add(config, nullptr); } bool ATNConfigSet::add(const Ref &config, PredictionContextMergeCache *mergeCache) { + assert(config); + if (_readonly) { throw IllegalStateException("This set is readonly"); } - if (config->semanticContext != SemanticContext::NONE) { + if (config->semanticContext != SemanticContext::Empty::Instance) { hasSemanticContext = true; } if (config->getOuterContextDepth() > 0) { dipsIntoOuterContext = true; } - size_t hash = getHash(config.get()); - ATNConfig *existing = _configLookup[hash]; - if (existing == nullptr) { - _configLookup[hash] = config.get(); + auto existing = _configLookup.find(config.get()); + if (existing == _configLookup.end()) { + 
_configLookup.insert(config.get()); _cachedHashCode = 0; configs.push_back(config); // track order here @@ -57,32 +68,33 @@ bool ATNConfigSet::add(const Ref &config, PredictionContextMergeCache // a previous (s,i,pi,_), merge with it and save result bool rootIsWildcard = !fullCtx; - Ref merged = PredictionContext::merge(existing->context, config->context, rootIsWildcard, mergeCache); + Ref merged = PredictionContext::merge((*existing)->context, config->context, rootIsWildcard, mergeCache); // no need to check for existing.context, config.context in cache // since only way to create new graphs is "call rule" and here. We // cache at both places. - existing->reachesIntoOuterContext = std::max(existing->reachesIntoOuterContext, config->reachesIntoOuterContext); + (*existing)->reachesIntoOuterContext = std::max((*existing)->reachesIntoOuterContext, config->reachesIntoOuterContext); // make sure to preserve the precedence filter suppression during the merge if (config->isPrecedenceFilterSuppressed()) { - existing->setPrecedenceFilterSuppressed(true); + (*existing)->setPrecedenceFilterSuppressed(true); } - existing->context = merged; // replace context; no need to alt mapping + (*existing)->context = std::move(merged); // replace context; no need to alt mapping return true; } -bool ATNConfigSet::addAll(const Ref &other) { - for (auto &c : other->configs) { +bool ATNConfigSet::addAll(const ATNConfigSet &other) { + for (const auto &c : other.configs) { add(c); } return false; } -std::vector ATNConfigSet::getStates() { +std::vector ATNConfigSet::getStates() const { std::vector states; - for (auto c : configs) { + states.reserve(configs.size()); + for (const auto &c : configs) { states.push_back(c->state); } return states; @@ -97,41 +109,44 @@ std::vector ATNConfigSet::getStates() { * @since 4.3 */ -BitSet ATNConfigSet::getAlts() { +BitSet ATNConfigSet::getAlts() const { BitSet alts; - for (ATNConfig config : configs) { - alts.set(config.alt); + for (const auto &config : 
configs) { + alts.set(config->alt); } return alts; } -std::vector> ATNConfigSet::getPredicates() { - std::vector> preds; - for (auto c : configs) { - if (c->semanticContext != SemanticContext::NONE) { +std::vector> ATNConfigSet::getPredicates() const { + std::vector> preds; + preds.reserve(configs.size()); + for (const auto &c : configs) { + if (c->semanticContext != SemanticContext::Empty::Instance) { preds.push_back(c->semanticContext); } } return preds; } -Ref ATNConfigSet::get(size_t i) const { +const Ref& ATNConfigSet::get(size_t i) const { return configs[i]; } void ATNConfigSet::optimizeConfigs(ATNSimulator *interpreter) { + assert(interpreter); + if (_readonly) { throw IllegalStateException("This set is readonly"); } if (_configLookup.empty()) return; - for (auto &config : configs) { + for (const auto &config : configs) { config->context = interpreter->getCachedContext(config->context); } } -bool ATNConfigSet::operator == (const ATNConfigSet &other) { +bool ATNConfigSet::equals(const ATNConfigSet &other) const { if (&other == this) { return true; } @@ -147,22 +162,23 @@ bool ATNConfigSet::operator == (const ATNConfigSet &other) { return Arrays::equals(configs, other.configs); } -size_t ATNConfigSet::hashCode() { - if (!isReadonly() || _cachedHashCode == 0) { - _cachedHashCode = 1; - for (auto &i : configs) { - _cachedHashCode = 31 * _cachedHashCode + i->hashCode(); // Same as Java's list hashCode impl. +size_t ATNConfigSet::hashCode() const { + size_t cachedHashCode = _cachedHashCode.load(std::memory_order_relaxed); + if (!isReadonly() || cachedHashCode == 0) { + cachedHashCode = 1; + for (const auto &i : configs) { + cachedHashCode = 31 * cachedHashCode + i->hashCode(); // Same as Java's list hashCode impl. 
} + _cachedHashCode.store(cachedHashCode, std::memory_order_relaxed); } - - return _cachedHashCode; + return cachedHashCode; } -size_t ATNConfigSet::size() { +size_t ATNConfigSet::size() const { return configs.size(); } -bool ATNConfigSet::isEmpty() { +bool ATNConfigSet::isEmpty() const { return configs.empty(); } @@ -175,54 +191,50 @@ void ATNConfigSet::clear() { _configLookup.clear(); } -bool ATNConfigSet::isReadonly() { +bool ATNConfigSet::isReadonly() const { return _readonly; } void ATNConfigSet::setReadonly(bool readonly) { _readonly = readonly; - _configLookup.clear(); + LookupContainer(0, ATNConfigHasher{this}, ATNConfigComparer{this}).swap(_configLookup); } -std::string ATNConfigSet::toString() { +std::string ATNConfigSet::toString() const { std::stringstream ss; ss << "["; for (size_t i = 0; i < configs.size(); i++) { + if ( i>0 ) ss << ", "; ss << configs[i]->toString(); } ss << "]"; if (hasSemanticContext) { - ss << ",hasSemanticContext = " << hasSemanticContext; + ss << ",hasSemanticContext=" << (hasSemanticContext?"true":"false"); } if (uniqueAlt != ATN::INVALID_ALT_NUMBER) { - ss << ",uniqueAlt = " << uniqueAlt; + ss << ",uniqueAlt=" << uniqueAlt; } - if (conflictingAlts.size() > 0) { - ss << ",conflictingAlts = "; + if (conflictingAlts.count() > 0) { + ss << ",conflictingAlts="; ss << conflictingAlts.toString(); } if (dipsIntoOuterContext) { - ss << ", dipsIntoOuterContext"; + ss << ",dipsIntoOuterContext"; } return ss.str(); } -size_t ATNConfigSet::getHash(ATNConfig *c) { +size_t ATNConfigSet::hashCode(const ATNConfig &other) const { size_t hashCode = 7; - hashCode = 31 * hashCode + c->state->stateNumber; - hashCode = 31 * hashCode + c->alt; - hashCode = 31 * hashCode + c->semanticContext->hashCode(); + hashCode = 31 * hashCode + other.state->stateNumber; + hashCode = 31 * hashCode + other.alt; + hashCode = 31 * hashCode + other.semanticContext->hashCode(); return hashCode; } -void ATNConfigSet::InitializeInstanceFields() { - uniqueAlt = 0; - 
hasSemanticContext = false; - dipsIntoOuterContext = false; - - _readonly = false; - _cachedHashCode = 0; +bool ATNConfigSet::equals(const ATNConfig &lhs, const ATNConfig &rhs) const { + return lhs.state->stateNumber == rhs.state->stateNumber && lhs.alt == rhs.alt && *lhs.semanticContext == *rhs.semanticContext; } diff --git a/runtime/Cpp/runtime/src/atn/ATNConfigSet.h b/runtime/Cpp/runtime/src/atn/ATNConfigSet.h old mode 100755 new mode 100644 index 850a07c129..2e4a38446c --- a/runtime/Cpp/runtime/src/atn/ATNConfigSet.h +++ b/runtime/Cpp/runtime/src/atn/ATNConfigSet.h @@ -5,8 +5,17 @@ #pragma once +#include +#include +#include +#include + #include "support/BitSet.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/PredictionContext.h" +#include "atn/ATNConfig.h" +#include "FlatHashSet.h" namespace antlr4 { namespace atn { @@ -20,7 +29,7 @@ namespace atn { // TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation // TODO: can we track conflicts as they are added to save scanning configs later? - size_t uniqueAlt; + size_t uniqueAlt = 0; /** Currently this is only used when we detect SLL conflict; this does * not necessarily represent the ambiguous alternatives. In fact, @@ -31,20 +40,25 @@ namespace atn { // Used in parser and lexer. In lexer, it indicates we hit a pred // while computing a closure operation. Don't make a DFA state from this. - bool hasSemanticContext; - bool dipsIntoOuterContext; + bool hasSemanticContext = false; + bool dipsIntoOuterContext = false; /// Indicates that this configuration set is part of a full context /// LL prediction. It will be used to determine how to merge $. With SLL /// it's a wildcard whereas it is not for LL context merge. 
- const bool fullCtx; + const bool fullCtx = true; + + ATNConfigSet(); + + ATNConfigSet(const ATNConfigSet &other); - ATNConfigSet(bool fullCtx = true); - ATNConfigSet(const Ref &old); + ATNConfigSet(ATNConfigSet&&) = delete; - virtual ~ATNConfigSet(); + explicit ATNConfigSet(bool fullCtx); - virtual bool add(const Ref &config); + virtual ~ATNConfigSet() = default; + + bool add(const Ref &config); /// /// Adding a new config means merging contexts with existing configs for @@ -56,9 +70,11 @@ namespace atn { /// This method updates and /// when necessary. /// - virtual bool add(const Ref &config, PredictionContextMergeCache *mergeCache); + bool add(const Ref &config, PredictionContextMergeCache *mergeCache); + + bool addAll(const ATNConfigSet &other); - virtual std::vector getStates(); + std::vector getStates() const; /** * Gets the complete set of represented alternatives for the configuration @@ -68,43 +84,79 @@ namespace atn { * * @since 4.3 */ - antlrcpp::BitSet getAlts(); - virtual std::vector> getPredicates(); + antlrcpp::BitSet getAlts() const; + std::vector> getPredicates() const; + + const Ref& get(size_t i) const; - virtual Ref get(size_t i) const; + void optimizeConfigs(ATNSimulator *interpreter); - virtual void optimizeConfigs(ATNSimulator *interpreter); + size_t size() const; + bool isEmpty() const; + void clear(); + bool isReadonly() const; + void setReadonly(bool readonly); + + virtual size_t hashCode() const; + + virtual bool equals(const ATNConfigSet &other) const; + + virtual std::string toString() const; + + private: + struct ATNConfigHasher final { + const ATNConfigSet* atnConfigSet; - bool addAll(const Ref &other); + size_t operator()(const ATNConfig *other) const { + assert(other != nullptr); + return atnConfigSet->hashCode(*other); + } + }; - bool operator == (const ATNConfigSet &other); - virtual size_t hashCode(); - virtual size_t size(); - virtual bool isEmpty(); - virtual void clear(); - virtual bool isReadonly(); - virtual void 
setReadonly(bool readonly); - virtual std::string toString(); + struct ATNConfigComparer final { + const ATNConfigSet* atnConfigSet; + + bool operator()(const ATNConfig *lhs, const ATNConfig *rhs) const { + assert(lhs != nullptr); + assert(rhs != nullptr); + return atnConfigSet->equals(*lhs, *rhs); + } + }; + + mutable std::atomic _cachedHashCode = 0; - protected: /// Indicates that the set of configurations is read-only. Do not /// allow any code to manipulate the set; DFA states will point at /// the sets and they must not change. This does not protect the other /// fields; in particular, conflictingAlts is set after /// we've made this readonly. - bool _readonly; + bool _readonly = false; - virtual size_t getHash(ATNConfig *c); // Hash differs depending on set type. + virtual size_t hashCode(const ATNConfig &atnConfig) const; - private: - size_t _cachedHashCode; + virtual bool equals(const ATNConfig &lhs, const ATNConfig &rhs) const; + + using LookupContainer = FlatHashSet; /// All configs but hashed by (s, i, _, pi) not including context. Wiped out /// when we go readonly as this set becomes a DFA state. 
- std::unordered_map _configLookup; - - void InitializeInstanceFields(); + LookupContainer _configLookup; }; + inline bool operator==(const ATNConfigSet &lhs, const ATNConfigSet &rhs) { return lhs.equals(rhs); } + + inline bool operator!=(const ATNConfigSet &lhs, const ATNConfigSet &rhs) { return !operator==(lhs, rhs); } + } // namespace atn } // namespace antlr4 + +namespace std { + +template <> +struct hash<::antlr4::atn::ATNConfigSet> { + size_t operator()(const ::antlr4::atn::ATNConfigSet &atnConfigSet) const { + return atnConfigSet.hashCode(); + } +}; + +} // namespace std diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp old mode 100755 new mode 100644 index a406c4e135..b6c1709d92 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp @@ -3,62 +3,38 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "atn/ATNDeserializationOptions.h" +#include "Exceptions.h" +using namespace antlr4; using namespace antlr4::atn; -ATNDeserializationOptions ATNDeserializationOptions::defaultOptions; - -ATNDeserializationOptions::ATNDeserializationOptions() { - InitializeInstanceFields(); -} - -ATNDeserializationOptions::ATNDeserializationOptions(ATNDeserializationOptions *options) : ATNDeserializationOptions() { - this->verifyATN = options->verifyATN; - this->generateRuleBypassTransitions = options->generateRuleBypassTransitions; -} - -ATNDeserializationOptions::~ATNDeserializationOptions() { -} +ATNDeserializationOptions::ATNDeserializationOptions(ATNDeserializationOptions *options) + : _readOnly(false), _verifyATN(options->_verifyATN), + _generateRuleBypassTransitions(options->_generateRuleBypassTransitions) {} const ATNDeserializationOptions& ATNDeserializationOptions::getDefaultOptions() { - return defaultOptions; -} - -bool ATNDeserializationOptions::isReadOnly() { - return readOnly; + 
static const std::unique_ptr defaultOptions = std::make_unique(); + return *defaultOptions; } void ATNDeserializationOptions::makeReadOnly() { - readOnly = true; -} - -bool ATNDeserializationOptions::isVerifyATN() { - return verifyATN; + _readOnly = true; } void ATNDeserializationOptions::setVerifyATN(bool verify) { throwIfReadOnly(); - verifyATN = verify; -} - -bool ATNDeserializationOptions::isGenerateRuleBypassTransitions() { - return generateRuleBypassTransitions; + _verifyATN = verify; } void ATNDeserializationOptions::setGenerateRuleBypassTransitions(bool generate) { throwIfReadOnly(); - generateRuleBypassTransitions = generate; + _generateRuleBypassTransitions = generate; } -void ATNDeserializationOptions::throwIfReadOnly() { +void ATNDeserializationOptions::throwIfReadOnly() const { if (isReadOnly()) { - throw "The object is read only."; + throw IllegalStateException("ATNDeserializationOptions is read only."); } } - -void ATNDeserializationOptions::InitializeInstanceFields() { - readOnly = false; - verifyATN = true; - generateRuleBypassTransitions = false; -} diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h b/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h index 66aa37da5f..8b1f9850b7 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h @@ -10,41 +10,39 @@ namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC ATNDeserializationOptions { - private: - static ATNDeserializationOptions defaultOptions; +class ANTLR4CPP_PUBLIC ATNDeserializationOptions final { +public: + ATNDeserializationOptions() + : _readOnly(false), _verifyATN(true), _generateRuleBypassTransitions(false) {} - bool readOnly; - bool verifyATN; - bool generateRuleBypassTransitions; + // TODO: Is this useful? If so we should mark it as explicit, otherwise remove it. 
+ ATNDeserializationOptions(ATNDeserializationOptions *options); - public: - ATNDeserializationOptions(); - ATNDeserializationOptions(ATNDeserializationOptions *options); - ATNDeserializationOptions(ATNDeserializationOptions const&) = default; - virtual ~ATNDeserializationOptions(); - ATNDeserializationOptions& operator=(ATNDeserializationOptions const&) = default; + ATNDeserializationOptions(const ATNDeserializationOptions&) = default; - static const ATNDeserializationOptions& getDefaultOptions(); + ATNDeserializationOptions& operator=(const ATNDeserializationOptions&) = default; - bool isReadOnly(); + static const ATNDeserializationOptions& getDefaultOptions(); - void makeReadOnly(); + bool isReadOnly() const { return _readOnly; } - bool isVerifyATN(); + void makeReadOnly(); - void setVerifyATN(bool verify); + bool isVerifyATN() const { return _verifyATN; } - bool isGenerateRuleBypassTransitions(); + void setVerifyATN(bool verify); - void setGenerateRuleBypassTransitions(bool generate); + bool isGenerateRuleBypassTransitions() const { return _generateRuleBypassTransitions; } - protected: - virtual void throwIfReadOnly(); + void setGenerateRuleBypassTransitions(bool generate); - private: - void InitializeInstanceFields(); - }; +private: + void throwIfReadOnly() const; + + bool _readOnly; + bool _verifyATN; + bool _generateRuleBypassTransitions; +}; } // namespace atn } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp old mode 100755 new mode 100644 index c6cceda13d..b6e3dce128 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp @@ -5,6 +5,8 @@ #include "atn/ATNDeserializationOptions.h" +#include "atn/ATNStateType.h" +#include "antlr4-common.h" #include "atn/ATNType.h" #include "atn/ATNState.h" #include "atn/ATN.h" @@ -33,12 +35,13 @@ #include "atn/SetTransition.h" #include "atn/NotSetTransition.h" #include 
"atn/WildcardTransition.h" +#include "atn/TransitionType.h" #include "Token.h" #include "misc/IntervalSet.h" #include "Exceptions.h" #include "support/CPPUtils.h" -#include "support/StringUtils.h" +#include "support/Casts.h" #include "atn/LexerCustomAction.h" #include "atn/LexerChannelAction.h" @@ -51,118 +54,213 @@ #include "atn/ATNDeserializer.h" +#include +#include +#include +#include +#include #include +#include using namespace antlr4; using namespace antlr4::atn; using namespace antlrcpp; -const size_t ATNDeserializer::SERIALIZED_VERSION = 3; - namespace { -uint32_t deserializeInt32(const std::vector& data, size_t offset) { - return (uint32_t)data[offset] | ((uint32_t)data[offset + 1] << 16); -} - -ssize_t readUnicodeInt(const std::vector& data, int& p) { - return static_cast(data[p++]); -} + void checkCondition(bool condition, std::string_view message) { + if (!condition) { + throw IllegalStateException(std::string(message)); + } + } -ssize_t readUnicodeInt32(const std::vector& data, int& p) { - auto result = deserializeInt32(data, p); - p += 2; - return static_cast(result); -} + void checkCondition(bool condition) { + checkCondition(condition, ""); + } -// We templatize this on the function type so the optimizer can inline -// the 16- or 32-bit readUnicodeInt/readUnicodeInt32 as needed. -template -void deserializeSets( - const std::vector& data, - int& p, - std::vector& sets, - F readUnicode) { - int nsets = data[p++]; - for (int i = 0; i < nsets; i++) { - int nintervals = data[p++]; - misc::IntervalSet set; - - bool containsEof = data[p++] != 0; - if (containsEof) { - set.add(-1); - } + /** + * Analyze the {@link StarLoopEntryState} states in the specified ATN to set + * the {@link StarLoopEntryState#isPrecedenceDecision} field to the + * correct value. + * + * @param atn The ATN. 
+ */ + void markPrecedenceDecisions(const ATN &atn) { + for (ATNState *state : atn.states) { + if (!StarLoopEntryState::is(state)) { + continue; + } - for (int j = 0; j < nintervals; j++) { - auto a = readUnicode(data, p); - auto b = readUnicode(data, p); - set.add(a, b); + /* We analyze the ATN to determine if this ATN decision state is the + * decision for the closure block that determines whether a + * precedence rule should continue or complete. + */ + if (atn.ruleToStartState[state->ruleIndex]->isLeftRecursiveRule) { + ATNState *maybeLoopEndState = state->transitions[state->transitions.size() - 1]->target; + if (LoopEndState::is(maybeLoopEndState)) { + if (maybeLoopEndState->epsilonOnlyTransitions && RuleStopState::is(maybeLoopEndState->transitions[0]->target)) { + downCast(state)->isPrecedenceDecision = true; + } + } + } } - sets.push_back(set); } -} -} + Ref lexerActionFactory(LexerActionType type, int data1, int data2) { + switch (type) { + case LexerActionType::CHANNEL: + return std::make_shared(data1); -ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) { -} + case LexerActionType::CUSTOM: + return std::make_shared(data1, data2); -ATNDeserializer::ATNDeserializer(const ATNDeserializationOptions& dso): deserializationOptions(dso) { -} + case LexerActionType::MODE: + return std::make_shared< LexerModeAction>(data1); -ATNDeserializer::~ATNDeserializer() { -} + case LexerActionType::MORE: + return LexerMoreAction::getInstance(); -/** - * This value should never change. Updates following this version are - * reflected as change in the unique ID SERIALIZED_UUID. 
- */ -Guid ATNDeserializer::ADDED_PRECEDENCE_TRANSITIONS() { - return Guid("1DA0C57D-6C06-438A-9B27-10BCB3CE0F61"); -} + case LexerActionType::POP_MODE: + return LexerPopModeAction::getInstance(); -Guid ATNDeserializer::ADDED_LEXER_ACTIONS() { - return Guid("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"); -} + case LexerActionType::PUSH_MODE: + return std::make_shared(data1); -Guid ATNDeserializer::ADDED_UNICODE_SMP() { - return Guid("59627784-3BE5-417A-B9EB-8131A7286089"); -} + case LexerActionType::SKIP: + return LexerSkipAction::getInstance(); -Guid ATNDeserializer::SERIALIZED_UUID() { - return ADDED_UNICODE_SMP(); -} + case LexerActionType::TYPE: + return std::make_shared(data1); -Guid ATNDeserializer::BASE_SERIALIZED_UUID() { - return Guid("33761B2D-78BB-4A43-8B0B-4F5BEE8AACF3"); -} + default: + throw IllegalArgumentException("The specified lexer action type " + std::to_string(static_cast(type)) + + " is not valid."); + } + } -std::vector& ATNDeserializer::SUPPORTED_UUIDS() { - static std::vector singleton = { BASE_SERIALIZED_UUID(), ADDED_PRECEDENCE_TRANSITIONS(), ADDED_LEXER_ACTIONS(), ADDED_UNICODE_SMP() }; - return singleton; -} + ConstTransitionPtr edgeFactory(const ATN &atn, TransitionType type, size_t trg, size_t arg1, size_t arg2, + size_t arg3, const std::vector &sets) { + ATNState *target = atn.states[trg]; + switch (type) { + case TransitionType::EPSILON: + return std::make_unique(target); + case TransitionType::RANGE: + if (arg3 != 0) { + return std::make_unique(target, Token::EOF, arg2); + } else { + return std::make_unique(target, arg1, arg2); + } + case TransitionType::RULE: + return std::make_unique(downCast(atn.states[arg1]), arg2, (int)arg3, target); + case TransitionType::PREDICATE: + return std::make_unique(target, arg1, arg2, arg3 != 0); + case TransitionType::PRECEDENCE: + return std::make_unique(target, (int)arg1); + case TransitionType::ATOM: + if (arg3 != 0) { + return std::make_unique(target, Token::EOF); + } else { + return 
std::make_unique(target, arg1); + } + case TransitionType::ACTION: + return std::make_unique(target, arg1, arg2, arg3 != 0); + case TransitionType::SET: + return std::make_unique(target, sets[arg1]); + case TransitionType::NOT_SET: + return std::make_unique(target, sets[arg1]); + case TransitionType::WILDCARD: + return std::make_unique(target); + } + + throw IllegalArgumentException("The specified transition type is not valid."); + } -bool ATNDeserializer::isFeatureSupported(const Guid &feature, const Guid &actualUuid) { - auto featureIterator = std::find(SUPPORTED_UUIDS().begin(), SUPPORTED_UUIDS().end(), feature); - if (featureIterator == SUPPORTED_UUIDS().end()) { - return false; + /* mem check: all created instances are freed in the d-tor of the ATN. */ + ATNState* stateFactory(ATNStateType type, size_t ruleIndex) { + ATNState *s; + switch (type) { + case ATNStateType::INVALID: + return nullptr; + case ATNStateType::BASIC : + s = new BasicState(); + break; + case ATNStateType::RULE_START : + s = new RuleStartState(); + break; + case ATNStateType::BLOCK_START : + s = new BasicBlockStartState(); + break; + case ATNStateType::PLUS_BLOCK_START : + s = new PlusBlockStartState(); + break; + case ATNStateType::STAR_BLOCK_START : + s = new StarBlockStartState(); + break; + case ATNStateType::TOKEN_START : + s = new TokensStartState(); + break; + case ATNStateType::RULE_STOP : + s = new RuleStopState(); + break; + case ATNStateType::BLOCK_END : + s = new BlockEndState(); + break; + case ATNStateType::STAR_LOOP_BACK : + s = new StarLoopbackState(); + break; + case ATNStateType::STAR_LOOP_ENTRY : + s = new StarLoopEntryState(); + break; + case ATNStateType::PLUS_LOOP_BACK : + s = new PlusLoopbackState(); + break; + case ATNStateType::LOOP_END : + s = new LoopEndState(); + break; + default : + std::string message = "The specified state type " + std::to_string(static_cast(type)) + " is not valid."; + throw IllegalArgumentException(message); + } + assert(s->getStateType() == 
type); + s->ruleIndex = ruleIndex; + return s; } - auto actualIterator = std::find(SUPPORTED_UUIDS().begin(), SUPPORTED_UUIDS().end(), actualUuid); - if (actualIterator == SUPPORTED_UUIDS().end()) { - return false; + + ssize_t readUnicodeInt32(SerializedATNView data, int& p) { + return static_cast(data[p++]); } - return std::distance(featureIterator, actualIterator) >= 0; -} + void deserializeSets( + SerializedATNView data, + int& p, + std::vector& sets) { + size_t nsets = data[p++]; + sets.reserve(sets.size() + nsets); + for (size_t i = 0; i < nsets; i++) { + size_t nintervals = data[p++]; + misc::IntervalSet set; + + bool containsEof = data[p++] != 0; + if (containsEof) { + set.add(-1); + } -ATN ATNDeserializer::deserialize(const std::vector& input) { - // Don't adjust the first value since that's the version number. - std::vector data(input.size()); - data[0] = input[0]; - for (size_t i = 1; i < input.size(); ++i) { - data[i] = input[i] - 2; + for (size_t j = 0; j < nintervals; j++) { + auto a = readUnicodeInt32(data, p); + auto b = readUnicodeInt32(data, p); + set.add(a, b); + } + sets.push_back(set); + } } +} + +ATNDeserializer::ATNDeserializer() : ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) {} + +ATNDeserializer::ATNDeserializer(ATNDeserializationOptions deserializationOptions) : _deserializationOptions(std::move(deserializationOptions)) {} + +std::unique_ptr ATNDeserializer::deserialize(SerializedATNView data) const { int p = 0; int version = data[p++]; if (version != SERIALIZED_VERSION) { @@ -171,60 +269,48 @@ ATN ATNDeserializer::deserialize(const std::vector& input) { throw UnsupportedOperationException(reason); } - Guid uuid = toUUID(data.data(), p); - p += 8; - auto uuidIterator = std::find(SUPPORTED_UUIDS().begin(), SUPPORTED_UUIDS().end(), uuid); - if (uuidIterator == SUPPORTED_UUIDS().end()) { - std::string reason = "Could not deserialize ATN with UUID " + uuid.toString() + " (expected " + - SERIALIZED_UUID().toString() + " or a 
legacy UUID)."; - - throw UnsupportedOperationException(reason); - } - - bool supportsPrecedencePredicates = isFeatureSupported(ADDED_PRECEDENCE_TRANSITIONS(), uuid); - bool supportsLexerActions = isFeatureSupported(ADDED_LEXER_ACTIONS(), uuid); - ATNType grammarType = (ATNType)data[p++]; size_t maxTokenType = data[p++]; - ATN atn(grammarType, maxTokenType); + auto atn = std::make_unique(grammarType, maxTokenType); // // STATES // - std::vector> loopBackStateNumbers; - std::vector> endStateNumbers; - size_t nstates = data[p++]; - for (size_t i = 0; i < nstates; i++) { - size_t stype = data[p++]; - // ignore bad type of states - if (stype == ATNState::ATN_INVALID_TYPE) { - atn.addState(nullptr); - continue; - } + { + std::vector> loopBackStateNumbers; + std::vector> endStateNumbers; + size_t nstates = data[p++]; + atn->states.reserve(nstates); + loopBackStateNumbers.reserve(nstates); // Reserve worst case size, its short lived. + endStateNumbers.reserve(nstates); // Reserve worst case size, its short lived. 
+ for (size_t i = 0; i < nstates; i++) { + ATNStateType stype = static_cast(data[p++]); + // ignore bad type of states + if (stype == ATNStateType::INVALID) { + atn->addState(nullptr); + continue; + } - size_t ruleIndex = data[p++]; - if (ruleIndex == 0xFFFF) { - ruleIndex = INVALID_INDEX; + size_t ruleIndex = data[p++]; + ATNState *s = stateFactory(stype, ruleIndex); + if (stype == ATNStateType::LOOP_END) { // special case + int loopBackStateNumber = data[p++]; + loopBackStateNumbers.push_back({ downCast(s), loopBackStateNumber }); + } else if (BlockStartState::is(s)) { + int endStateNumber = data[p++]; + endStateNumbers.push_back({ downCast(s), endStateNumber }); + } + atn->addState(s); } - ATNState *s = stateFactory(stype, ruleIndex); - if (stype == ATNState::LOOP_END) { // special case - int loopBackStateNumber = data[p++]; - loopBackStateNumbers.push_back({ (LoopEndState*)s, loopBackStateNumber }); - } else if (is(s)) { - int endStateNumber = data[p++]; - endStateNumbers.push_back({ (BlockStartState*)s, endStateNumber }); + // delay the assignment of loop back and end states until we know all the state instances have been initialized + for (auto &pair : loopBackStateNumbers) { + pair.first->loopBackState = atn->states[pair.second]; } - atn.addState(s); - } - - // delay the assignment of loop back and end states until we know all the state instances have been initialized - for (auto &pair : loopBackStateNumbers) { - pair.first->loopBackState = atn.states[pair.second]; - } - for (auto &pair : endStateNumbers) { - pair.first->endState = (BlockEndState*)atn.states[pair.second]; + for (auto &pair : endStateNumbers) { + pair.first->endState = downCast(atn->states[pair.second]); + } } size_t numNonGreedyStates = data[p++]; @@ -232,118 +318,102 @@ ATN ATNDeserializer::deserialize(const std::vector& input) { size_t stateNumber = data[p++]; // The serialized ATN must be specifying the right states, so that the // cast below is correct. 
- ((DecisionState *)atn.states[stateNumber])->nonGreedy = true; + downCast(atn->states[stateNumber])->nonGreedy = true; } - if (supportsPrecedencePredicates) { - size_t numPrecedenceStates = data[p++]; - for (size_t i = 0; i < numPrecedenceStates; i++) { - size_t stateNumber = data[p++]; - ((RuleStartState *)atn.states[stateNumber])->isLeftRecursiveRule = true; - } + size_t numPrecedenceStates = data[p++]; + for (size_t i = 0; i < numPrecedenceStates; i++) { + size_t stateNumber = data[p++]; + downCast(atn->states[stateNumber])->isLeftRecursiveRule = true; } // // RULES // size_t nrules = data[p++]; + atn->ruleToStartState.reserve(nrules); for (size_t i = 0; i < nrules; i++) { size_t s = data[p++]; // Also here, the serialized atn must ensure to point to the correct class type. - RuleStartState *startState = (RuleStartState*)atn.states[s]; - atn.ruleToStartState.push_back(startState); - if (atn.grammarType == ATNType::LEXER) { + RuleStartState *startState = downCast(atn->states[s]); + atn->ruleToStartState.push_back(startState); + if (atn->grammarType == ATNType::LEXER) { size_t tokenType = data[p++]; - if (tokenType == 0xFFFF) { - tokenType = Token::EOF; - } - - atn.ruleToTokenType.push_back(tokenType); - - if (!isFeatureSupported(ADDED_LEXER_ACTIONS(), uuid)) { - // this piece of unused metadata was serialized prior to the - // addition of LexerAction - //int actionIndexIgnored = data[p++]; - p++; - } + atn->ruleToTokenType.push_back(tokenType); } } - atn.ruleToStopState.resize(nrules); - for (ATNState *state : atn.states) { - if (!is(state)) { + atn->ruleToStopState.resize(nrules); + for (ATNState *state : atn->states) { + if (!RuleStopState::is(state)) { continue; } - RuleStopState *stopState = static_cast(state); - atn.ruleToStopState[state->ruleIndex] = stopState; - atn.ruleToStartState[state->ruleIndex]->stopState = stopState; + RuleStopState *stopState = downCast(state); + atn->ruleToStopState[state->ruleIndex] = stopState; + 
atn->ruleToStartState[state->ruleIndex]->stopState = stopState; } // // MODES // size_t nmodes = data[p++]; + atn->modeToStartState.reserve(nmodes); for (size_t i = 0; i < nmodes; i++) { size_t s = data[p++]; - atn.modeToStartState.push_back(static_cast(atn.states[s])); + atn->modeToStartState.push_back(downCast(atn->states[s])); } // // SETS // - std::vector sets; - - // First, deserialize sets with 16-bit arguments <= U+FFFF. - deserializeSets(data, p, sets, readUnicodeInt); - - // Next, if the ATN was serialized with the Unicode SMP feature, - // deserialize sets with 32-bit arguments <= U+10FFFF. - if (isFeatureSupported(ADDED_UNICODE_SMP(), uuid)) { - deserializeSets(data, p, sets, readUnicodeInt32); - } - - // - // EDGES - // - int nedges = data[p++]; - for (int i = 0; i < nedges; i++) { - size_t src = data[p]; - size_t trg = data[p + 1]; - size_t ttype = data[p + 2]; - size_t arg1 = data[p + 3]; - size_t arg2 = data[p + 4]; - size_t arg3 = data[p + 5]; - Transition *trans = edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets); - ATNState *srcState = atn.states[src]; - srcState->addTransition(trans); - p += 6; + { + std::vector sets; + + deserializeSets(data, p, sets); + sets.shrink_to_fit(); + + // + // EDGES + // + int nedges = data[p++]; + for (int i = 0; i < nedges; i++) { + size_t src = data[p]; + size_t trg = data[p + 1]; + TransitionType ttype = static_cast(data[p + 2]); + size_t arg1 = data[p + 3]; + size_t arg2 = data[p + 4]; + size_t arg3 = data[p + 5]; + ConstTransitionPtr trans = edgeFactory(*atn, ttype, trg, arg1, arg2, arg3, sets); + ATNState *srcState = atn->states[src]; + srcState->addTransition(std::move(trans)); + p += 6; + } } - // edges for rule stop states can be derived, so they aren't serialized - for (ATNState *state : atn.states) { + for (ATNState *state : atn->states) { for (size_t i = 0; i < state->transitions.size(); i++) { - Transition *t = state->transitions[i]; - if (!is(t)) { + const Transition *t = 
state->transitions[i].get(); + if (!RuleTransition::is(t)) { continue; } - RuleTransition *ruleTransition = static_cast(t); + const RuleTransition *ruleTransition = downCast(t); size_t outermostPrecedenceReturn = INVALID_INDEX; - if (atn.ruleToStartState[ruleTransition->target->ruleIndex]->isLeftRecursiveRule) { + if (atn->ruleToStartState[ruleTransition->target->ruleIndex]->isLeftRecursiveRule) { if (ruleTransition->precedence == 0) { outermostPrecedenceReturn = ruleTransition->target->ruleIndex; } } - EpsilonTransition *returnTransition = new EpsilonTransition(ruleTransition->followState, outermostPrecedenceReturn); /* mem check: freed in ANTState d-tor */ - atn.ruleToStopState[ruleTransition->target->ruleIndex]->addTransition(returnTransition); + ConstTransitionPtr returnTransition = std::make_unique(ruleTransition->followState, outermostPrecedenceReturn); + atn->ruleToStopState[ruleTransition->target->ruleIndex]->addTransition(std::move(returnTransition)); } } - for (ATNState *state : atn.states) { - if (is(state)) { - BlockStartState *startState = static_cast(state); + for (ATNState *state : atn->states) { + if (BlockStartState::is(state)) { + BlockStartState *startState = downCast(state); // we need to know the end state to set its start state if (startState->endState == nullptr) { @@ -355,23 +425,23 @@ ATN ATNDeserializer::deserialize(const std::vector& input) { throw IllegalStateException(); } - startState->endState->startState = static_cast(state); + startState->endState->startState = downCast(state); } - if (is(state)) { - PlusLoopbackState *loopbackState = static_cast(state); + if (PlusLoopbackState::is(state)) { + PlusLoopbackState *loopbackState = downCast(state); for (size_t i = 0; i < loopbackState->transitions.size(); i++) { ATNState *target = loopbackState->transitions[i]->target; - if (is(target)) { - (static_cast(target))->loopBackState = loopbackState; + if (PlusBlockStartState::is(target)) { + (downCast(target))->loopBackState = loopbackState; 
} } - } else if (is(state)) { - StarLoopbackState *loopbackState = static_cast(state); + } else if (StarLoopbackState::is(state)) { + StarLoopbackState *loopbackState = downCast(state); for (size_t i = 0; i < loopbackState->transitions.size(); i++) { ATNState *target = loopbackState->transitions[i]->target; - if (is(target)) { - (static_cast(target))->loopBackState = loopbackState; + if (StarLoopEntryState::is(target)) { + downCast(target)->loopBackState = loopbackState; } } } @@ -381,104 +451,76 @@ ATN ATNDeserializer::deserialize(const std::vector& input) { // DECISIONS // size_t ndecisions = data[p++]; - for (size_t i = 1; i <= ndecisions; i++) { + atn->decisionToState.reserve(ndecisions); + for (size_t i = 0; i < ndecisions; i++) { size_t s = data[p++]; - DecisionState *decState = dynamic_cast(atn.states[s]); + DecisionState *decState = downCast(atn->states[s]); if (decState == nullptr) throw IllegalStateException(); - atn.decisionToState.push_back(decState); - decState->decision = (int)i - 1; + atn->decisionToState.push_back(decState); + decState->decision = static_cast(i); } // // LEXER ACTIONS // - if (atn.grammarType == ATNType::LEXER) { - if (supportsLexerActions) { - atn.lexerActions.resize(data[p++]); - for (size_t i = 0; i < atn.lexerActions.size(); i++) { - LexerActionType actionType = (LexerActionType)data[p++]; - int data1 = data[p++]; - if (data1 == 0xFFFF) { - data1 = -1; - } - - int data2 = data[p++]; - if (data2 == 0xFFFF) { - data2 = -1; - } - - atn.lexerActions[i] = lexerActionFactory(actionType, data1, data2); - } - } else { - // for compatibility with older serialized ATNs, convert the old - // serialized action index for action transitions to the new - // form, which is the index of a LexerCustomAction - for (ATNState *state : atn.states) { - for (size_t i = 0; i < state->transitions.size(); i++) { - Transition *transition = state->transitions[i]; - if (!is(transition)) { - continue; - } - - size_t ruleIndex = 
static_cast(transition)->ruleIndex; - size_t actionIndex = static_cast(transition)->actionIndex; - Ref lexerAction = std::make_shared(ruleIndex, actionIndex); - state->transitions[i] = new ActionTransition(transition->target, ruleIndex, atn.lexerActions.size(), false); /* mem-check freed in ATNState d-tor */ - delete transition; // ml: no longer needed since we just replaced it. - atn.lexerActions.push_back(lexerAction); - } - } + if (atn->grammarType == ATNType::LEXER) { + atn->lexerActions.resize(data[p++]); + for (size_t i = 0; i < atn->lexerActions.size(); i++) { + LexerActionType actionType = static_cast(data[p++]); + int data1 = data[p++]; + int data2 = data[p++]; + atn->lexerActions[i] = lexerActionFactory(actionType, data1, data2); } } - markPrecedenceDecisions(atn); + markPrecedenceDecisions(*atn); - if (deserializationOptions.isVerifyATN()) { - verifyATN(atn); + if (_deserializationOptions.isVerifyATN()) { + verifyATN(*atn); } - if (deserializationOptions.isGenerateRuleBypassTransitions() && atn.grammarType == ATNType::PARSER) { - atn.ruleToTokenType.resize(atn.ruleToStartState.size()); - for (size_t i = 0; i < atn.ruleToStartState.size(); i++) { - atn.ruleToTokenType[i] = int(atn.maxTokenType + i + 1); + if (_deserializationOptions.isGenerateRuleBypassTransitions() && atn->grammarType == ATNType::PARSER) { + atn->ruleToTokenType.resize(atn->ruleToStartState.size()); + for (size_t i = 0; i < atn->ruleToStartState.size(); i++) { + atn->ruleToTokenType[i] = static_cast(atn->maxTokenType + i + 1); } - for (std::vector::size_type i = 0; i < atn.ruleToStartState.size(); i++) { + for (std::vector::size_type i = 0; i < atn->ruleToStartState.size(); i++) { BasicBlockStartState *bypassStart = new BasicBlockStartState(); /* mem check: freed in ATN d-tor */ - bypassStart->ruleIndex = (int)i; - atn.addState(bypassStart); + bypassStart->ruleIndex = static_cast(i); + atn->addState(bypassStart); BlockEndState *bypassStop = new BlockEndState(); /* mem check: freed in ATN 
d-tor */ - bypassStop->ruleIndex = (int)i; - atn.addState(bypassStop); + bypassStop->ruleIndex = static_cast(i); + atn->addState(bypassStop); bypassStart->endState = bypassStop; - atn.defineDecisionState(bypassStart); + atn->defineDecisionState(bypassStart); bypassStop->startState = bypassStart; ATNState *endState; - Transition *excludeTransition = nullptr; - if (atn.ruleToStartState[i]->isLeftRecursiveRule) { + const Transition *excludeTransition = nullptr; + if (atn->ruleToStartState[i]->isLeftRecursiveRule) { // wrap from the beginning of the rule to the StarLoopEntryState endState = nullptr; - for (ATNState *state : atn.states) { + for (ATNState *state : atn->states) { if (state->ruleIndex != i) { continue; } - if (!is(state)) { + if (!StarLoopEntryState::is(state)) { continue; } ATNState *maybeLoopEndState = state->transitions[state->transitions.size() - 1]->target; - if (!is(maybeLoopEndState)) { + if (!LoopEndState::is(maybeLoopEndState)) { continue; } - if (maybeLoopEndState->epsilonOnlyTransitions && is(maybeLoopEndState->transitions[0]->target)) { + if (maybeLoopEndState->epsilonOnlyTransitions && RuleStopState::is(maybeLoopEndState->transitions[0]->target)) { endState = state; break; } @@ -489,78 +531,50 @@ ATN ATNDeserializer::deserialize(const std::vector& input) { } - excludeTransition = (static_cast(endState))->loopBackState->transitions[0]; + excludeTransition = (static_cast(endState))->loopBackState->transitions[0].get(); } else { - endState = atn.ruleToStopState[i]; + endState = atn->ruleToStopState[i]; } // all non-excluded transitions that currently target end state need to target blockEnd instead - for (ATNState *state : atn.states) { - for (Transition *transition : state->transitions) { - if (transition == excludeTransition) { + for (ATNState *state : atn->states) { + for (auto &transition : state->transitions) { + if (transition.get() == excludeTransition) { continue; } if (transition->target == endState) { - transition->target = bypassStop; 
+ const_cast(transition.get())->target = bypassStop; } } } // all transitions leaving the rule start state need to leave blockStart instead - while (atn.ruleToStartState[i]->transitions.size() > 0) { - Transition *transition = atn.ruleToStartState[i]->removeTransition(atn.ruleToStartState[i]->transitions.size() - 1); - bypassStart->addTransition(transition); + while (atn->ruleToStartState[i]->transitions.size() > 0) { + ConstTransitionPtr transition = atn->ruleToStartState[i]->removeTransition(atn->ruleToStartState[i]->transitions.size() - 1); + bypassStart->addTransition(std::move(transition)); } // link the new states - atn.ruleToStartState[i]->addTransition(new EpsilonTransition(bypassStart)); /* mem check: freed in ATNState d-tor */ - bypassStop->addTransition(new EpsilonTransition(endState)); /* mem check: freed in ATNState d-tor */ + atn->ruleToStartState[i]->addTransition(std::make_unique(bypassStart)); + bypassStop->addTransition(std::make_unique(endState)); ATNState *matchState = new BasicState(); /* mem check: freed in ATN d-tor */ - atn.addState(matchState); - matchState->addTransition(new AtomTransition(bypassStop, atn.ruleToTokenType[i])); /* mem check: freed in ATNState d-tor */ - bypassStart->addTransition(new EpsilonTransition(matchState)); /* mem check: freed in ATNState d-tor */ + atn->addState(matchState); + matchState->addTransition(std::make_unique(bypassStop, atn->ruleToTokenType[i])); + bypassStart->addTransition(std::make_unique(matchState)); } - if (deserializationOptions.isVerifyATN()) { + if (_deserializationOptions.isVerifyATN()) { // reverify after modification - verifyATN(atn); + verifyATN(*atn); } } return atn; } -/** - * Analyze the {@link StarLoopEntryState} states in the specified ATN to set - * the {@link StarLoopEntryState#isPrecedenceDecision} field to the - * correct value. - * - * @param atn The ATN. 
- */ -void ATNDeserializer::markPrecedenceDecisions(const ATN &atn) { - for (ATNState *state : atn.states) { - if (!is(state)) { - continue; - } - - /* We analyze the ATN to determine if this ATN decision state is the - * decision for the closure block that determines whether a - * precedence rule should continue or complete. - */ - if (atn.ruleToStartState[state->ruleIndex]->isLeftRecursiveRule) { - ATNState *maybeLoopEndState = state->transitions[state->transitions.size() - 1]->target; - if (is(maybeLoopEndState)) { - if (maybeLoopEndState->epsilonOnlyTransitions && is(maybeLoopEndState->transitions[0]->target)) { - static_cast(state)->isPrecedenceDecision = true; - } - } - } - } -} - -void ATNDeserializer::verifyATN(const ATN &atn) { +void ATNDeserializer::verifyATN(const ATN &atn) const { // verify assumptions for (ATNState *state : atn.states) { if (state == nullptr) { @@ -569,190 +583,52 @@ void ATNDeserializer::verifyATN(const ATN &atn) { checkCondition(state->epsilonOnlyTransitions || state->transitions.size() <= 1); - if (is(state)) { - checkCondition((static_cast(state))->loopBackState != nullptr); + if (PlusBlockStartState::is(state)) { + checkCondition((downCast(state))->loopBackState != nullptr); } - if (is(state)) { - StarLoopEntryState *starLoopEntryState = static_cast(state); + if (StarLoopEntryState::is(state)) { + StarLoopEntryState *starLoopEntryState = downCast(state); checkCondition(starLoopEntryState->loopBackState != nullptr); checkCondition(starLoopEntryState->transitions.size() == 2); - if (is(starLoopEntryState->transitions[0]->target)) { - checkCondition(static_cast(starLoopEntryState->transitions[1]->target) != nullptr); + if (StarBlockStartState::is(starLoopEntryState->transitions[0]->target)) { + checkCondition(downCast(starLoopEntryState->transitions[1]->target) != nullptr); checkCondition(!starLoopEntryState->nonGreedy); - } else if (is(starLoopEntryState->transitions[0]->target)) { - 
checkCondition(is(starLoopEntryState->transitions[1]->target)); + } else if (LoopEndState::is(starLoopEntryState->transitions[0]->target)) { + checkCondition(StarBlockStartState::is(starLoopEntryState->transitions[1]->target)); checkCondition(starLoopEntryState->nonGreedy); } else { throw IllegalStateException(); - } } - if (is(state)) { + if (StarLoopbackState::is(state)) { checkCondition(state->transitions.size() == 1); - checkCondition(is(state->transitions[0]->target)); + checkCondition(StarLoopEntryState::is(state->transitions[0]->target)); } - if (is(state)) { - checkCondition((static_cast(state))->loopBackState != nullptr); + if (LoopEndState::is(state)) { + checkCondition((downCast(state))->loopBackState != nullptr); } - if (is(state)) { - checkCondition((static_cast(state))->stopState != nullptr); + if (RuleStartState::is(state)) { + checkCondition((downCast(state))->stopState != nullptr); } - if (is(state)) { - checkCondition((static_cast(state))->endState != nullptr); + if (BlockStartState::is(state)) { + checkCondition((downCast(state))->endState != nullptr); } - if (is(state)) { - checkCondition((static_cast(state))->startState != nullptr); + if (BlockEndState::is(state)) { + checkCondition((downCast(state))->startState != nullptr); } - if (is(state)) { - DecisionState *decisionState = static_cast(state); + if (DecisionState::is(state)) { + DecisionState *decisionState = downCast(state); checkCondition(decisionState->transitions.size() <= 1 || decisionState->decision >= 0); } else { - checkCondition(state->transitions.size() <= 1 || is(state)); + checkCondition(state->transitions.size() <= 1 || RuleStopState::is(state)); } } } - -void ATNDeserializer::checkCondition(bool condition) { - checkCondition(condition, ""); -} - -void ATNDeserializer::checkCondition(bool condition, const std::string &message) { - if (!condition) { - throw IllegalStateException(message); - } -} - -Guid ATNDeserializer::toUUID(const unsigned short *data, size_t offset) { - 
return Guid((uint16_t *)data + offset, true); -} - -/* mem check: all created instances are freed in the d-tor of the ATNState they are added to. */ -Transition *ATNDeserializer::edgeFactory(const ATN &atn, size_t type, size_t /*src*/, size_t trg, size_t arg1, - size_t arg2, size_t arg3, - const std::vector &sets) { - - ATNState *target = atn.states[trg]; - switch (type) { - case Transition::EPSILON: - return new EpsilonTransition(target); - case Transition::RANGE: - if (arg3 != 0) { - return new RangeTransition(target, Token::EOF, arg2); - } else { - return new RangeTransition(target, arg1, arg2); - } - case Transition::RULE: - return new RuleTransition(static_cast(atn.states[arg1]), arg2, (int)arg3, target); - case Transition::PREDICATE: - return new PredicateTransition(target, arg1, arg2, arg3 != 0); - case Transition::PRECEDENCE: - return new PrecedencePredicateTransition(target, (int)arg1); - case Transition::ATOM: - if (arg3 != 0) { - return new AtomTransition(target, Token::EOF); - } else { - return new AtomTransition(target, arg1); - } - case Transition::ACTION: - return new ActionTransition(target, arg1, arg2, arg3 != 0); - case Transition::SET: - return new SetTransition(target, sets[arg1]); - case Transition::NOT_SET: - return new NotSetTransition(target, sets[arg1]); - case Transition::WILDCARD: - return new WildcardTransition(target); - } - - throw IllegalArgumentException("The specified transition type is not valid."); -} - -/* mem check: all created instances are freed in the d-tor of the ATN. 
*/ -ATNState* ATNDeserializer::stateFactory(size_t type, size_t ruleIndex) { - ATNState *s; - switch (type) { - case ATNState::ATN_INVALID_TYPE: - return nullptr; - case ATNState::BASIC : - s = new BasicState(); - break; - case ATNState::RULE_START : - s = new RuleStartState(); - break; - case ATNState::BLOCK_START : - s = new BasicBlockStartState(); - break; - case ATNState::PLUS_BLOCK_START : - s = new PlusBlockStartState(); - break; - case ATNState::STAR_BLOCK_START : - s = new StarBlockStartState(); - break; - case ATNState::TOKEN_START : - s = new TokensStartState(); - break; - case ATNState::RULE_STOP : - s = new RuleStopState(); - break; - case ATNState::BLOCK_END : - s = new BlockEndState(); - break; - case ATNState::STAR_LOOP_BACK : - s = new StarLoopbackState(); - break; - case ATNState::STAR_LOOP_ENTRY : - s = new StarLoopEntryState(); - break; - case ATNState::PLUS_LOOP_BACK : - s = new PlusLoopbackState(); - break; - case ATNState::LOOP_END : - s = new LoopEndState(); - break; - default : - std::string message = "The specified state type " + std::to_string(type) + " is not valid."; - throw IllegalArgumentException(message); - } - - s->ruleIndex = ruleIndex; - return s; -} - -Ref ATNDeserializer::lexerActionFactory(LexerActionType type, int data1, int data2) { - switch (type) { - case LexerActionType::CHANNEL: - return std::make_shared(data1); - - case LexerActionType::CUSTOM: - return std::make_shared(data1, data2); - - case LexerActionType::MODE: - return std::make_shared< LexerModeAction>(data1); - - case LexerActionType::MORE: - return LexerMoreAction::getInstance(); - - case LexerActionType::POP_MODE: - return LexerPopModeAction::getInstance(); - - case LexerActionType::PUSH_MODE: - return std::make_shared(data1); - - case LexerActionType::SKIP: - return LexerSkipAction::getInstance(); - - case LexerActionType::TYPE: - return std::make_shared(data1); - - default: - throw IllegalArgumentException("The specified lexer action type " + 
std::to_string(static_cast(type)) + - " is not valid."); - } -} diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.h b/runtime/Cpp/runtime/src/atn/ATNDeserializer.h old mode 100755 new mode 100644 index 621e03db7a..bc36b424d1 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.h +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.h @@ -5,80 +5,30 @@ #pragma once -#include "atn/LexerAction.h" +#include +#include #include "atn/ATNDeserializationOptions.h" +#include "antlr4-common.h" +#include "atn/SerializedATNView.h" +#include "atn/LexerAction.h" +#include "atn/Transition.h" namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC ATNDeserializer { + class ANTLR4CPP_PUBLIC ATNDeserializer final { public: - static const size_t SERIALIZED_VERSION; - - /// This is the current serialized UUID. - // ml: defined as function to avoid the “static initialization order fiasco”. - static Guid SERIALIZED_UUID(); + static constexpr size_t SERIALIZED_VERSION = 4; ATNDeserializer(); - ATNDeserializer(const ATNDeserializationOptions& dso); - virtual ~ATNDeserializer(); - - static Guid toUUID(const unsigned short *data, size_t offset); - - virtual ATN deserialize(const std::vector &input); - virtual void verifyATN(const ATN &atn); - - static void checkCondition(bool condition); - static void checkCondition(bool condition, const std::string &message); - static Transition *edgeFactory(const ATN &atn, size_t type, size_t src, size_t trg, size_t arg1, size_t arg2, - size_t arg3, const std::vector &sets); + explicit ATNDeserializer(ATNDeserializationOptions deserializationOptions); - static ATNState *stateFactory(size_t type, size_t ruleIndex); - - protected: - /// Determines if a particular serialized representation of an ATN supports - /// a particular feature, identified by the used for serializing - /// the ATN at the time the feature was first introduced. - /// - /// The marking the first time the feature was - /// supported in the serialized ATN. 
- /// The of the actual serialized ATN which is - /// currently being deserialized. - /// {@code true} if the {@code actualUuid} value represents a - /// serialized ATN at or after the feature identified by {@code feature} was - /// introduced; otherwise, {@code false}. - virtual bool isFeatureSupported(const Guid &feature, const Guid &actualUuid); - void markPrecedenceDecisions(const ATN &atn); - Ref lexerActionFactory(LexerActionType type, int data1, int data2); + std::unique_ptr deserialize(SerializedATNView input) const; + void verifyATN(const ATN &atn) const; private: - /// This is the earliest supported serialized UUID. - static Guid BASE_SERIALIZED_UUID(); - - /// This UUID indicates an extension of for the - /// addition of precedence predicates. - static Guid ADDED_PRECEDENCE_TRANSITIONS(); - - /** - * This UUID indicates an extension of ADDED_PRECEDENCE_TRANSITIONS - * for the addition of lexer actions encoded as a sequence of - * LexerAction instances. - */ - static Guid ADDED_LEXER_ACTIONS(); - - /** - * This UUID indicates the serialized ATN contains two sets of - * IntervalSets, where the second set's values are encoded as - * 32-bit integers to support the full Unicode SMP range up to U+10FFFF. - */ - static Guid ADDED_UNICODE_SMP(); - - /// This list contains all of the currently supported UUIDs, ordered by when - /// the feature first appeared in this branch. - static std::vector& SUPPORTED_UUIDS(); - - ATNDeserializationOptions deserializationOptions; + const ATNDeserializationOptions _deserializationOptions; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp deleted file mode 100755 index 206c742813..0000000000 --- a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp +++ /dev/null @@ -1,621 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "misc/IntervalSet.h" -#include "atn/ATNType.h" -#include "atn/ATNState.h" -#include "atn/BlockEndState.h" - -#include "atn/DecisionState.h" -#include "atn/RuleStartState.h" -#include "atn/LoopEndState.h" -#include "atn/BlockStartState.h" -#include "atn/Transition.h" -#include "atn/SetTransition.h" -#include "Token.h" -#include "misc/Interval.h" -#include "atn/ATN.h" - -#include "atn/RuleTransition.h" -#include "atn/PrecedencePredicateTransition.h" -#include "atn/PredicateTransition.h" -#include "atn/RangeTransition.h" -#include "atn/AtomTransition.h" -#include "atn/ActionTransition.h" -#include "atn/ATNDeserializer.h" - -#include "atn/TokensStartState.h" -#include "Exceptions.h" -#include "support/CPPUtils.h" - -#include "atn/LexerChannelAction.h" -#include "atn/LexerCustomAction.h" -#include "atn/LexerModeAction.h" -#include "atn/LexerPushModeAction.h" -#include "atn/LexerTypeAction.h" - -#include "Exceptions.h" - -#include "atn/ATNSerializer.h" - -using namespace antlrcpp; -using namespace antlr4::atn; - -ATNSerializer::ATNSerializer(ATN *atn) { this->atn = atn; } - -ATNSerializer::ATNSerializer(ATN *atn, const std::vector &tokenNames) { - this->atn = atn; - _tokenNames = tokenNames; -} - -ATNSerializer::~ATNSerializer() { } - -std::vector ATNSerializer::serialize() { - std::vector data; - data.push_back(ATNDeserializer::SERIALIZED_VERSION); - serializeUUID(data, ATNDeserializer::SERIALIZED_UUID()); - - // convert grammar type to ATN const to avoid dependence on ANTLRParser - data.push_back(static_cast(atn->grammarType)); - data.push_back(atn->maxTokenType); - size_t nedges = 0; - - std::unordered_map setIndices; - std::vector sets; - - // dump states, count edges and collect sets while doing so - std::vector nonGreedyStates; - std::vector precedenceStates; - data.push_back(atn->states.size()); - for (ATNState *s : 
atn->states) { - if (s == nullptr) { // might be optimized away - data.push_back(ATNState::ATN_INVALID_TYPE); - continue; - } - - size_t stateType = s->getStateType(); - if (is(s) && (static_cast(s))->nonGreedy) { - nonGreedyStates.push_back(s->stateNumber); - } - - if (is(s) && (static_cast(s))->isLeftRecursiveRule) { - precedenceStates.push_back(s->stateNumber); - } - - data.push_back(stateType); - - if (s->ruleIndex == INVALID_INDEX) { - data.push_back(0xFFFF); - } - else { - data.push_back(s->ruleIndex); - } - - if (s->getStateType() == ATNState::LOOP_END) { - data.push_back((static_cast(s))->loopBackState->stateNumber); - } - else if (is(s)) { - data.push_back((static_cast(s))->endState->stateNumber); - } - - if (s->getStateType() != ATNState::RULE_STOP) { - // the deserializer can trivially derive these edges, so there's no need - // to serialize them - nedges += s->transitions.size(); - } - - for (size_t i = 0; i < s->transitions.size(); i++) { - Transition *t = s->transitions[i]; - Transition::SerializationType edgeType = t->getSerializationType(); - if (edgeType == Transition::SET || edgeType == Transition::NOT_SET) { - SetTransition *st = static_cast(t); - if (setIndices.find(st->set) == setIndices.end()) { - sets.push_back(st->set); - setIndices.insert({ st->set, (int)sets.size() - 1 }); - } - } - } - } - - // non-greedy states - data.push_back(nonGreedyStates.size()); - for (size_t i = 0; i < nonGreedyStates.size(); i++) { - data.push_back(nonGreedyStates.at(i)); - } - - // precedence states - data.push_back(precedenceStates.size()); - for (size_t i = 0; i < precedenceStates.size(); i++) { - data.push_back(precedenceStates.at(i)); - } - - size_t nrules = atn->ruleToStartState.size(); - data.push_back(nrules); - for (size_t r = 0; r < nrules; r++) { - ATNState *ruleStartState = atn->ruleToStartState[r]; - data.push_back(ruleStartState->stateNumber); - if (atn->grammarType == ATNType::LEXER) { - if (atn->ruleToTokenType[r] == Token::EOF) { - 
data.push_back(0xFFFF); - } - else { - data.push_back(atn->ruleToTokenType[r]); - } - } - } - - size_t nmodes = atn->modeToStartState.size(); - data.push_back(nmodes); - if (nmodes > 0) { - for (const auto &modeStartState : atn->modeToStartState) { - data.push_back(modeStartState->stateNumber); - } - } - - size_t nsets = sets.size(); - data.push_back(nsets); - for (auto set : sets) { - bool containsEof = set.contains(Token::EOF); - if (containsEof && set.getIntervals().at(0).b == -1) { - data.push_back(set.getIntervals().size() - 1); - } - else { - data.push_back(set.getIntervals().size()); - } - - data.push_back(containsEof ? 1 : 0); - for (auto &interval : set.getIntervals()) { - if (interval.a == -1) { - if (interval.b == -1) { - continue; - } else { - data.push_back(0); - } - } - else { - data.push_back(interval.a); - } - - data.push_back(interval.b); - } - } - - data.push_back(nedges); - for (ATNState *s : atn->states) { - if (s == nullptr) { - // might be optimized away - continue; - } - - if (s->getStateType() == ATNState::RULE_STOP) { - continue; - } - - for (size_t i = 0; i < s->transitions.size(); i++) { - Transition *t = s->transitions[i]; - - if (atn->states[t->target->stateNumber] == nullptr) { - throw IllegalStateException("Cannot serialize a transition to a removed state."); - } - - size_t src = s->stateNumber; - size_t trg = t->target->stateNumber; - Transition::SerializationType edgeType = t->getSerializationType(); - size_t arg1 = 0; - size_t arg2 = 0; - size_t arg3 = 0; - switch (edgeType) { - case Transition::RULE: - trg = (static_cast(t))->followState->stateNumber; - arg1 = (static_cast(t))->target->stateNumber; - arg2 = (static_cast(t))->ruleIndex; - arg3 = (static_cast(t))->precedence; - break; - case Transition::PRECEDENCE: - { - PrecedencePredicateTransition *ppt = - static_cast(t); - arg1 = ppt->precedence; - } - break; - case Transition::PREDICATE: - { - PredicateTransition *pt = static_cast(t); - arg1 = pt->ruleIndex; - arg2 = 
pt->predIndex; - arg3 = pt->isCtxDependent ? 1 : 0; - } - break; - case Transition::RANGE: - arg1 = (static_cast(t))->from; - arg2 = (static_cast(t))->to; - if (arg1 == Token::EOF) { - arg1 = 0; - arg3 = 1; - } - - break; - case Transition::ATOM: - arg1 = (static_cast(t))->_label; - if (arg1 == Token::EOF) { - arg1 = 0; - arg3 = 1; - } - - break; - case Transition::ACTION: - { - ActionTransition *at = static_cast(t); - arg1 = at->ruleIndex; - arg2 = at->actionIndex; - if (arg2 == INVALID_INDEX) { - arg2 = 0xFFFF; - } - - arg3 = at->isCtxDependent ? 1 : 0; - } - break; - case Transition::SET: - arg1 = setIndices[(static_cast(t))->set]; - break; - - case Transition::NOT_SET: - arg1 = setIndices[(static_cast(t))->set]; - break; - - default: - break; - } - - data.push_back(src); - data.push_back(trg); - data.push_back(edgeType); - data.push_back(arg1); - data.push_back(arg2); - data.push_back(arg3); - } - } - - size_t ndecisions = atn->decisionToState.size(); - data.push_back(ndecisions); - for (DecisionState *decStartState : atn->decisionToState) { - data.push_back(decStartState->stateNumber); - } - - // LEXER ACTIONS - if (atn->grammarType == ATNType::LEXER) { - data.push_back(atn->lexerActions.size()); - for (Ref &action : atn->lexerActions) { - data.push_back(static_cast(action->getActionType())); - switch (action->getActionType()) { - case LexerActionType::CHANNEL: - { - int channel = std::dynamic_pointer_cast(action)->getChannel(); - data.push_back(channel != -1 ? channel : 0xFFFF); - data.push_back(0); - break; - } - - case LexerActionType::CUSTOM: - { - size_t ruleIndex = std::dynamic_pointer_cast(action)->getRuleIndex(); - size_t actionIndex = std::dynamic_pointer_cast(action)->getActionIndex(); - data.push_back(ruleIndex != INVALID_INDEX ? ruleIndex : 0xFFFF); - data.push_back(actionIndex != INVALID_INDEX ? 
actionIndex : 0xFFFF); - break; - } - - case LexerActionType::MODE: - { - int mode = std::dynamic_pointer_cast(action)->getMode(); - data.push_back(mode != -1 ? mode : 0xFFFF); - data.push_back(0); - break; - } - - case LexerActionType::MORE: - data.push_back(0); - data.push_back(0); - break; - - case LexerActionType::POP_MODE: - data.push_back(0); - data.push_back(0); - break; - - case LexerActionType::PUSH_MODE: - { - int mode = std::dynamic_pointer_cast(action)->getMode(); - data.push_back(mode != -1 ? mode : 0xFFFF); - data.push_back(0); - break; - } - - case LexerActionType::SKIP: - data.push_back(0); - data.push_back(0); - break; - - case LexerActionType::TYPE: - { - int type = std::dynamic_pointer_cast(action)->getType(); - data.push_back(type != -1 ? type : 0xFFFF); - data.push_back(0); - break; - } - - default: - throw IllegalArgumentException("The specified lexer action type " + - std::to_string(static_cast(action->getActionType())) + - " is not valid."); - } - } - } - - // don't adjust the first value since that's the version number - for (size_t i = 1; i < data.size(); i++) { - if (data.at(i) > 0xFFFF) { - throw UnsupportedOperationException("Serialized ATN data element out of range."); - } - - size_t value = (data.at(i) + 2) & 0xFFFF; - data.at(i) = value; - } - - return data; -} - -//------------------------------------------------------------------------------------------------------------ - -std::string ATNSerializer::decode(const std::wstring &inpdata) { - if (inpdata.size() < 10) - throw IllegalArgumentException("Not enough data to decode"); - - std::vector data(inpdata.size()); - data[0] = (uint16_t)inpdata[0]; - - // Don't adjust the first value since that's the version number. 
- for (size_t i = 1; i < inpdata.size(); ++i) { - data[i] = (uint16_t)inpdata[i] - 2; - } - - std::string buf; - size_t p = 0; - size_t version = data[p++]; - if (version != ATNDeserializer::SERIALIZED_VERSION) { - std::string reason = "Could not deserialize ATN with version " + std::to_string(version) + "(expected " + - std::to_string(ATNDeserializer::SERIALIZED_VERSION) + ")."; - throw UnsupportedOperationException("ATN Serializer" + reason); - } - - Guid uuid = ATNDeserializer::toUUID(data.data(), p); - p += 8; - if (uuid != ATNDeserializer::SERIALIZED_UUID()) { - std::string reason = "Could not deserialize ATN with UUID " + uuid.toString() + " (expected " + - ATNDeserializer::SERIALIZED_UUID().toString() + ")."; - throw UnsupportedOperationException("ATN Serializer" + reason); - } - - p++; // skip grammarType - size_t maxType = data[p++]; - buf.append("max type ").append(std::to_string(maxType)).append("\n"); - size_t nstates = data[p++]; - for (size_t i = 0; i < nstates; i++) { - size_t stype = data[p++]; - if (stype == ATNState::ATN_INVALID_TYPE) { // ignore bad type of states - continue; - } - size_t ruleIndex = data[p++]; - if (ruleIndex == 0xFFFF) { - ruleIndex = INVALID_INDEX; - } - - std::string arg = ""; - if (stype == ATNState::LOOP_END) { - int loopBackStateNumber = data[p++]; - arg = std::string(" ") + std::to_string(loopBackStateNumber); - } - else if (stype == ATNState::PLUS_BLOCK_START || - stype == ATNState::STAR_BLOCK_START || - stype == ATNState::BLOCK_START) { - int endStateNumber = data[p++]; - arg = std::string(" ") + std::to_string(endStateNumber); - } - buf.append(std::to_string(i)) - .append(":") - .append(ATNState::serializationNames[stype]) - .append(" ") - .append(std::to_string(ruleIndex)) - .append(arg) - .append("\n"); - } - size_t numNonGreedyStates = data[p++]; - p += numNonGreedyStates; // Instead of that useless loop below. 
- /* - for (int i = 0; i < numNonGreedyStates; i++) { - int stateNumber = data[p++]; - } - */ - - size_t numPrecedenceStates = data[p++]; - p += numPrecedenceStates; - /* - for (int i = 0; i < numPrecedenceStates; i++) { - int stateNumber = data[p++]; - } - */ - - size_t nrules = data[p++]; - for (size_t i = 0; i < nrules; i++) { - size_t s = data[p++]; - if (atn->grammarType == ATNType::LEXER) { - size_t arg1 = data[p++]; - buf.append("rule ") - .append(std::to_string(i)) - .append(":") - .append(std::to_string(s)) - .append(" ") - .append(std::to_string(arg1)) - .append("\n"); - } - else { - buf.append("rule ") - .append(std::to_string(i)) - .append(":") - .append(std::to_string(s)) - .append("\n"); - } - } - size_t nmodes = data[p++]; - for (size_t i = 0; i < nmodes; i++) { - size_t s = data[p++]; - buf.append("mode ") - .append(std::to_string(i)) - .append(":") - .append(std::to_string(s)) - .append("\n"); - } - size_t nsets = data[p++]; - for (size_t i = 0; i < nsets; i++) { - size_t nintervals = data[p++]; - buf.append(std::to_string(i)).append(":"); - bool containsEof = data[p++] != 0; - if (containsEof) { - buf.append(getTokenName(Token::EOF)); - } - - for (size_t j = 0; j < nintervals; j++) { - if (containsEof || j > 0) { - buf.append(", "); - } - - buf.append(getTokenName(data[p])) - .append("..") - .append(getTokenName(data[p + 1])); - p += 2; - } - buf.append("\n"); - } - size_t nedges = data[p++]; - for (size_t i = 0; i < nedges; i++) { - size_t src = data[p]; - size_t trg = data[p + 1]; - size_t ttype = data[p + 2]; - size_t arg1 = data[p + 3]; - size_t arg2 = data[p + 4]; - size_t arg3 = data[p + 5]; - buf.append(std::to_string(src)) - .append("->") - .append(std::to_string(trg)) - .append(" ") - .append(Transition::serializationNames[ttype]) - .append(" ") - .append(std::to_string(arg1)) - .append(",") - .append(std::to_string(arg2)) - .append(",") - .append(std::to_string(arg3)) - .append("\n"); - p += 6; - } - size_t ndecisions = data[p++]; - for 
(size_t i = 0; i < ndecisions; i++) { - size_t s = data[p++]; - buf += std::to_string(i) + ":" + std::to_string(s) + "\n"; - } - - if (atn->grammarType == ATNType::LEXER) { - //int lexerActionCount = data[p++]; - - //p += lexerActionCount * 3; // Instead of useless loop below. - /* - for (int i = 0; i < lexerActionCount; i++) { - LexerActionType actionType = (LexerActionType)data[p++]; - int data1 = data[p++]; - int data2 = data[p++]; - } - */ - } - - return buf; -} - -std::string ATNSerializer::getTokenName(size_t t) { - if (t == Token::EOF) { - return "EOF"; - } - - if (atn->grammarType == ATNType::LEXER && t <= 0x10FFFF) { - switch (t) { - case '\n': - return "'\\n'"; - case '\r': - return "'\\r'"; - case '\t': - return "'\\t'"; - case '\b': - return "'\\b'"; - case '\f': - return "'\\f'"; - case '\\': - return "'\\\\'"; - case '\'': - return "'\\''"; - default: - std::string s_hex = antlrcpp::toHexString((int)t); - if (s_hex >= "0" && s_hex <= "7F" && !iscntrl((int)t)) { - return "'" + std::to_string(t) + "'"; - } - - // turn on the bit above max "\u10FFFF" value so that we pad with zeros - // then only take last 6 digits - std::string hex = antlrcpp::toHexString((int)t | 0x1000000).substr(1, 6); - std::string unicodeStr = std::string("'\\u") + hex + std::string("'"); - return unicodeStr; - } - } - - if (_tokenNames.size() > 0 && t < _tokenNames.size()) { - return _tokenNames[t]; - } - - return std::to_string(t); -} - -std::wstring ATNSerializer::getSerializedAsString(ATN *atn) { - std::vector data = getSerialized(atn); - std::wstring result; - for (size_t entry : data) - result.push_back((wchar_t)entry); - - return result; -} - -std::vector ATNSerializer::getSerialized(ATN *atn) { - return ATNSerializer(atn).serialize(); -} - -std::string ATNSerializer::getDecoded(ATN *atn, std::vector &tokenNames) { - std::wstring serialized = getSerializedAsString(atn); - return ATNSerializer(atn, tokenNames).decode(serialized); -} - -void 
ATNSerializer::serializeUUID(std::vector &data, Guid uuid) { - unsigned int twoBytes = 0; - bool firstByte = true; - for( std::vector::const_reverse_iterator rit = uuid.rbegin(); rit != uuid.rend(); ++rit ) - { - if (firstByte) { - twoBytes = *rit; - firstByte = false; - } else { - twoBytes |= (*rit << 8); - data.push_back(twoBytes); - firstByte = true; - } - } - if (!firstByte) - throw IllegalArgumentException( "The UUID provided is not valid (odd number of bytes)." ); -} diff --git a/runtime/Cpp/runtime/src/atn/ATNSerializer.h b/runtime/Cpp/runtime/src/atn/ATNSerializer.h deleted file mode 100755 index a6d1d6976f..0000000000 --- a/runtime/Cpp/runtime/src/atn/ATNSerializer.h +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#pragma once - -namespace antlr4 { -namespace atn { - - class ANTLR4CPP_PUBLIC ATNSerializer { - public: - ATN *atn; - - ATNSerializer(ATN *atn); - ATNSerializer(ATN *atn, const std::vector &tokenNames); - virtual ~ATNSerializer(); - - /// - /// Serialize state descriptors, edge descriptors, and decision->state map - /// into list of ints: - /// - /// grammar-type, (ANTLRParser.LEXER, ...) - /// max token type, - /// num states, - /// state-0-type ruleIndex, state-1-type ruleIndex, ... state-i-type - /// ruleIndex optional-arg ... - /// num rules, - /// rule-1-start-state rule-1-args, rule-2-start-state rule-2-args, ... - /// (args are token type,actionIndex in lexer else 0,0) - /// num modes, - /// mode-0-start-state, mode-1-start-state, ... (parser has 0 modes) - /// num sets - /// set-0-interval-count intervals, set-1-interval-count intervals, ... - /// num total edges, - /// src, trg, edge-type, edge arg1, optional edge arg2 (present always), - /// ... - /// num decisions, - /// decision-0-start-state, decision-1-start-state, ... 
- /// - /// Convenient to pack into unsigned shorts to make as Java string. - /// - virtual std::vector serialize(); - - virtual std::string decode(const std::wstring& data); - virtual std::string getTokenName(size_t t); - - /// Used by Java target to encode short/int array as chars in string. - static std::wstring getSerializedAsString(ATN *atn); - static std::vector getSerialized(ATN *atn); - - static std::string getDecoded(ATN *atn, std::vector &tokenNames); - - private: - std::vector _tokenNames; - - void serializeUUID(std::vector &data, Guid uuid); - }; - -} // namespace atn -} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp old mode 100755 new mode 100644 index 29570b90db..3affe85815 --- a/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp @@ -3,61 +3,34 @@ * can be found in the LICENSE.txt file in the project root. */ -#include "atn/ATNType.h" +#include +#include +#include "atn/ATNSimulator.h" + +#include "antlr4-common.h" #include "atn/ATNConfigSet.h" -#include "dfa/DFAState.h" #include "atn/ATNDeserializer.h" -#include "atn/EmptyPredictionContext.h" - -#include "atn/ATNSimulator.h" +#include "atn/ATNType.h" +#include "dfa/DFAState.h" using namespace antlr4; using namespace antlr4::dfa; using namespace antlr4::atn; -const Ref ATNSimulator::ERROR = std::make_shared(INT32_MAX); -antlrcpp::SingleWriteMultipleReadLock ATNSimulator::_stateLock; -antlrcpp::SingleWriteMultipleReadLock ATNSimulator::_edgeLock; +const Ref ATNSimulator::ERROR = std::make_shared(std::numeric_limits::max()); ATNSimulator::ATNSimulator(const ATN &atn, PredictionContextCache &sharedContextCache) -: atn(atn), _sharedContextCache(sharedContextCache) { -} - -ATNSimulator::~ATNSimulator() { -} + : atn(atn), _sharedContextCache(sharedContextCache) {} void ATNSimulator::clearDFA() { throw UnsupportedOperationException("This ATN simulator does not support clearing the 
DFA."); } -PredictionContextCache& ATNSimulator::getSharedContextCache() { +PredictionContextCache& ATNSimulator::getSharedContextCache() const { return _sharedContextCache; } -Ref ATNSimulator::getCachedContext(Ref const& context) { +Ref ATNSimulator::getCachedContext(const Ref &context) { // This function must only be called with an active state lock, as we are going to change a shared structure. - std::map, Ref> visited; - return PredictionContext::getCachedContext(context, _sharedContextCache, visited); -} - -ATN ATNSimulator::deserialize(const std::vector &data) { - ATNDeserializer deserializer; - return deserializer.deserialize(data); -} - -void ATNSimulator::checkCondition(bool condition) { - ATNDeserializer::checkCondition(condition); -} - -void ATNSimulator::checkCondition(bool condition, const std::string &message) { - ATNDeserializer::checkCondition(condition, message); -} - -Transition *ATNSimulator::edgeFactory(const ATN &atn, int type, int src, int trg, int arg1, int arg2, int arg3, - const std::vector &sets) { - return ATNDeserializer::edgeFactory(atn, type, src, trg, arg1, arg2, arg3, sets); -} - -ATNState *ATNSimulator::stateFactory(int type, int ruleIndex) { - return ATNDeserializer::stateFactory(type, ruleIndex); + return PredictionContext::getCachedContext(context, getSharedContextCache()); } diff --git a/runtime/Cpp/runtime/src/atn/ATNSimulator.h b/runtime/Cpp/runtime/src/atn/ATNSimulator.h old mode 100755 new mode 100644 index f702c97f90..aee01b9df9 --- a/runtime/Cpp/runtime/src/atn/ATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/ATNSimulator.h @@ -6,9 +6,11 @@ #pragma once #include "atn/ATN.h" +#include "antlr4-common.h" +#include "atn/PredictionContext.h" +#include "atn/PredictionContextCache.h" #include "misc/IntervalSet.h" #include "support/CPPUtils.h" -#include "atn/PredictionContext.h" namespace antlr4 { namespace atn { @@ -20,7 +22,8 @@ namespace atn { const ATN &atn; ATNSimulator(const ATN &atn, PredictionContextCache 
&sharedContextCache); - virtual ~ATNSimulator(); + + virtual ~ATNSimulator() = default; virtual void reset() = 0; @@ -36,29 +39,11 @@ namespace atn { * @since 4.3 */ virtual void clearDFA(); - virtual PredictionContextCache& getSharedContextCache(); - virtual Ref getCachedContext(Ref const& context); - - /// @deprecated Use instead. - static ATN deserialize(const std::vector &data); - /// @deprecated Use instead. - static void checkCondition(bool condition); - - /// @deprecated Use instead. - static void checkCondition(bool condition, const std::string &message); - - /// @deprecated Use instead. - static Transition *edgeFactory(const ATN &atn, int type, int src, int trg, int arg1, int arg2, int arg3, - const std::vector &sets); - - /// @deprecated Use instead. - static ATNState *stateFactory(int type, int ruleIndex); + PredictionContextCache& getSharedContextCache() const; + Ref getCachedContext(const Ref &context); protected: - static antlrcpp::SingleWriteMultipleReadLock _stateLock; // Lock for DFA states. - static antlrcpp::SingleWriteMultipleReadLock _edgeLock; // Lock for the sparse edge map in DFA states. - /// /// The context cache maps all PredictionContext objects that are equals() /// to a single cached copy. This cache is shared across all contexts diff --git a/runtime/Cpp/runtime/src/atn/ATNState.cpp b/runtime/Cpp/runtime/src/atn/ATNState.cpp old mode 100755 new mode 100644 index 9bc074ce0c..a7c00c4ff6 --- a/runtime/Cpp/runtime/src/atn/ATNState.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNState.cpp @@ -3,6 +3,9 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "atn/ATN.h" #include "atn/Transition.h" #include "misc/IntervalSet.h" @@ -13,30 +16,15 @@ using namespace antlr4::atn; using namespace antlrcpp; -ATNState::ATNState() { -} - -ATNState::~ATNState() { - for (auto transition : transitions) { - delete transition; - } -} - -const std::vector ATNState::serializationNames = { - "INVALID", "BASIC", "RULE_START", "BLOCK_START", - "PLUS_BLOCK_START", "STAR_BLOCK_START", "TOKEN_START", "RULE_STOP", - "BLOCK_END", "STAR_LOOP_BACK", "STAR_LOOP_ENTRY", "PLUS_LOOP_BACK", "LOOP_END" -}; - -size_t ATNState::hashCode() { +size_t ATNState::hashCode() const { return stateNumber; } -bool ATNState::operator == (const ATNState &other) { +bool ATNState::equals(const ATNState &other) const { return stateNumber == other.stateNumber; } -bool ATNState::isNonGreedyExitState() { +bool ATNState::isNonGreedyExitState() const { return false; } @@ -44,14 +32,13 @@ std::string ATNState::toString() const { return std::to_string(stateNumber); } -void ATNState::addTransition(Transition *e) { - addTransition(transitions.size(), e); +void ATNState::addTransition(ConstTransitionPtr e) { + addTransition(transitions.size(), std::move(e)); } -void ATNState::addTransition(size_t index, Transition *e) { - for (Transition *transition : transitions) +void ATNState::addTransition(size_t index, ConstTransitionPtr e) { + for (const auto &transition : transitions) if (transition->target->stateNumber == e->target->stateNumber) { - delete e; return; } @@ -62,11 +49,11 @@ void ATNState::addTransition(size_t index, Transition *e) { epsilonOnlyTransitions = false; } - transitions.insert(transitions.begin() + index, e); + transitions.insert(transitions.begin() + index, std::move(e)); } -Transition *ATNState::removeTransition(size_t index) { - Transition *result = transitions[index]; +ConstTransitionPtr ATNState::removeTransition(size_t index) { + ConstTransitionPtr result = std::move(transitions[index]); 
transitions.erase(transitions.begin() + index); return result; } diff --git a/runtime/Cpp/runtime/src/atn/ATNState.h b/runtime/Cpp/runtime/src/atn/ATNState.h old mode 100755 new mode 100644 index 6c73d94355..06175ac97a --- a/runtime/Cpp/runtime/src/atn/ATNState.h +++ b/runtime/Cpp/runtime/src/atn/ATNState.h @@ -5,7 +5,14 @@ #pragma once +#include +#include +#include +#include #include "misc/IntervalSet.h" +#include "antlr4-common.h" +#include "atn/Transition.h" +#include "atn/ATNStateType.h" namespace antlr4 { namespace atn { @@ -70,55 +77,53 @@ namespace atn { /// /// /// + +// GCC generates a warning here if ATN has already been declared due to the +// attributes added by ANTLR4CPP_PUBLIC. +// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=39159 +// Only forward-declare if it hasn't already been declared. +#ifndef ANTLR4CPP_ATN_DECLARED class ANTLR4CPP_PUBLIC ATN; +#endif class ANTLR4CPP_PUBLIC ATNState { public: - ATNState(); + static constexpr size_t INITIAL_NUM_TRANSITIONS = 4; + static constexpr size_t INVALID_STATE_NUMBER = std::numeric_limits::max(); + + size_t stateNumber = INVALID_STATE_NUMBER; + size_t ruleIndex = 0; // at runtime, we don't have Rule objects + bool epsilonOnlyTransitions = false; + + /// Track the transitions emanating from this ATN state. 
+ std::vector transitions; + + ATNState() = delete; + ATNState(ATNState const&) = delete; - virtual ~ATNState(); + ATNState(ATNState&&) = delete; + + virtual ~ATNState() = default; ATNState& operator=(ATNState const&) = delete; - static const size_t INITIAL_NUM_TRANSITIONS = 4; - static const size_t INVALID_STATE_NUMBER = static_cast(-1); // std::numeric_limits::max(); - - enum { - ATN_INVALID_TYPE = 0, - BASIC = 1, - RULE_START = 2, - BLOCK_START = 3, - PLUS_BLOCK_START = 4, - STAR_BLOCK_START = 5, - TOKEN_START = 6, - RULE_STOP = 7, - BLOCK_END = 8, - STAR_LOOP_BACK = 9, - STAR_LOOP_ENTRY = 10, - PLUS_LOOP_BACK = 11, - LOOP_END = 12 - }; - - static const std::vector serializationNames; + ATNState& operator=(ATNState&&) = delete; - size_t stateNumber = INVALID_STATE_NUMBER; - size_t ruleIndex = 0; // at runtime, we don't have Rule objects - bool epsilonOnlyTransitions = false; + void addTransition(ConstTransitionPtr e); + void addTransition(size_t index, ConstTransitionPtr e); + ConstTransitionPtr removeTransition(size_t index); - public: - virtual size_t hashCode(); - bool operator == (const ATNState &other); - - /// Track the transitions emanating from this ATN state. - std::vector transitions; + virtual size_t hashCode() const; + virtual bool equals(const ATNState &other) const; - virtual bool isNonGreedyExitState(); + virtual bool isNonGreedyExitState() const; virtual std::string toString() const; - virtual void addTransition(Transition *e); - virtual void addTransition(size_t index, Transition *e); - virtual Transition* removeTransition(size_t index); - virtual size_t getStateType() = 0; + + ATNStateType getStateType() const { return _stateType; } + + protected: + explicit ATNState(ATNStateType stateType) : _stateType(stateType) {} private: /// Used to cache lookahead during parsing, not used during construction. 
@@ -126,8 +131,14 @@ namespace atn { misc::IntervalSet _nextTokenWithinRule; std::atomic _nextTokenUpdated { false }; + const ATNStateType _stateType; + friend class ATN; }; + inline bool operator==(const ATNState &lhs, const ATNState &rhs) { return lhs.equals(rhs); } + + inline bool operator!=(const ATNState &lhs, const ATNState &rhs) { return !operator==(lhs, rhs); } + } // namespace atn } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/ATNStateType.cpp b/runtime/Cpp/runtime/src/atn/ATNStateType.cpp new file mode 100644 index 0000000000..baf2d572eb --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/ATNStateType.cpp @@ -0,0 +1,34 @@ +#include +#include "atn/ATNStateType.h" + +std::string antlr4::atn::atnStateTypeName(ATNStateType atnStateType) { + switch (atnStateType) { + case ATNStateType::INVALID: + return "INVALID"; + case ATNStateType::BASIC: + return "BASIC"; + case ATNStateType::RULE_START: + return "RULE_START"; + case ATNStateType::BLOCK_START: + return "BLOCK_START"; + case ATNStateType::PLUS_BLOCK_START: + return "PLUS_BLOCK_START"; + case ATNStateType::STAR_BLOCK_START: + return "STAR_BLOCK_START"; + case ATNStateType::TOKEN_START: + return "TOKEN_START"; + case ATNStateType::RULE_STOP: + return "RULE_STOP"; + case ATNStateType::BLOCK_END: + return "BLOCK_END"; + case ATNStateType::STAR_LOOP_BACK: + return "STAR_LOOP_BACK"; + case ATNStateType::STAR_LOOP_ENTRY: + return "STAR_LOOP_ENTRY"; + case ATNStateType::PLUS_LOOP_BACK: + return "PLUS_LOOP_BACK"; + case ATNStateType::LOOP_END: + return "LOOP_END"; + } + return "UNKNOWN"; +} diff --git a/runtime/Cpp/runtime/src/atn/ATNStateType.h b/runtime/Cpp/runtime/src/atn/ATNStateType.h new file mode 100644 index 0000000000..e19b2cce92 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/ATNStateType.h @@ -0,0 +1,36 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +#pragma once + +#include +#include + +#include "antlr4-common.h" + +namespace antlr4 { +namespace atn { + + // Constants for ATNState serialization. + enum class ATNStateType : size_t { + INVALID = 0, + BASIC = 1, + RULE_START = 2, + BLOCK_START = 3, + PLUS_BLOCK_START = 4, + STAR_BLOCK_START = 5, + TOKEN_START = 6, + RULE_STOP = 7, + BLOCK_END = 8, + STAR_LOOP_BACK = 9, + STAR_LOOP_ENTRY = 10, + PLUS_LOOP_BACK = 11, + LOOP_END = 12, + }; + + ANTLR4CPP_PUBLIC std::string atnStateTypeName(ATNStateType atnStateType); + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp b/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp deleted file mode 100755 index ef8afc25ed..0000000000 --- a/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp +++ /dev/null @@ -1,14 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "atn/AbstractPredicateTransition.h" - -using namespace antlr4::atn; - -AbstractPredicateTransition::AbstractPredicateTransition(ATNState *target) : Transition(target) { -} - -AbstractPredicateTransition::~AbstractPredicateTransition() { -} diff --git a/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h b/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h deleted file mode 100755 index 4865cb1bd1..0000000000 --- a/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#pragma once - -#include "atn/Transition.h" - -namespace antlr4 { -namespace atn { - - class ANTState; - - class ANTLR4CPP_PUBLIC AbstractPredicateTransition : public Transition { - - public: - AbstractPredicateTransition(ATNState *target); - ~AbstractPredicateTransition(); - - }; - -} // namespace atn -} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/ActionTransition.cpp b/runtime/Cpp/runtime/src/atn/ActionTransition.cpp old mode 100755 new mode 100644 index fa11c4485e..eb4c9d09d9 --- a/runtime/Cpp/runtime/src/atn/ActionTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/ActionTransition.cpp @@ -3,20 +3,21 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include "antlr4-common.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/ActionTransition.h" using namespace antlr4::atn; ActionTransition::ActionTransition(ATNState *target, size_t ruleIndex) - : Transition(target), ruleIndex(ruleIndex), actionIndex(INVALID_INDEX), isCtxDependent(false) { + : Transition(TransitionType::ACTION, target), ruleIndex(ruleIndex), actionIndex(INVALID_INDEX), isCtxDependent(false) { } ActionTransition::ActionTransition(ATNState *target, size_t ruleIndex, size_t actionIndex, bool isCtxDependent) - : Transition(target), ruleIndex(ruleIndex), actionIndex(actionIndex), isCtxDependent(isCtxDependent) { -} - -Transition::SerializationType ActionTransition::getSerializationType() const { - return ACTION; + : Transition(TransitionType::ACTION, target), ruleIndex(ruleIndex), actionIndex(actionIndex), isCtxDependent(isCtxDependent) { } bool ActionTransition::isEpsilon() const { diff --git a/runtime/Cpp/runtime/src/atn/ActionTransition.h b/runtime/Cpp/runtime/src/atn/ActionTransition.h old mode 100755 new mode 100644 index 652e75f3bf..ed47a4985a --- a/runtime/Cpp/runtime/src/atn/ActionTransition.h +++ b/runtime/Cpp/runtime/src/atn/ActionTransition.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include 
"antlr4-common.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" namespace antlr4 { @@ -12,6 +17,10 @@ namespace atn { class ANTLR4CPP_PUBLIC ActionTransition final : public Transition { public: + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::ACTION; } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } + const size_t ruleIndex; const size_t actionIndex; const bool isCtxDependent; // e.g., $i ref in action @@ -20,13 +29,11 @@ namespace atn { ActionTransition(ATNState *target, size_t ruleIndex, size_t actionIndex, bool isCtxDependent); - virtual SerializationType getSerializationType() const override; - - virtual bool isEpsilon() const override; + bool isEpsilon() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; - virtual std::string toString() const override; + std::string toString() const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp b/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp old mode 100755 new mode 100644 index 72ce922633..d87ce6dfc4 --- a/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "atn/AmbiguityInfo.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h b/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h old mode 100755 new mode 100644 index db594a1f48..2f8eae8fb2 --- a/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h +++ b/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h @@ -5,7 +5,9 @@ #pragma once +#include #include "atn/DecisionEventInfo.h" +#include "antlr4-common.h" #include "support/BitSet.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp b/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp old mode 100755 new mode 100644 index b69d30d18d..a2093af852 --- a/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp +++ b/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp @@ -3,25 +3,48 @@ * can be found in the LICENSE.txt file in the project root. */ -#include "support/Arrays.h" -#include "atn/SingletonPredictionContext.h" - #include "atn/ArrayPredictionContext.h" +#include +#include +#include +#include +#include +#include +#include + +#include "antlr4-common.h" +#include "atn/SingletonPredictionContext.h" +#include "atn/HashUtils.h" +#include "misc/MurmurHash.h" +#include "support/Casts.h" + using namespace antlr4::atn; +using namespace antlr4::misc; +using namespace antlrcpp; -ArrayPredictionContext::ArrayPredictionContext(Ref const& a) - : ArrayPredictionContext({ a->parent }, { a->returnState }) { -} +namespace { + + bool predictionContextEqual(const Ref &lhs, const Ref &rhs) { + // parent PredictionContext pointers can be null during full context mode and + // the ctxs are in an ArrayPredictionContext. If both are null, return true + // if just one is null, return false. If both are non-null, do comparison. 
+ if ( lhs == nullptr ) return rhs == nullptr; + if ( rhs == nullptr ) return false; // lhs!=null and rhs==null + return *lhs == *rhs; // both nonnull + } -ArrayPredictionContext::ArrayPredictionContext(std::vector> const& parents_, - std::vector const& returnStates) - : PredictionContext(calculateHashCode(parents_, returnStates)), parents(parents_), returnStates(returnStates) { - assert(parents.size() > 0); - assert(returnStates.size() > 0); } -ArrayPredictionContext::~ArrayPredictionContext() { +ArrayPredictionContext::ArrayPredictionContext(const SingletonPredictionContext &predictionContext) + : ArrayPredictionContext({ predictionContext.parent }, { predictionContext.returnState }) {} + +ArrayPredictionContext::ArrayPredictionContext(std::vector> parents, + std::vector returnStates) + : PredictionContext(PredictionContextType::ARRAY), parents(std::move(parents)), returnStates(std::move(returnStates)) { + assert(this->parents.size() > 0); + assert(this->returnStates.size() > 0); + assert(this->parents.size() == this->returnStates.size()); } bool ArrayPredictionContext::isEmpty() const { @@ -33,7 +56,7 @@ size_t ArrayPredictionContext::size() const { return returnStates.size(); } -Ref ArrayPredictionContext::getParent(size_t index) const { +const Ref& ArrayPredictionContext::getParent(size_t index) const { return parents[index]; } @@ -41,18 +64,49 @@ size_t ArrayPredictionContext::getReturnState(size_t index) const { return returnStates[index]; } -bool ArrayPredictionContext::operator == (PredictionContext const& o) const { - if (this == &o) { +size_t ArrayPredictionContext::hashCodeImpl() const { + size_t hash = MurmurHash::initialize(); + hash = MurmurHash::update(hash, static_cast(getContextType())); + for (const auto &parent : parents) { + hash = MurmurHash::update(hash, parent); + } + for (const auto &returnState : returnStates) { + hash = MurmurHash::update(hash, returnState); + } + return MurmurHash::finish(hash, 1 + parents.size() + returnStates.size()); 
+} + +bool ArrayPredictionContext::equals(const PredictionContext &other) const { + if (this == std::addressof(other)) { return true; } + if (getContextType() != other.getContextType()) { + return false; + } + const auto &array = downCast(other); + const bool sameSize = returnStates.size() == array.returnStates.size() && + parents.size() == array.parents.size(); + if ( !sameSize ) { + return false; + } + + const bool sameHash = cachedHashCodeEqual(cachedHashCode(), array.cachedHashCode()); + if ( !sameHash ) { + return false; + } - const ArrayPredictionContext *other = dynamic_cast(&o); - if (other == nullptr || hashCode() != other->hashCode()) { - return false; // can't be same if hash is different + const size_t stateSizeBytes = sizeof(decltype(returnStates)::value_type); + const bool returnStateArraysEqual = + std::memcmp(returnStates.data(), array.returnStates.data(), + returnStates.size() * stateSizeBytes) == 0; + if ( !returnStateArraysEqual ) { + return false; } - return antlrcpp::Arrays::equals(returnStates, other->returnStates) && - antlrcpp::Arrays::equals(parents, other->parents); + // stack of contexts is the same + const bool parentCtxEqual = + std::equal(parents.begin(), parents.end(), array.parents.begin(), predictionContextEqual); + return parentCtxEqual; } std::string ArrayPredictionContext::toString() const { diff --git a/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h b/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h old mode 100755 new mode 100644 index 53a5b17a03..38980948ac --- a/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h +++ b/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h @@ -6,6 +6,10 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" #include "atn/PredictionContext.h" namespace antlr4 { @@ -13,29 +17,37 @@ namespace atn { class SingletonPredictionContext; - class ANTLR4CPP_PUBLIC ArrayPredictionContext : public PredictionContext { + class ANTLR4CPP_PUBLIC ArrayPredictionContext final : 
public PredictionContext { public: + static bool is(const PredictionContext &predictionContext) { return predictionContext.getContextType() == PredictionContextType::ARRAY; } + + static bool is(const PredictionContext *predictionContext) { return predictionContext != nullptr && is(*predictionContext); } + /// Parent can be empty only if full ctx mode and we make an array /// from EMPTY and non-empty. We merge EMPTY by using null parent and /// returnState == EMPTY_RETURN_STATE. // Also here: we use a strong reference to our parents to avoid having them freed prematurely. // See also SinglePredictionContext. - const std::vector> parents; + std::vector> parents; /// Sorted for merge, no duplicates; if present, EMPTY_RETURN_STATE is always last. - const std::vector returnStates; + std::vector returnStates; + + explicit ArrayPredictionContext(const SingletonPredictionContext &predictionContext); + + ArrayPredictionContext(std::vector> parents, std::vector returnStates); - ArrayPredictionContext(Ref const& a); - ArrayPredictionContext(std::vector> const& parents_, std::vector const& returnStates); - virtual ~ArrayPredictionContext(); + ArrayPredictionContext(ArrayPredictionContext&&) = default; - virtual bool isEmpty() const override; - virtual size_t size() const override; - virtual Ref getParent(size_t index) const override; - virtual size_t getReturnState(size_t index) const override; - bool operator == (const PredictionContext &o) const override; + bool isEmpty() const override; + size_t size() const override; + const Ref& getParent(size_t index) const override; + size_t getReturnState(size_t index) const override; + bool equals(const PredictionContext &other) const override; + std::string toString() const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/AtomTransition.cpp b/runtime/Cpp/runtime/src/atn/AtomTransition.cpp old mode 100755 new 
mode 100644 index af956c2435..0f0979ae58 --- a/runtime/Cpp/runtime/src/atn/AtomTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/AtomTransition.cpp @@ -3,7 +3,11 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include #include "misc/IntervalSet.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" #include "atn/AtomTransition.h" @@ -11,11 +15,7 @@ using namespace antlr4::misc; using namespace antlr4::atn; -AtomTransition::AtomTransition(ATNState *target, size_t label) : Transition(target), _label(label) { -} - -Transition::SerializationType AtomTransition::getSerializationType() const { - return ATOM; +AtomTransition::AtomTransition(ATNState *target, size_t label) : Transition(TransitionType::ATOM, target), _label(label) { } IntervalSet AtomTransition::label() const { diff --git a/runtime/Cpp/runtime/src/atn/AtomTransition.h b/runtime/Cpp/runtime/src/atn/AtomTransition.h old mode 100755 new mode 100644 index cc22e5ad9e..eacbae349a --- a/runtime/Cpp/runtime/src/atn/AtomTransition.h +++ b/runtime/Cpp/runtime/src/atn/AtomTransition.h @@ -5,6 +5,12 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "misc/IntervalSet.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" namespace antlr4 { @@ -13,17 +19,20 @@ namespace atn { /// TODO: make all transitions sets? no, should remove set edges. class ANTLR4CPP_PUBLIC AtomTransition final : public Transition { public: + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::ATOM; } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } + /// The token type or character value; or, signifies special label. 
+ /// TODO: rename this to label const size_t _label; AtomTransition(ATNState *target, size_t label); - virtual SerializationType getSerializationType() const override; - - virtual misc::IntervalSet label() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + misc::IntervalSet label() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; - virtual std::string toString() const override; + std::string toString() const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp b/runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp deleted file mode 100755 index dc3673723f..0000000000 --- a/runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "atn/BasicBlockStartState.h" - -using namespace antlr4::atn; - -size_t BasicBlockStartState::getStateType() { - return BLOCK_START; -} diff --git a/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h b/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h old mode 100755 new mode 100644 index 471fbc7bdd..e1b6c7dbc9 --- a/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h +++ b/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h @@ -6,16 +6,20 @@ #pragma once #include "antlr4-common.h" +#include "atn/ATNStateType.h" +#include "atn/ATNState.h" #include "atn/BlockStartState.h" namespace antlr4 { namespace atn { class ANTLR4CPP_PUBLIC BasicBlockStartState final : public BlockStartState { - public: - virtual size_t getStateType() override; + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::BLOCK_START; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + BasicBlockStartState() : BlockStartState(ATNStateType::BLOCK_START) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/BasicState.cpp b/runtime/Cpp/runtime/src/atn/BasicState.cpp deleted file mode 100755 index c8cda80134..0000000000 --- a/runtime/Cpp/runtime/src/atn/BasicState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "atn/BasicState.h" - -using namespace antlr4::atn; - -size_t BasicState::getStateType() { - return BASIC; -} diff --git a/runtime/Cpp/runtime/src/atn/BasicState.h b/runtime/Cpp/runtime/src/atn/BasicState.h old mode 100755 new mode 100644 index b650dc2fe0..79407e4e52 --- a/runtime/Cpp/runtime/src/atn/BasicState.h +++ b/runtime/Cpp/runtime/src/atn/BasicState.h @@ -5,16 +5,20 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" #include "atn/ATNState.h" namespace antlr4 { namespace atn { class ANTLR4CPP_PUBLIC BasicState final : public ATNState { - public: - virtual size_t getStateType() override; + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::BASIC; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + BasicState() : ATNState(ATNStateType::BASIC) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/BlockEndState.cpp b/runtime/Cpp/runtime/src/atn/BlockEndState.cpp deleted file mode 100755 index 098d52af0b..0000000000 --- a/runtime/Cpp/runtime/src/atn/BlockEndState.cpp +++ /dev/null @@ -1,15 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "atn/BlockEndState.h" - -using namespace antlr4::atn; - -BlockEndState::BlockEndState() : startState(nullptr) { -} - -size_t BlockEndState::getStateType() { - return BLOCK_END; -} diff --git a/runtime/Cpp/runtime/src/atn/BlockEndState.h b/runtime/Cpp/runtime/src/atn/BlockEndState.h old mode 100755 new mode 100644 index b24bee1b4f..3c29510da4 --- a/runtime/Cpp/runtime/src/atn/BlockEndState.h +++ b/runtime/Cpp/runtime/src/atn/BlockEndState.h @@ -5,6 +5,8 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" #include "atn/ATNState.h" namespace antlr4 { @@ -13,11 +15,13 @@ namespace atn { /// Terminal node of a simple {@code (a|b|c)} block. class ANTLR4CPP_PUBLIC BlockEndState final : public ATNState { public: - BlockStartState *startState = nullptr; + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::BLOCK_END; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } - BlockEndState(); + BlockStartState *startState = nullptr; - virtual size_t getStateType() override; + BlockEndState() : ATNState(ATNStateType::BLOCK_END) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp b/runtime/Cpp/runtime/src/atn/BlockStartState.cpp deleted file mode 100644 index 44cca8f775..0000000000 --- a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp +++ /dev/null @@ -1,9 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "BlockStartState.h" - -antlr4::atn::BlockStartState::~BlockStartState() { -} diff --git a/runtime/Cpp/runtime/src/atn/BlockStartState.h b/runtime/Cpp/runtime/src/atn/BlockStartState.h old mode 100755 new mode 100644 index 725c700f04..f01d7573dd --- a/runtime/Cpp/runtime/src/atn/BlockStartState.h +++ b/runtime/Cpp/runtime/src/atn/BlockStartState.h @@ -5,6 +5,9 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" +#include "atn/ATNState.h" #include "atn/DecisionState.h" namespace antlr4 { @@ -13,8 +16,17 @@ namespace atn { /// The start of a regular {@code (...)} block. class ANTLR4CPP_PUBLIC BlockStartState : public DecisionState { public: - ~BlockStartState(); + static bool is(const ATNState &atnState) { + const auto stateType = atnState.getStateType(); + return stateType >= ATNStateType::BLOCK_START && stateType <= ATNStateType::STAR_BLOCK_START; + } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + BlockEndState *endState = nullptr; + + protected: + using DecisionState::DecisionState; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp b/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp old mode 100755 new mode 100644 index 12442a9bc0..c704423061 --- a/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "atn/ContextSensitivityInfo.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h b/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h old mode 100755 new mode 100644 index 430ce3b6e8..c595c16a3f --- a/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h +++ b/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "atn/DecisionEventInfo.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp b/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp old mode 100755 new mode 100644 index bca6c778c0..b904e0116b --- a/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "atn/DecisionEventInfo.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h b/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h old mode 100755 new mode 100644 index af7f5f4b17..7568a73fa6 --- a/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h +++ b/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h @@ -5,6 +5,7 @@ #pragma once +#include #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp b/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp old mode 100755 new mode 100644 index ee9b1aac34..dd1e127b18 --- a/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp @@ -3,6 +3,9 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "atn/ErrorInfo.h" #include "atn/LookaheadEventInfo.h" diff --git a/runtime/Cpp/runtime/src/atn/DecisionInfo.h b/runtime/Cpp/runtime/src/atn/DecisionInfo.h old mode 100755 new mode 100644 index cfbb2e9baf..a585a5b5b6 --- a/runtime/Cpp/runtime/src/atn/DecisionInfo.h +++ b/runtime/Cpp/runtime/src/atn/DecisionInfo.h @@ -5,7 +5,11 @@ #pragma once +#include +#include +#include #include "atn/ContextSensitivityInfo.h" +#include "antlr4-common.h" #include "atn/AmbiguityInfo.h" #include "atn/PredicateEvalInfo.h" #include "atn/ErrorInfo.h" @@ -218,7 +222,7 @@ namespace atn { /// statistics for a particular decision. ///
    /// The decision number - DecisionInfo(size_t decision); + explicit DecisionInfo(size_t decision); std::string toString() const; }; diff --git a/runtime/Cpp/runtime/src/atn/DecisionState.cpp b/runtime/Cpp/runtime/src/atn/DecisionState.cpp old mode 100755 new mode 100644 index 924f814a9c..4364dd4b1d --- a/runtime/Cpp/runtime/src/atn/DecisionState.cpp +++ b/runtime/Cpp/runtime/src/atn/DecisionState.cpp @@ -3,15 +3,12 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include "atn/ATNState.h" #include "atn/DecisionState.h" using namespace antlr4::atn; -void DecisionState::InitializeInstanceFields() { - decision = -1; - nonGreedy = false; -} - std::string DecisionState::toString() const { - return "DECISION " + ATNState::toString(); + return ATNState::toString(); } diff --git a/runtime/Cpp/runtime/src/atn/DecisionState.h b/runtime/Cpp/runtime/src/atn/DecisionState.h old mode 100755 new mode 100644 index 005de251a2..e43d9de514 --- a/runtime/Cpp/runtime/src/atn/DecisionState.h +++ b/runtime/Cpp/runtime/src/atn/DecisionState.h @@ -5,6 +5,9 @@ #pragma once +#include +#include "antlr4-common.h" +#include "atn/ATNStateType.h" #include "atn/ATNState.h" namespace antlr4 { @@ -12,18 +15,22 @@ namespace atn { class ANTLR4CPP_PUBLIC DecisionState : public ATNState { public: - int decision; - bool nonGreedy; + static bool is(const ATNState &atnState) { + const auto stateType = atnState.getStateType(); + return (stateType >= ATNStateType::BLOCK_START && stateType <= ATNStateType::TOKEN_START) || + stateType == ATNStateType::PLUS_LOOP_BACK || + stateType == ATNStateType::STAR_LOOP_ENTRY; + } - private: - void InitializeInstanceFields(); + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } - public: - DecisionState() { - InitializeInstanceFields(); - } + int decision = -1; + bool nonGreedy = false; + + std::string toString() const override; - virtual std::string toString() const override; + protected: + using 
ATNState::ATNState; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp b/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp deleted file mode 100755 index 17223bf1e4..0000000000 --- a/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "atn/EmptyPredictionContext.h" - -using namespace antlr4::atn; - -EmptyPredictionContext::EmptyPredictionContext() : SingletonPredictionContext(nullptr, EMPTY_RETURN_STATE) { -} - -bool EmptyPredictionContext::isEmpty() const { - return true; -} - -size_t EmptyPredictionContext::size() const { - return 1; -} - -Ref EmptyPredictionContext::getParent(size_t /*index*/) const { - return nullptr; -} - -size_t EmptyPredictionContext::getReturnState(size_t /*index*/) const { - return returnState; -} - -bool EmptyPredictionContext::operator == (const PredictionContext &o) const { - return this == &o; -} - -std::string EmptyPredictionContext::toString() const { - return "$"; -} diff --git a/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h b/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h deleted file mode 100755 index 93c036c549..0000000000 --- a/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#pragma once - -#include "atn/SingletonPredictionContext.h" - -namespace antlr4 { -namespace atn { - - class ANTLR4CPP_PUBLIC EmptyPredictionContext : public SingletonPredictionContext { - public: - EmptyPredictionContext(); - - virtual bool isEmpty() const override; - virtual size_t size() const override; - virtual Ref getParent(size_t index) const override; - virtual size_t getReturnState(size_t index) const override; - virtual std::string toString() const override; - - virtual bool operator == (const PredictionContext &o) const override; - }; - -} // namespace atn -} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp b/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp old mode 100755 new mode 100644 index 550605d32b..7d767d1090 --- a/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp @@ -3,6 +3,11 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include "antlr4-common.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/EpsilonTransition.h" using namespace antlr4::atn; @@ -11,17 +16,13 @@ EpsilonTransition::EpsilonTransition(ATNState *target) : EpsilonTransition(targe } EpsilonTransition::EpsilonTransition(ATNState *target, size_t outermostPrecedenceReturn) - : Transition(target), _outermostPrecedenceReturn(outermostPrecedenceReturn) { + : Transition(TransitionType::EPSILON, target), _outermostPrecedenceReturn(outermostPrecedenceReturn) { } -size_t EpsilonTransition::outermostPrecedenceReturn() { +size_t EpsilonTransition::outermostPrecedenceReturn() const { return _outermostPrecedenceReturn; } -Transition::SerializationType EpsilonTransition::getSerializationType() const { - return EPSILON; -} - bool EpsilonTransition::isEpsilon() const { return true; } diff --git a/runtime/Cpp/runtime/src/atn/EpsilonTransition.h b/runtime/Cpp/runtime/src/atn/EpsilonTransition.h old mode 100755 new mode 100644 index 
41fb0fbf82..c8db57da53 --- a/runtime/Cpp/runtime/src/atn/EpsilonTransition.h +++ b/runtime/Cpp/runtime/src/atn/EpsilonTransition.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" namespace antlr4 { @@ -12,7 +17,11 @@ namespace atn { class ANTLR4CPP_PUBLIC EpsilonTransition final : public Transition { public: - EpsilonTransition(ATNState *target); + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::EPSILON; } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } + + explicit EpsilonTransition(ATNState *target); EpsilonTransition(ATNState *target, size_t outermostPrecedenceReturn); /** @@ -23,13 +32,12 @@ namespace atn { * @see ParserATNSimulator#applyPrecedenceFilter(ATNConfigSet) * @since 4.4.1 */ - size_t outermostPrecedenceReturn(); - virtual SerializationType getSerializationType() const override; + size_t outermostPrecedenceReturn() const; - virtual bool isEpsilon() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + bool isEpsilon() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; - virtual std::string toString() const override; + std::string toString() const override; private: const size_t _outermostPrecedenceReturn; // A rule index. diff --git a/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp b/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp old mode 100755 new mode 100644 index efe8507124..b241929a27 --- a/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "atn/ATNConfigSet.h" #include "atn/ErrorInfo.h" diff --git a/runtime/Cpp/runtime/src/atn/ErrorInfo.h b/runtime/Cpp/runtime/src/atn/ErrorInfo.h old mode 100755 new mode 100644 index d34642a195..e94345ade3 --- a/runtime/Cpp/runtime/src/atn/ErrorInfo.h +++ b/runtime/Cpp/runtime/src/atn/ErrorInfo.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "atn/DecisionEventInfo.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/atn/HashUtils.h b/runtime/Cpp/runtime/src/atn/HashUtils.h new file mode 100644 index 0000000000..690d204857 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/HashUtils.h @@ -0,0 +1,18 @@ +/* Copyright (c) 2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +#pragma once + +#include + +namespace antlr4 { +namespace atn { + + inline bool cachedHashCodeEqual(size_t lhs, size_t rhs) { + return lhs == rhs || lhs == 0 || rhs == 0; + } + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp old mode 100755 new mode 100644 index d7949cd1ed..e4a5913932 --- a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp +++ b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp @@ -3,16 +3,22 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "atn/RuleStopState.h" +#include "atn/ATNStateType.h" +#include "atn/TransitionType.h" +#include "Token.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/Transition.h" #include "atn/RuleTransition.h" #include "atn/SingletonPredictionContext.h" -#include "atn/AbstractPredicateTransition.h" #include "atn/WildcardTransition.h" #include "atn/NotSetTransition.h" #include "misc/IntervalSet.h" #include "atn/ATNConfig.h" -#include "atn/EmptyPredictionContext.h" #include "support/CPPUtils.h" @@ -22,10 +28,140 @@ using namespace antlr4; using namespace antlr4::atn; using namespace antlrcpp; -LL1Analyzer::LL1Analyzer(const ATN &atn) : _atn(atn) { -} +namespace { + + struct ATNConfigHasher final { + size_t operator()(const ATNConfig& atn_config) const { + return atn_config.hashCode(); + } + }; + + struct ATNConfigComparer final { + bool operator()(const ATNConfig& lhs, const ATNConfig& rhs) const { + return lhs == rhs; + } + }; + + class LL1AnalyzerImpl final { + public: + LL1AnalyzerImpl(const ATN& atn, misc::IntervalSet& look, bool seeThruPreds, bool addEOF) : _atn(atn), _look(look), _seeThruPreds(seeThruPreds), _addEOF(addEOF) {} + + /// + /// Compute set of tokens that can follow {@code s} in the ATN in the + /// specified {@code ctx}. + ///

    + /// If {@code ctx} is {@code null} and {@code stopState} or the end of the + /// rule containing {@code s} is reached, is added to + /// the result set. If {@code ctx} is not {@code null} and {@code addEOF} is + /// {@code true} and {@code stopState} or the end of the outermost rule is + /// reached, is added to the result set. + ///

    + /// the ATN state. + /// the ATN state to stop at. This can be a + /// to detect epsilon paths through a closure. + /// The outer context, or {@code null} if the outer context should + /// not be used. + /// The result lookahead set. + /// A set used for preventing epsilon closures in the ATN + /// from causing a stack overflow. Outside code should pass + /// {@code new HashSet} for this argument. + /// A set used for preventing left recursion in the + /// ATN from causing a stack overflow. Outside code should pass + /// {@code new BitSet()} for this argument. + /// {@code true} to true semantic predicates as + /// implicitly {@code true} and "see through them", otherwise {@code false} + /// to treat semantic predicates as opaque and add to the + /// result if one is encountered. + /// Add to the result if the end of the + /// outermost context is reached. This parameter has no effect if {@code ctx} + /// is {@code null}. + void LOOK(ATNState *s, ATNState *stopState, Ref const& ctx) { + if (!_lookBusy.insert(ATNConfig(s, 0, ctx)).second) { + return; + } + + // ml: s can never be null, hence no need to check if stopState is != null. 
+ if (s == stopState) { + if (ctx == nullptr) { + _look.add(Token::EPSILON); + return; + } else if (ctx->isEmpty() && _addEOF) { + _look.add(Token::EOF); + return; + } + } + + if (s->getStateType() == ATNStateType::RULE_STOP) { + if (ctx == nullptr) { + _look.add(Token::EPSILON); + return; + } else if (ctx->isEmpty() && _addEOF) { + _look.add(Token::EOF); + return; + } + + if (ctx != PredictionContext::EMPTY) { + bool removed = _calledRuleStack.test(s->ruleIndex); + _calledRuleStack[s->ruleIndex] = false; + // run thru all possible stack tops in ctx + for (size_t i = 0; i < ctx->size(); i++) { + ATNState *returnState = _atn.states[ctx->getReturnState(i)]; + LOOK(returnState, stopState, ctx->getParent(i)); + } + if (removed) { + _calledRuleStack.set(s->ruleIndex); + } + return; + } + } + + size_t n = s->transitions.size(); + for (size_t i = 0; i < n; i++) { + const Transition *t = s->transitions[i].get(); + const auto tType = t->getTransitionType(); + + if (tType == TransitionType::RULE) { + if (_calledRuleStack[(static_cast(t))->target->ruleIndex]) { + continue; + } + + Ref newContext = SingletonPredictionContext::create(ctx, (static_cast(t))->followState->stateNumber); + + _calledRuleStack.set((static_cast(t))->target->ruleIndex); + LOOK(t->target, stopState, newContext); + _calledRuleStack[(static_cast(t))->target->ruleIndex] = false; + + } else if (tType == TransitionType::PREDICATE || tType == TransitionType::PRECEDENCE) { + if (_seeThruPreds) { + LOOK(t->target, stopState, ctx); + } else { + _look.add(LL1Analyzer::HIT_PRED); + } + } else if (t->isEpsilon()) { + LOOK(t->target, stopState, ctx); + } else if (tType == TransitionType::WILDCARD) { + _look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); + } else { + misc::IntervalSet set = t->label(); + if (!set.isEmpty()) { + if (tType == TransitionType::NOT_SET) { + set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); + 
} + _look.addAll(set); + } + } + } + } + + private: + const ATN& _atn; + misc::IntervalSet& _look; + antlrcpp::BitSet _calledRuleStack; + std::unordered_set _lookBusy; + bool _seeThruPreds; + bool _addEOF; + }; -LL1Analyzer::~LL1Analyzer() { } std::vector LL1Analyzer::getDecisionLookahead(ATNState *s) const { @@ -37,16 +173,11 @@ std::vector LL1Analyzer::getDecisionLookahead(ATNState *s) co look.resize(s->transitions.size()); // Fills all interval sets with defaults. for (size_t alt = 0; alt < s->transitions.size(); alt++) { - bool seeThruPreds = false; // fail to get lookahead upon pred - - ATNConfig::Set lookBusy; - antlrcpp::BitSet callRuleStack; - _LOOK(s->transitions[alt]->target, nullptr, PredictionContext::EMPTY, - look[alt], lookBusy, callRuleStack, seeThruPreds, false); - + LL1AnalyzerImpl impl(_atn, look[alt], false, false); + impl.LOOK(s->transitions[alt]->target, nullptr, PredictionContext::EMPTY); // Wipe out lookahead for this alternative if we found nothing // or we had a predicate when we !seeThruPreds - if (look[alt].size() == 0 || look[alt].contains(HIT_PRED)) { + if (look[alt].size() == 0 || look[alt].contains(LL1Analyzer::HIT_PRED)) { look[alt].clear(); } } @@ -58,101 +189,9 @@ misc::IntervalSet LL1Analyzer::LOOK(ATNState *s, RuleContext *ctx) const { } misc::IntervalSet LL1Analyzer::LOOK(ATNState *s, ATNState *stopState, RuleContext *ctx) const { + Ref lookContext = ctx != nullptr ? PredictionContext::fromRuleContext(_atn, ctx) : nullptr; misc::IntervalSet r; - bool seeThruPreds = true; // ignore preds; get all lookahead - Ref lookContext = ctx != nullptr ? 
PredictionContext::fromRuleContext(_atn, ctx) : nullptr; - - ATNConfig::Set lookBusy; - antlrcpp::BitSet callRuleStack; - _LOOK(s, stopState, lookContext, r, lookBusy, callRuleStack, seeThruPreds, true); - + LL1AnalyzerImpl impl(_atn, r, true, true); + impl.LOOK(s, stopState, lookContext); return r; } - -void LL1Analyzer::_LOOK(ATNState *s, ATNState *stopState, Ref const& ctx, misc::IntervalSet &look, - ATNConfig::Set &lookBusy, antlrcpp::BitSet &calledRuleStack, bool seeThruPreds, bool addEOF) const { - - Ref c = std::make_shared(s, 0, ctx); - - if (lookBusy.count(c) > 0) // Keep in mind comparison is based on members of the class, not the actual instance. - return; - - lookBusy.insert(c); - - // ml: s can never be null, hence no need to check if stopState is != null. - if (s == stopState) { - if (ctx == nullptr) { - look.add(Token::EPSILON); - return; - } else if (ctx->isEmpty() && addEOF) { - look.add(Token::EOF); - return; - } - } - - if (s->getStateType() == ATNState::RULE_STOP) { - if (ctx == nullptr) { - look.add(Token::EPSILON); - return; - } else if (ctx->isEmpty() && addEOF) { - look.add(Token::EOF); - return; - } - - if (ctx != PredictionContext::EMPTY) { - // run thru all possible stack tops in ctx - for (size_t i = 0; i < ctx->size(); i++) { - ATNState *returnState = _atn.states[ctx->getReturnState(i)]; - - bool removed = calledRuleStack.test(returnState->ruleIndex); - auto onExit = finally([removed, &calledRuleStack, returnState] { - if (removed) { - calledRuleStack.set(returnState->ruleIndex); - } - }); - - calledRuleStack[returnState->ruleIndex] = false; - _LOOK(returnState, stopState, ctx->getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - } - return; - } - } - - size_t n = s->transitions.size(); - for (size_t i = 0; i < n; i++) { - Transition *t = s->transitions[i]; - - if (t->getSerializationType() == Transition::RULE) { - if (calledRuleStack[(static_cast(t))->target->ruleIndex]) { - continue; - } - - Ref newContext = 
SingletonPredictionContext::create(ctx, (static_cast(t))->followState->stateNumber); - auto onExit = finally([t, &calledRuleStack] { - calledRuleStack[(static_cast(t))->target->ruleIndex] = false; - }); - - calledRuleStack.set((static_cast(t))->target->ruleIndex); - _LOOK(t->target, stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - - } else if (is(t)) { - if (seeThruPreds) { - _LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - } else { - look.add(HIT_PRED); - } - } else if (t->isEpsilon()) { - _LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); - } else if (t->getSerializationType() == Transition::WILDCARD) { - look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); - } else { - misc::IntervalSet set = t->label(); - if (!set.isEmpty()) { - if (is(t)) { - set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); - } - look.addAll(set); - } - } - } -} diff --git a/runtime/Cpp/runtime/src/atn/LL1Analyzer.h b/runtime/Cpp/runtime/src/atn/LL1Analyzer.h old mode 100755 new mode 100644 index b945411b5f..a74e87b2a1 --- a/runtime/Cpp/runtime/src/atn/LL1Analyzer.h +++ b/runtime/Cpp/runtime/src/atn/LL1Analyzer.h @@ -5,24 +5,26 @@ #pragma once +#include +#include #include "Token.h" -#include "support/BitSet.h" -#include "atn/PredictionContext.h" +#include "misc/IntervalSet.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/ATNConfig.h" +#include "atn/PredictionContext.h" +#include "support/BitSet.h" namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC LL1Analyzer { + class ANTLR4CPP_PUBLIC LL1Analyzer final { public: /// Special value added to the lookahead sets to indicate that we hit /// a predicate during analysis if {@code seeThruPreds==false}. 
- static const size_t HIT_PRED = Token::INVALID_TYPE; + static constexpr size_t HIT_PRED = Token::INVALID_TYPE; - const atn::ATN &_atn; - - LL1Analyzer(const atn::ATN &atn); - virtual ~LL1Analyzer(); + explicit LL1Analyzer(const atn::ATN &atn) : _atn(atn) {} /// /// Calculates the SLL(1) expected lookahead set for each outgoing transition @@ -33,7 +35,7 @@ namespace atn { /// /// the ATN state /// the expected symbols for each outgoing transition of {@code s}. - virtual std::vector getDecisionLookahead(ATNState *s) const; + std::vector getDecisionLookahead(ATNState *s) const; /// /// Compute set of tokens that can follow {@code s} in the ATN in the @@ -50,7 +52,7 @@ namespace atn { /// /// The set of tokens that can follow {@code s} in the ATN in the /// specified {@code ctx}. - virtual misc::IntervalSet LOOK(ATNState *s, RuleContext *ctx) const; + misc::IntervalSet LOOK(ATNState *s, RuleContext *ctx) const; /// /// Compute set of tokens that can follow {@code s} in the ATN in the @@ -69,40 +71,10 @@ namespace atn { /// /// The set of tokens that can follow {@code s} in the ATN in the /// specified {@code ctx}. - virtual misc::IntervalSet LOOK(ATNState *s, ATNState *stopState, RuleContext *ctx) const; + misc::IntervalSet LOOK(ATNState *s, ATNState *stopState, RuleContext *ctx) const; - /// - /// Compute set of tokens that can follow {@code s} in the ATN in the - /// specified {@code ctx}. - ///

    - /// If {@code ctx} is {@code null} and {@code stopState} or the end of the - /// rule containing {@code s} is reached, is added to - /// the result set. If {@code ctx} is not {@code null} and {@code addEOF} is - /// {@code true} and {@code stopState} or the end of the outermost rule is - /// reached, is added to the result set. - ///

    - /// the ATN state. - /// the ATN state to stop at. This can be a - /// to detect epsilon paths through a closure. - /// The outer context, or {@code null} if the outer context should - /// not be used. - /// The result lookahead set. - /// A set used for preventing epsilon closures in the ATN - /// from causing a stack overflow. Outside code should pass - /// {@code new HashSet} for this argument. - /// A set used for preventing left recursion in the - /// ATN from causing a stack overflow. Outside code should pass - /// {@code new BitSet()} for this argument. - /// {@code true} to true semantic predicates as - /// implicitly {@code true} and "see through them", otherwise {@code false} - /// to treat semantic predicates as opaque and add to the - /// result if one is encountered. - /// Add to the result if the end of the - /// outermost context is reached. This parameter has no effect if {@code ctx} - /// is {@code null}. - protected: - virtual void _LOOK(ATNState *s, ATNState *stopState, Ref const& ctx, misc::IntervalSet &look, - ATNConfig::Set &lookBusy, antlrcpp::BitSet &calledRuleStack, bool seeThruPreds, bool addEOF) const; + private: + const atn::ATN &_atn; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp b/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp old mode 100755 new mode 100644 index db91c3f8ce..76f52f5152 --- a/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp @@ -3,51 +3,38 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "misc/MurmurHash.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/DecisionState.h" #include "atn/PredictionContext.h" #include "SemanticContext.h" #include "atn/LexerActionExecutor.h" #include "support/CPPUtils.h" +#include "support/Casts.h" #include "atn/LexerATNConfig.h" using namespace antlr4::atn; using namespace antlrcpp; -LexerATNConfig::LexerATNConfig(ATNState *state, int alt, Ref const& context) - : ATNConfig(state, alt, context, SemanticContext::NONE), _passedThroughNonGreedyDecision(false) { -} +LexerATNConfig::LexerATNConfig(ATNState *state, int alt, Ref context) + : ATNConfig(state, alt, std::move(context)) {} -LexerATNConfig::LexerATNConfig(ATNState *state, int alt, Ref const& context, - Ref const& lexerActionExecutor) - : ATNConfig(state, alt, context, SemanticContext::NONE), _lexerActionExecutor(lexerActionExecutor), - _passedThroughNonGreedyDecision(false) { -} +LexerATNConfig::LexerATNConfig(ATNState *state, int alt, Ref context, Ref lexerActionExecutor) + : ATNConfig(state, alt, std::move(context)), _lexerActionExecutor(std::move(lexerActionExecutor)) {} -LexerATNConfig::LexerATNConfig(Ref const& c, ATNState *state) - : ATNConfig(c, state, c->context, c->semanticContext), _lexerActionExecutor(c->_lexerActionExecutor), - _passedThroughNonGreedyDecision(checkNonGreedyDecision(c, state)) { -} +LexerATNConfig::LexerATNConfig(LexerATNConfig const& other, ATNState *state) + : ATNConfig(other, state), _lexerActionExecutor(other._lexerActionExecutor), _passedThroughNonGreedyDecision(checkNonGreedyDecision(other, state)) {} -LexerATNConfig::LexerATNConfig(Ref const& c, ATNState *state, Ref const& lexerActionExecutor) - : ATNConfig(c, state, c->context, c->semanticContext), _lexerActionExecutor(lexerActionExecutor), - _passedThroughNonGreedyDecision(checkNonGreedyDecision(c, state)) { -} +LexerATNConfig::LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref lexerActionExecutor) + : 
ATNConfig(other, state), _lexerActionExecutor(std::move(lexerActionExecutor)), _passedThroughNonGreedyDecision(checkNonGreedyDecision(other, state)) {} -LexerATNConfig::LexerATNConfig(Ref const& c, ATNState *state, Ref const& context) - : ATNConfig(c, state, context, c->semanticContext), _lexerActionExecutor(c->_lexerActionExecutor), - _passedThroughNonGreedyDecision(checkNonGreedyDecision(c, state)) { -} - -Ref LexerATNConfig::getLexerActionExecutor() const { - return _lexerActionExecutor; -} - -bool LexerATNConfig::hasPassedThroughNonGreedyDecision() { - return _passedThroughNonGreedyDecision; -} +LexerATNConfig::LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref context) + : ATNConfig(other, state, std::move(context)), _lexerActionExecutor(other._lexerActionExecutor), _passedThroughNonGreedyDecision(checkNonGreedyDecision(other, state)) {} size_t LexerATNConfig::hashCode() const { size_t hashCode = misc::MurmurHash::initialize(7); @@ -61,7 +48,7 @@ size_t LexerATNConfig::hashCode() const { return hashCode; } -bool LexerATNConfig::operator == (const LexerATNConfig& other) const +bool LexerATNConfig::operator==(const LexerATNConfig& other) const { if (this == &other) return true; @@ -75,10 +62,10 @@ bool LexerATNConfig::operator == (const LexerATNConfig& other) const return false; } - return ATNConfig::operator == (other); + return ATNConfig::operator==(other); } -bool LexerATNConfig::checkNonGreedyDecision(Ref const& source, ATNState *target) { - return source->_passedThroughNonGreedyDecision || - (is(target) && (static_cast(target))->nonGreedy); +bool LexerATNConfig::checkNonGreedyDecision(LexerATNConfig const& source, ATNState *target) { + return source._passedThroughNonGreedyDecision || + (DecisionState::is(target) && downCast(target)->nonGreedy); } diff --git a/runtime/Cpp/runtime/src/atn/LexerATNConfig.h b/runtime/Cpp/runtime/src/atn/LexerATNConfig.h old mode 100755 new mode 100644 index e25d3d1c54..afe832eaec --- 
a/runtime/Cpp/runtime/src/atn/LexerATNConfig.h +++ b/runtime/Cpp/runtime/src/atn/LexerATNConfig.h @@ -5,39 +5,42 @@ #pragma once +#include +#include "antlr4-common.h" +#include "atn/ATNState.h" #include "atn/ATNConfig.h" namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC LexerATNConfig : public ATNConfig { + class ANTLR4CPP_PUBLIC LexerATNConfig final : public ATNConfig { public: - LexerATNConfig(ATNState *state, int alt, Ref const& context); - LexerATNConfig(ATNState *state, int alt, Ref const& context, Ref const& lexerActionExecutor); + LexerATNConfig(ATNState *state, int alt, Ref context); + LexerATNConfig(ATNState *state, int alt, Ref context, Ref lexerActionExecutor); - LexerATNConfig(Ref const& c, ATNState *state); - LexerATNConfig(Ref const& c, ATNState *state, Ref const& lexerActionExecutor); - LexerATNConfig(Ref const& c, ATNState *state, Ref const& context); + LexerATNConfig(LexerATNConfig const& other, ATNState *state); + LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref lexerActionExecutor); + LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref context); /** * Gets the {@link LexerActionExecutor} capable of executing the embedded * action(s) for the current configuration. */ - Ref getLexerActionExecutor() const; - bool hasPassedThroughNonGreedyDecision(); + const Ref& getLexerActionExecutor() const { return _lexerActionExecutor; } + bool hasPassedThroughNonGreedyDecision() const { return _passedThroughNonGreedyDecision; } - virtual size_t hashCode() const override; + size_t hashCode() const override; - bool operator == (const LexerATNConfig& other) const; + bool operator==(const LexerATNConfig& other) const; private: /** * This is the backing field for {@link #getLexerActionExecutor}. 
*/ - const Ref _lexerActionExecutor; - const bool _passedThroughNonGreedyDecision; + const Ref _lexerActionExecutor; + const bool _passedThroughNonGreedyDecision = false; - static bool checkNonGreedyDecision(Ref const& source, ATNState *target); + static bool checkNonGreedyDecision(LexerATNConfig const& source, ATNState *target); }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp old mode 100755 new mode 100644 index 827c3d59f1..f9d04e1991 --- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp @@ -3,7 +3,16 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include #include "IntStream.h" +#include "atn/ATNStateType.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/OrderedATNConfigSet.h" #include "Token.h" #include "LexerNoViableAltException.h" @@ -16,40 +25,30 @@ #include "misc/Interval.h" #include "dfa/DFA.h" #include "Lexer.h" +#include "internal/Synchronization.h" #include "dfa/DFAState.h" #include "atn/LexerATNConfig.h" #include "atn/LexerActionExecutor.h" -#include "atn/EmptyPredictionContext.h" #include "atn/LexerATNSimulator.h" -#define DEBUG_ATN 0 -#define DEBUG_DFA 0 +#ifndef LEXER_DEBUG_ATN +#define LEXER_DEBUG_ATN 0 +#endif +#ifndef LEXER_DEBUG_DFA +#define LEXER_DEBUG_DFA 0 +#endif using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::internal; using namespace antlrcpp; -LexerATNSimulator::SimState::~SimState() { -} - void LexerATNSimulator::SimState::reset() { - index = INVALID_INDEX; - line = 0; - charPos = INVALID_INDEX; - dfaState = nullptr; // Don't delete. It's just a reference. 
+ *this = SimState(); } -void LexerATNSimulator::SimState::InitializeInstanceFields() { - index = INVALID_INDEX; - line = 0; - charPos = INVALID_INDEX; -} - -int LexerATNSimulator::match_calls = 0; - - LexerATNSimulator::LexerATNSimulator(const ATN &atn, std::vector &decisionToDFA, PredictionContextCache &sharedContextCache) : LexerATNSimulator(nullptr, atn, decisionToDFA, sharedContextCache) { @@ -69,7 +68,6 @@ void LexerATNSimulator::copyState(LexerATNSimulator *simulator) { } size_t LexerATNSimulator::match(CharStream *input, size_t mode) { - match_calls++; _mode = mode; ssize_t mark = input->mark(); @@ -80,10 +78,15 @@ size_t LexerATNSimulator::match(CharStream *input, size_t mode) { _startIndex = input->index(); _prevAccept.reset(); const dfa::DFA &dfa = _decisionToDFA[mode]; - if (dfa.s0 == nullptr) { + dfa::DFAState* s0; + { + SharedLock stateLock(atn._stateMutex); + s0 = dfa.s0; + } + if (s0 == nullptr) { return matchATN(input); } else { - return execATN(input, dfa.s0); + return execATN(input, s0); } } @@ -111,10 +114,7 @@ size_t LexerATNSimulator::matchATN(CharStream *input) { bool suppressEdge = s0_closure->hasSemanticContext; s0_closure->hasSemanticContext = false; - dfa::DFAState *next = addDFAState(s0_closure.release()); - if (!suppressEdge) { - _decisionToDFA[_mode].s0 = next; - } + dfa::DFAState *next = addDFAState(s0_closure.release(), suppressEdge); size_t predict = execATN(input, next); @@ -182,10 +182,10 @@ size_t LexerATNSimulator::execATN(CharStream *input, dfa::DFAState *ds0) { dfa::DFAState *LexerATNSimulator::getExistingTargetState(dfa::DFAState *s, size_t t) { dfa::DFAState* retval = nullptr; - _edgeLock.readLock(); + SharedLock edgeLock(atn._edgeMutex); if (t <= MAX_DFA_EDGE) { auto iterator = s->edges.find(t - MIN_DFA_EDGE); -#if DEBUG_ATN == 1 +#if LEXER_DEBUG_ATN == 1 if (iterator != s->edges.end()) { std::cout << std::string("reuse state ") << s->stateNumber << std::string(" edge to ") << iterator->second->stateNumber << std::endl; } 
@@ -194,7 +194,6 @@ dfa::DFAState *LexerATNSimulator::getExistingTargetState(dfa::DFAState *s, size_ if (iterator != s->edges.end()) retval = iterator->second; } - _edgeLock.readUnlock(); return retval; } @@ -209,9 +208,9 @@ dfa::DFAState *LexerATNSimulator::computeTargetState(CharStream *input, dfa::DFA if (!reach->hasSemanticContext) { // we got nowhere on t, don't throw out this knowledge; it'd // cause a failover from DFA later. - delete reach; addDFAEdge(s, t, ERROR.get()); } + delete reach; // stop when we can't match any more char return ERROR.get(); @@ -223,8 +222,7 @@ dfa::DFAState *LexerATNSimulator::computeTargetState(CharStream *input, dfa::DFA size_t LexerATNSimulator::failOrAccept(CharStream *input, ATNConfigSet *reach, size_t t) { if (_prevAccept.dfaState != nullptr) { - Ref lexerActionExecutor = _prevAccept.dfaState->lexerActionExecutor; - accept(input, lexerActionExecutor, _startIndex, _prevAccept.index, _prevAccept.line, _prevAccept.charPos); + accept(input, _prevAccept.dfaState->lexerActionExecutor, _startIndex, _prevAccept.index, _prevAccept.line, _prevAccept.charPos); return _prevAccept.dfaState->prediction; } else { // if no accept and EOF is first char, return EOF @@ -241,29 +239,29 @@ void LexerATNSimulator::getReachableConfigSet(CharStream *input, ATNConfigSet *c // than a config that already reached an accept state for the same rule size_t skipAlt = ATN::INVALID_ALT_NUMBER; - for (auto c : closure_->configs) { + for (const auto &c : closure_->configs) { bool currentAltReachedAcceptState = c->alt == skipAlt; if (currentAltReachedAcceptState && (std::static_pointer_cast(c))->hasPassedThroughNonGreedyDecision()) { continue; } -#if DEBUG_ATN == 1 +#if LEXER_DEBUG_ATN == 1 std::cout << "testing " << getTokenName((int)t) << " at " << c->toString(true) << std::endl; #endif size_t n = c->state->transitions.size(); for (size_t ti = 0; ti < n; ti++) { // for each transition - Transition *trans = c->state->transitions[ti]; + const Transition *trans = 
c->state->transitions[ti].get(); ATNState *target = getReachableTarget(trans, (int)t); if (target != nullptr) { - Ref lexerActionExecutor = std::static_pointer_cast(c)->getLexerActionExecutor(); + auto lexerActionExecutor = downCast(*c).getLexerActionExecutor(); if (lexerActionExecutor != nullptr) { lexerActionExecutor = lexerActionExecutor->fixOffsetBeforeMatch((int)input->index() - (int)_startIndex); } bool treatEofAsEpsilon = t == Token::EOF; - Ref config = std::make_shared(std::static_pointer_cast(c), - target, lexerActionExecutor); + Ref config = std::make_shared(downCast(*c), + target, std::move(lexerActionExecutor)); if (closure(input, config, reach, currentAltReachedAcceptState, true, treatEofAsEpsilon)) { // any remaining configs for this alt have a lower priority than @@ -276,9 +274,9 @@ void LexerATNSimulator::getReachableConfigSet(CharStream *input, ATNConfigSet *c } } -void LexerATNSimulator::accept(CharStream *input, const Ref &lexerActionExecutor, size_t /*startIndex*/, +void LexerATNSimulator::accept(CharStream *input, const Ref &lexerActionExecutor, size_t /*startIndex*/, size_t index, size_t line, size_t charPos) { -#if DEBUG_ATN == 1 +#if LEXER_DEBUG_ATN == 1 std::cout << "ACTION "; std::cout << toString(lexerActionExecutor) << std::endl; #endif @@ -293,7 +291,7 @@ void LexerATNSimulator::accept(CharStream *input, const Ref } } -atn::ATNState *LexerATNSimulator::getReachableTarget(Transition *trans, size_t t) { +atn::ATNState *LexerATNSimulator::getReachableTarget(const Transition *trans, size_t t) { if (trans->matches(t, Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE)) { return trans->target; } @@ -302,7 +300,7 @@ atn::ATNState *LexerATNSimulator::getReachableTarget(Transition *trans, size_t t } std::unique_ptr LexerATNSimulator::computeStartState(CharStream *input, ATNState *p) { - Ref initialContext = PredictionContext::EMPTY; // ml: the purpose of this assignment is unclear + Ref initialContext = PredictionContext::EMPTY; // ml: the purpose of 
this assignment is unclear std::unique_ptr configs(new OrderedATNConfigSet()); for (size_t i = 0; i < p->transitions.size(); i++) { ATNState *target = p->transitions[i]->target; @@ -315,12 +313,12 @@ std::unique_ptr LexerATNSimulator::computeStartState(CharStream *i bool LexerATNSimulator::closure(CharStream *input, const Ref &config, ATNConfigSet *configs, bool currentAltReachedAcceptState, bool speculative, bool treatEofAsEpsilon) { -#if DEBUG_ATN == 1 +#if LEXER_DEBUG_ATN == 1 std::cout << "closure(" << config->toString(true) << ")" << std::endl; #endif - if (is(config->state)) { -#if DEBUG_ATN == 1 + if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) { +#if LEXER_DEBUG_ATN == 1 if (_recog != nullptr) { std::cout << "closure at " << _recog->getRuleNames()[config->state->ruleIndex] << " rule stop " << config << std::endl; } else { @@ -333,7 +331,7 @@ bool LexerATNSimulator::closure(CharStream *input, const Ref &co configs->add(config); return true; } else { - configs->add(std::make_shared(config, config->state, PredictionContext::EMPTY)); + configs->add(std::make_shared(*config, config->state, PredictionContext::EMPTY)); currentAltReachedAcceptState = true; } } @@ -341,9 +339,9 @@ bool LexerATNSimulator::closure(CharStream *input, const Ref &co if (config->context != nullptr && !config->context->isEmpty()) { for (size_t i = 0; i < config->context->size(); i++) { if (config->context->getReturnState(i) != PredictionContext::EMPTY_RETURN_STATE) { - std::weak_ptr newContext = config->context->getParent(i); // "pop" return state + Ref newContext = config->context->getParent(i); // "pop" return state ATNState *returnState = atn.states[config->context->getReturnState(i)]; - Ref c = std::make_shared(config, returnState, newContext.lock()); + Ref c = std::make_shared(*config, returnState, newContext); currentAltReachedAcceptState = closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon); } } @@ -361,7 
+359,7 @@ bool LexerATNSimulator::closure(CharStream *input, const Ref &co ATNState *p = config->state; for (size_t i = 0; i < p->transitions.size(); i++) { - Transition *t = p->transitions[i]; + const Transition *t = p->transitions[i].get(); Ref c = getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon); if (c != nullptr) { currentAltReachedAcceptState = closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon); @@ -371,22 +369,22 @@ bool LexerATNSimulator::closure(CharStream *input, const Ref &co return currentAltReachedAcceptState; } -Ref LexerATNSimulator::getEpsilonTarget(CharStream *input, const Ref &config, Transition *t, +Ref LexerATNSimulator::getEpsilonTarget(CharStream *input, const Ref &config, const Transition *t, ATNConfigSet *configs, bool speculative, bool treatEofAsEpsilon) { Ref c = nullptr; - switch (t->getSerializationType()) { - case Transition::RULE: { - RuleTransition *ruleTransition = static_cast(t); - Ref newContext = SingletonPredictionContext::create(config->context, ruleTransition->followState->stateNumber); - c = std::make_shared(config, t->target, newContext); + switch (t->getTransitionType()) { + case TransitionType::RULE: { + const RuleTransition *ruleTransition = static_cast(t); + Ref newContext = SingletonPredictionContext::create(config->context, ruleTransition->followState->stateNumber); + c = std::make_shared(*config, t->target, newContext); break; } - case Transition::PRECEDENCE: + case TransitionType::PRECEDENCE: throw UnsupportedOperationException("Precedence predicates are not supported in lexers."); - case Transition::PREDICATE: { + case TransitionType::PREDICATE: { /* Track traversing semantic predicates. If we traverse, we cannot add a DFA state for this "reach" computation because the DFA would not test the predicate again in the @@ -405,20 +403,20 @@ Ref LexerATNSimulator::getEpsilonTarget(CharStream *input, const states reached by traversing predicates. 
Since this is when we test them, we cannot cash the DFA state target of ID. */ - PredicateTransition *pt = static_cast(t); + const PredicateTransition *pt = static_cast(t); -#if DEBUG_ATN == 1 - std::cout << "EVAL rule " << pt->ruleIndex << ":" << pt->predIndex << std::endl; +#if LEXER_DEBUG_ATN == 1 + std::cout << "EVAL rule " << pt->getRuleIndex() << ":" << pt->getPredIndex() << std::endl; #endif configs->hasSemanticContext = true; - if (evaluatePredicate(input, pt->ruleIndex, pt->predIndex, speculative)) { - c = std::make_shared(config, t->target); + if (evaluatePredicate(input, pt->getRuleIndex(), pt->getPredIndex(), speculative)) { + c = std::make_shared(*config, t->target); } break; } - case Transition::ACTION: + case TransitionType::ACTION: if (config->context == nullptr|| config->context->hasEmptyPath()) { // execute actions anywhere in the start rule for a token. // @@ -432,27 +430,27 @@ Ref LexerATNSimulator::getEpsilonTarget(CharStream *input, const // getEpsilonTarget to return two configurations, so // additional modifications are needed before we can support // the split operation. 
- Ref lexerActionExecutor = LexerActionExecutor::append(config->getLexerActionExecutor(), - atn.lexerActions[static_cast(t)->actionIndex]); - c = std::make_shared(config, t->target, lexerActionExecutor); + auto lexerActionExecutor = LexerActionExecutor::append(config->getLexerActionExecutor(), + atn.lexerActions[static_cast(t)->actionIndex]); + c = std::make_shared(*config, t->target, std::move(lexerActionExecutor)); break; } else { // ignore actions in referenced rules - c = std::make_shared(config, t->target); + c = std::make_shared(*config, t->target); break; } - case Transition::EPSILON: - c = std::make_shared(config, t->target); + case TransitionType::EPSILON: + c = std::make_shared(*config, t->target); break; - case Transition::ATOM: - case Transition::RANGE: - case Transition::SET: + case TransitionType::ATOM: + case TransitionType::RANGE: + case TransitionType::SET: if (treatEofAsEpsilon) { if (t->matches(Token::EOF, Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE)) { - c = std::make_shared(config, t->target); + c = std::make_shared(*config, t->target); break; } } @@ -530,12 +528,15 @@ void LexerATNSimulator::addDFAEdge(dfa::DFAState *p, size_t t, dfa::DFAState *q) return; } - _edgeLock.writeLock(); + UniqueLock edgeLock(atn._edgeMutex); p->edges[t - MIN_DFA_EDGE] = q; // connect - _edgeLock.writeUnlock(); } dfa::DFAState *LexerATNSimulator::addDFAState(ATNConfigSet *configs) { + return addDFAState(configs, true); +} + +dfa::DFAState *LexerATNSimulator::addDFAState(ATNConfigSet *configs, bool suppressEdge) { /* the lexer evaluates predicates on-the-fly; by this point configs * should not contain any configurations with unevaluated predicates. 
*/ @@ -543,8 +544,8 @@ dfa::DFAState *LexerATNSimulator::addDFAState(ATNConfigSet *configs) { dfa::DFAState *proposed = new dfa::DFAState(std::unique_ptr(configs)); /* mem-check: managed by the DFA or deleted below */ Ref firstConfigWithRuleStopState = nullptr; - for (auto &c : configs->configs) { - if (is(c->state)) { + for (const auto &c : configs->configs) { + if (RuleStopState::is(c->state)) { firstConfigWithRuleStopState = c; break; } @@ -552,28 +553,29 @@ dfa::DFAState *LexerATNSimulator::addDFAState(ATNConfigSet *configs) { if (firstConfigWithRuleStopState != nullptr) { proposed->isAcceptState = true; - proposed->lexerActionExecutor = std::dynamic_pointer_cast(firstConfigWithRuleStopState)->getLexerActionExecutor(); + proposed->lexerActionExecutor = downCast(*firstConfigWithRuleStopState).getLexerActionExecutor(); proposed->prediction = atn.ruleToTokenType[firstConfigWithRuleStopState->state->ruleIndex]; } dfa::DFA &dfa = _decisionToDFA[_mode]; - _stateLock.writeLock(); - if (!dfa.states.empty()) { - auto iterator = dfa.states.find(proposed); - if (iterator != dfa.states.end()) { + { + UniqueLock stateLock(atn._stateMutex); + auto [existing, inserted] = dfa.states.insert(proposed); + if (!inserted) { delete proposed; - _stateLock.writeUnlock(); - return *iterator; + proposed = *existing; + } else { + // Previously we did a lookup, then set fields, then inserted. It was `dfa.states.size()`, + // since we already inserted we need to subtract one. 
+ proposed->stateNumber = static_cast(dfa.states.size() - 1); + proposed->configs->setReadonly(true); + } + if (!suppressEdge) { + dfa.s0 = proposed; } } - proposed->stateNumber = (int)dfa.states.size(); - proposed->configs->setReadonly(true); - - dfa.states.insert(proposed); - _stateLock.writeUnlock(); - return proposed; } diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h old mode 100755 new mode 100644 index fa113f849c..c387bc61d1 --- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h @@ -5,7 +5,15 @@ #pragma once +#include +#include +#include +#include +#include + #include "atn/ATNSimulator.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "atn/LexerATNConfig.h" #include "atn/ATNConfigSet.h" @@ -15,31 +23,18 @@ namespace atn { /// "dup" of ParserInterpreter class ANTLR4CPP_PUBLIC LexerATNSimulator : public ATNSimulator { protected: - class SimState { - public: - virtual ~SimState(); - - protected: - size_t index; - size_t line; - size_t charPos; - dfa::DFAState *dfaState; - virtual void reset(); - friend class LexerATNSimulator; - - private: - void InitializeInstanceFields(); - - public: - SimState() { - InitializeInstanceFields(); - } - }; + struct ANTLR4CPP_PUBLIC SimState final { + size_t index = INVALID_INDEX; + size_t line = 0; + size_t charPos = INVALID_INDEX; + dfa::DFAState *dfaState = nullptr; + void reset(); + }; public: - static const size_t MIN_DFA_EDGE = 0; - static const size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN + static constexpr size_t MIN_DFA_EDGE = 0; + static constexpr size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN protected: /// @@ -82,17 +77,15 @@ namespace atn { SimState _prevAccept; public: - static int match_calls; - LexerATNSimulator(const ATN &atn, std::vector &decisionToDFA, PredictionContextCache &sharedContextCache); LexerATNSimulator(Lexer *recog, const ATN &atn, 
std::vector &decisionToDFA, PredictionContextCache &sharedContextCache); - virtual ~LexerATNSimulator () {} + ~LexerATNSimulator() override = default; virtual void copyState(LexerATNSimulator *simulator); virtual size_t match(CharStream *input, size_t mode); - virtual void reset() override; + void reset() override; - virtual void clearDFA() override; + void clearDFA() override; protected: virtual size_t matchATN(CharStream *input); @@ -133,10 +126,10 @@ namespace atn { void getReachableConfigSet(CharStream *input, ATNConfigSet *closure_, // closure_ as we have a closure() already ATNConfigSet *reach, size_t t); - virtual void accept(CharStream *input, const Ref &lexerActionExecutor, size_t startIndex, size_t index, + virtual void accept(CharStream *input, const Ref &lexerActionExecutor, size_t startIndex, size_t index, size_t line, size_t charPos); - virtual ATNState *getReachableTarget(Transition *trans, size_t t); + virtual ATNState *getReachableTarget(const Transition *trans, size_t t); virtual std::unique_ptr computeStartState(CharStream *input, ATNState *p); @@ -153,7 +146,7 @@ namespace atn { bool currentAltReachedAcceptState, bool speculative, bool treatEofAsEpsilon); // side-effect: can alter configs.hasSemanticContext - virtual Ref getEpsilonTarget(CharStream *input, const Ref &config, Transition *t, + virtual Ref getEpsilonTarget(CharStream *input, const Ref &config, const Transition *t, ATNConfigSet *configs, bool speculative, bool treatEofAsEpsilon); /// @@ -190,6 +183,8 @@ namespace atn { /// virtual dfa::DFAState *addDFAState(ATNConfigSet *configs); + virtual dfa::DFAState *addDFAState(ATNConfigSet *configs, bool suppressEdge); + public: dfa::DFA& getDFA(size_t mode); diff --git a/runtime/Cpp/runtime/src/atn/LexerAction.cpp b/runtime/Cpp/runtime/src/atn/LexerAction.cpp index 983ba6d52c..9ff8ee12a7 100644 --- a/runtime/Cpp/runtime/src/atn/LexerAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerAction.cpp @@ -1,9 +1,17 @@ -/* Copyright (c) 2012-2017 The 
ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - +#include +#include #include "LexerAction.h" -antlr4::atn::LexerAction::~LexerAction() { +using namespace antlr4::atn; + +size_t LexerAction::hashCode() const { + auto hash = cachedHashCode(); + if (hash == 0) { + hash = hashCodeImpl(); + if (hash == 0) { + hash = std::numeric_limits::max(); + } + _hashCode.store(hash, std::memory_order_relaxed); + } + return hash; } diff --git a/runtime/Cpp/runtime/src/atn/LexerAction.h b/runtime/Cpp/runtime/src/atn/LexerAction.h old mode 100755 new mode 100644 index 8e833b669f..daf2d3af27 --- a/runtime/Cpp/runtime/src/atn/LexerAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerAction.h @@ -5,6 +5,8 @@ #pragma once +#include +#include #include "atn/LexerActionType.h" #include "antlr4-common.h" @@ -21,13 +23,17 @@ namespace atn { /// class ANTLR4CPP_PUBLIC LexerAction { public: - virtual ~LexerAction(); + virtual ~LexerAction() = default; /// /// Gets the serialization type of the lexer action. /// /// The serialization type of the lexer action. - virtual LexerActionType getActionType() const = 0; + /// + /// IMPORTANT: Unlike Java, this returns LexerActionType::INDEXED_CUSTOM for instances of + /// LexerIndexedCustomAction. If you need the wrapped action type, use + /// LexerIndexedCustomAction::getAction()->getActionType(). + LexerActionType getActionType() const { return _actionType; } /// /// Gets whether the lexer action is position-dependent. Position-dependent @@ -42,7 +48,7 @@ namespace atn { /// {@code true} if the lexer action semantics can be affected by the /// position of the input at the time it is executed; /// otherwise, {@code false}. - virtual bool isPositionDependent() const = 0; + bool isPositionDependent() const { return _positionDependent; } /// /// Execute the lexer action in the context of the specified . 
@@ -51,16 +57,46 @@ namespace atn { /// positioned correctly prior to calling this method. /// /// The lexer instance. - virtual void execute(Lexer *lexer) = 0; + virtual void execute(Lexer *lexer) const = 0; - virtual size_t hashCode() const = 0; - virtual bool operator == (const LexerAction &obj) const = 0; - virtual bool operator != (const LexerAction &obj) const { - return !(*this == obj); - } + size_t hashCode() const; + + virtual bool equals(const LexerAction &other) const = 0; virtual std::string toString() const = 0; + + protected: + LexerAction(LexerActionType actionType, bool positionDependent) + : _actionType(actionType), _hashCode(0), _positionDependent(positionDependent) {} + + virtual size_t hashCodeImpl() const = 0; + + size_t cachedHashCode() const { return _hashCode.load(std::memory_order_relaxed); } + + private: + const LexerActionType _actionType; + mutable std::atomic _hashCode; + const bool _positionDependent; + }; + + inline bool operator==(const LexerAction &lhs, const LexerAction &rhs) { + return lhs.equals(rhs); + } + + inline bool operator!=(const LexerAction &lhs, const LexerAction &rhs) { + return !operator==(lhs, rhs); + } + +} // namespace atn +} // namespace antlr4 + +namespace std { + + template <> + struct hash<::antlr4::atn::LexerAction> { + size_t operator()(const ::antlr4::atn::LexerAction &lexerAction) const { + return lexerAction.hashCode(); + } }; -} // namespace atn -} // namespace antlr4 +} // namespace std diff --git a/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp b/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp old mode 100755 new mode 100644 index 1ae510f751..e8ef1e4ddd --- a/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp @@ -3,10 +3,18 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include #include "misc/MurmurHash.h" +#include "antlr4-common.h" #include "atn/LexerIndexedCustomAction.h" +#include "atn/HashUtils.h" #include "support/CPPUtils.h" #include "support/Arrays.h" +#include "support/Casts.h" #include "atn/LexerActionExecutor.h" @@ -15,48 +23,50 @@ using namespace antlr4::atn; using namespace antlr4::misc; using namespace antlrcpp; -LexerActionExecutor::LexerActionExecutor(const std::vector> &lexerActions) - : _lexerActions(lexerActions), _hashCode(generateHashCode()) { -} +namespace { + + bool lexerActionEqual(const Ref &lhs, const Ref &rhs) { + return *lhs == *rhs; + } -LexerActionExecutor::~LexerActionExecutor() { } -Ref LexerActionExecutor::append(Ref const& lexerActionExecutor, - Ref const& lexerAction) { +LexerActionExecutor::LexerActionExecutor(std::vector> lexerActions) + : _lexerActions(std::move(lexerActions)), _hashCode(0) {} + +Ref LexerActionExecutor::append(const Ref &lexerActionExecutor, + Ref lexerAction) { if (lexerActionExecutor == nullptr) { - return std::make_shared(std::vector> { lexerAction }); + return std::make_shared(std::vector>{ std::move(lexerAction) }); } - - std::vector> lexerActions = lexerActionExecutor->_lexerActions; // Make a copy. 
- lexerActions.push_back(lexerAction); - return std::make_shared(lexerActions); + std::vector> lexerActions; + lexerActions.reserve(lexerActionExecutor->_lexerActions.size() + 1); + lexerActions.insert(lexerActions.begin(), lexerActionExecutor->_lexerActions.begin(), lexerActionExecutor->_lexerActions.end()); + lexerActions.push_back(std::move(lexerAction)); + return std::make_shared(std::move(lexerActions)); } -Ref LexerActionExecutor::fixOffsetBeforeMatch(int offset) { - std::vector> updatedLexerActions; +Ref LexerActionExecutor::fixOffsetBeforeMatch(int offset) const { + std::vector> updatedLexerActions; for (size_t i = 0; i < _lexerActions.size(); i++) { - if (_lexerActions[i]->isPositionDependent() && !is(_lexerActions[i])) { + if (_lexerActions[i]->isPositionDependent() && !LexerIndexedCustomAction::is(*_lexerActions[i])) { if (updatedLexerActions.empty()) { updatedLexerActions = _lexerActions; // Make a copy. } - updatedLexerActions[i] = std::make_shared(offset, _lexerActions[i]); } } - if (updatedLexerActions.empty()) { return shared_from_this(); } - - return std::make_shared(updatedLexerActions); + return std::make_shared(std::move(updatedLexerActions)); } -std::vector> LexerActionExecutor::getLexerActions() const { +const std::vector>& LexerActionExecutor::getLexerActions() const { return _lexerActions; } -void LexerActionExecutor::execute(Lexer *lexer, CharStream *input, size_t startIndex) { +void LexerActionExecutor::execute(Lexer *lexer, CharStream *input, size_t startIndex) const { bool requiresSeek = false; size_t stopIndex = input->index(); @@ -65,43 +75,40 @@ void LexerActionExecutor::execute(Lexer *lexer, CharStream *input, size_t startI input->seek(stopIndex); } }); - for (auto lexerAction : _lexerActions) { - if (is(lexerAction)) { - int offset = (std::static_pointer_cast(lexerAction))->getOffset(); + for (const auto &lexerAction : _lexerActions) { + if (LexerIndexedCustomAction::is(*lexerAction)) { + int offset = 
downCast(*lexerAction).getOffset(); input->seek(startIndex + offset); - lexerAction = std::static_pointer_cast(lexerAction)->getAction(); requiresSeek = (startIndex + offset) != stopIndex; } else if (lexerAction->isPositionDependent()) { input->seek(stopIndex); requiresSeek = false; } - lexerAction->execute(lexer); } } size_t LexerActionExecutor::hashCode() const { - return _hashCode; -} - -bool LexerActionExecutor::operator == (const LexerActionExecutor &obj) const { - if (&obj == this) { - return true; + auto hash = _hashCode.load(std::memory_order_relaxed); + if (hash == 0) { + hash = MurmurHash::initialize(); + for (const auto &lexerAction : _lexerActions) { + hash = MurmurHash::update(hash, lexerAction); + } + hash = MurmurHash::finish(hash, _lexerActions.size()); + if (hash == 0) { + hash = std::numeric_limits::max(); + } + _hashCode.store(hash, std::memory_order_relaxed); } - - return _hashCode == obj._hashCode && Arrays::equals(_lexerActions, obj._lexerActions); -} - -bool LexerActionExecutor::operator != (const LexerActionExecutor &obj) const { - return !operator==(obj); + return hash; } -size_t LexerActionExecutor::generateHashCode() const { - size_t hash = MurmurHash::initialize(); - for (auto lexerAction : _lexerActions) { - hash = MurmurHash::update(hash, lexerAction); +bool LexerActionExecutor::equals(const LexerActionExecutor &other) const { + if (this == std::addressof(other)) { + return true; } - hash = MurmurHash::finish(hash, _lexerActions.size()); - - return hash; + return cachedHashCodeEqual(_hashCode.load(std::memory_order_relaxed), other._hashCode.load(std::memory_order_relaxed)) && + _lexerActions.size() == other._lexerActions.size() && + std::equal(_lexerActions.begin(), _lexerActions.end(), other._lexerActions.begin(), lexerActionEqual); } diff --git a/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h b/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h old mode 100755 new mode 100644 index 488b54c01d..209d37e9af --- 
a/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h +++ b/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "CharStream.h" +#include "antlr4-common.h" #include "atn/LexerAction.h" namespace antlr4 { @@ -17,13 +20,12 @@ namespace atn { /// The executor tracks position information for position-dependent lexer actions /// efficiently, ensuring that actions appearing only at the end of the rule do /// not cause bloating of the created for the lexer. - class ANTLR4CPP_PUBLIC LexerActionExecutor : public std::enable_shared_from_this { + class ANTLR4CPP_PUBLIC LexerActionExecutor final : public std::enable_shared_from_this { public: /// /// Constructs an executor for a sequence of actions. /// The lexer actions to execute. - LexerActionExecutor(const std::vector> &lexerActions); - virtual ~LexerActionExecutor(); + explicit LexerActionExecutor(std::vector> lexerActions); /// /// Creates a which executes the actions for @@ -39,8 +41,8 @@ namespace atn { /// /// A for executing the combine actions /// of {@code lexerActionExecutor} and {@code lexerAction}. - static Ref append(Ref const& lexerActionExecutor, - Ref const& lexerAction); + static Ref append(const Ref &lexerActionExecutor, + Ref lexerAction); /// /// Creates a which encodes the current offset @@ -70,12 +72,12 @@ namespace atn { /// /// A which stores input stream offsets /// for all position-dependent lexer actions. - virtual Ref fixOffsetBeforeMatch(int offset); + Ref fixOffsetBeforeMatch(int offset) const; /// /// Gets the lexer actions to be executed by this executor. /// The lexer actions to be executed by this executor. - virtual std::vector> getLexerActions() const; + const std::vector>& getLexerActions() const; /// /// Execute the actions encapsulated by this executor within the context of a @@ -95,21 +97,35 @@ namespace atn { /// The token start index. 
This value may be passed to /// to set the {@code input} position to the beginning /// of the token. - virtual void execute(Lexer *lexer, CharStream *input, size_t startIndex); + void execute(Lexer *lexer, CharStream *input, size_t startIndex) const; - virtual size_t hashCode() const; - virtual bool operator == (const LexerActionExecutor &obj) const; - virtual bool operator != (const LexerActionExecutor &obj) const; + size_t hashCode() const; + + bool equals(const LexerActionExecutor &other) const; private: - const std::vector> _lexerActions; + const std::vector> _lexerActions; + mutable std::atomic _hashCode; + }; + + inline bool operator==(const LexerActionExecutor &lhs, const LexerActionExecutor &rhs) { + return lhs.equals(rhs); + } + + inline bool operator!=(const LexerActionExecutor &lhs, const LexerActionExecutor &rhs) { + return !operator==(lhs, rhs); + } + +} // namespace atn +} // namespace antlr4 - /// Caches the result of since the hash code is an element - /// of the performance-critical operation. - const size_t _hashCode; +namespace std { - size_t generateHashCode() const; + template <> + struct hash<::antlr4::atn::LexerActionExecutor> { + size_t operator()(const ::antlr4::atn::LexerActionExecutor &lexerActionExecutor) const { + return lexerActionExecutor.hashCode(); + } }; -} // namespace atn -} // namespace antlr4 +} // namespace std diff --git a/runtime/Cpp/runtime/src/atn/LexerActionType.h b/runtime/Cpp/runtime/src/atn/LexerActionType.h old mode 100755 new mode 100644 index a72f15c4b5..cc07c94ad8 --- a/runtime/Cpp/runtime/src/atn/LexerActionType.h +++ b/runtime/Cpp/runtime/src/atn/LexerActionType.h @@ -5,6 +5,7 @@ #pragma once +#include #include "antlr4-common.h" namespace antlr4 { @@ -20,7 +21,7 @@ namespace atn { /// /// The type of a action. /// - CHANNEL, + CHANNEL = 0, /// /// The type of a action. /// @@ -49,6 +50,8 @@ namespace atn { /// The type of a action. 
/// TYPE, + + INDEXED_CUSTOM, }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp old mode 100755 new mode 100644 index 959beab3d5..a43f71f8fd --- a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp @@ -3,53 +3,44 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "misc/MurmurHash.h" #include "Lexer.h" +#include "support/Casts.h" #include "atn/LexerChannelAction.h" using namespace antlr4::atn; using namespace antlr4::misc; +using namespace antlrcpp; -LexerChannelAction::LexerChannelAction(int channel) : _channel(channel) { -} - -int LexerChannelAction::getChannel() const { - return _channel; -} - -LexerActionType LexerChannelAction::getActionType() const { - return LexerActionType::CHANNEL; -} +LexerChannelAction::LexerChannelAction(int channel) + : LexerAction(LexerActionType::CHANNEL, false), _channel(channel) {} -bool LexerChannelAction::isPositionDependent() const { - return false; +void LexerChannelAction::execute(Lexer *lexer) const { + lexer->setChannel(getChannel()); } -void LexerChannelAction::execute(Lexer *lexer) { - lexer->setChannel(_channel); -} - -size_t LexerChannelAction::hashCode() const { +size_t LexerChannelAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); - hash = MurmurHash::update(hash, _channel); + hash = MurmurHash::update(hash, getChannel()); return MurmurHash::finish(hash, 2); } -bool LexerChannelAction::operator == (const LexerAction &obj) const { - if (&obj == this) { +bool LexerChannelAction::equals(const LexerAction &other) const { + if (this == std::addressof(other)) { return true; } - - const LexerChannelAction *action = dynamic_cast(&obj); - if (action == nullptr) { + if (getActionType() != other.getActionType()) { return false; } - - return _channel == 
action->_channel; + const auto &lexerAction = downCast(other); + return getChannel() == lexerAction.getChannel(); } std::string LexerChannelAction::toString() const { - return "channel(" + std::to_string(_channel) + ")"; + return "channel(" + std::to_string(getChannel()) + ")"; } diff --git a/runtime/Cpp/runtime/src/atn/LexerChannelAction.h b/runtime/Cpp/runtime/src/atn/LexerChannelAction.h old mode 100755 new mode 100644 index 73e3a26bf6..ab8ea6bb71 --- a/runtime/Cpp/runtime/src/atn/LexerChannelAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerChannelAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerAction.h" +#include "antlr4-common.h" #include "atn/LexerActionType.h" namespace antlr4 { @@ -22,26 +25,20 @@ namespace atn { /// class ANTLR4CPP_PUBLIC LexerChannelAction final : public LexerAction { public: + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::CHANNEL; } + + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } + /// /// Constructs a new {@code channel} action with the specified channel value. /// The channel value to pass to . - LexerChannelAction(int channel); + explicit LexerChannelAction(int channel); /// /// Gets the channel to use for the created by the lexer. /// /// The channel to use for the created by the lexer. - int getChannel() const; - - /// - /// {@inheritDoc} - /// This method returns . - virtual LexerActionType getActionType() const override; - - /// - /// {@inheritDoc} - /// This method returns {@code false}. - virtual bool isPositionDependent() const override; + int getChannel() const { return _channel; } /// /// {@inheritDoc} @@ -49,11 +46,13 @@ namespace atn { /// This action is implemented by calling with the /// value provided by . 
/// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &other) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: const int _channel; diff --git a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp old mode 100755 new mode 100644 index 1e977a310d..742d185cd7 --- a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp @@ -3,60 +3,46 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "misc/MurmurHash.h" -#include "support/CPPUtils.h" #include "Lexer.h" +#include "support/Casts.h" #include "atn/LexerCustomAction.h" using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; +using namespace antlrcpp; -LexerCustomAction::LexerCustomAction(size_t ruleIndex, size_t actionIndex) : _ruleIndex(ruleIndex), _actionIndex(actionIndex) { -} - -size_t LexerCustomAction::getRuleIndex() const { - return _ruleIndex; -} +LexerCustomAction::LexerCustomAction(size_t ruleIndex, size_t actionIndex) + : LexerAction(LexerActionType::CUSTOM, true), _ruleIndex(ruleIndex), _actionIndex(actionIndex) {} -size_t LexerCustomAction::getActionIndex() const { - return _actionIndex; +void LexerCustomAction::execute(Lexer *lexer) const { + lexer->action(nullptr, getRuleIndex(), getActionIndex()); } -LexerActionType LexerCustomAction::getActionType() const { - return LexerActionType::CUSTOM; -} - -bool LexerCustomAction::isPositionDependent() const { - return true; -} - -void LexerCustomAction::execute(Lexer *lexer) { - lexer->action(nullptr, _ruleIndex, _actionIndex); -} - -size_t LexerCustomAction::hashCode() const { +size_t 
LexerCustomAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); - hash = MurmurHash::update(hash, _ruleIndex); - hash = MurmurHash::update(hash, _actionIndex); + hash = MurmurHash::update(hash, getRuleIndex()); + hash = MurmurHash::update(hash, getActionIndex()); return MurmurHash::finish(hash, 3); } -bool LexerCustomAction::operator == (const LexerAction &obj) const { - if (&obj == this) { +bool LexerCustomAction::equals(const LexerAction &other) const { + if (this == std::addressof(other)) { return true; } - - const LexerCustomAction *action = dynamic_cast(&obj); - if (action == nullptr) { + if (getActionType() != other.getActionType()) { return false; } - - return _ruleIndex == action->_ruleIndex && _actionIndex == action->_actionIndex; + const auto &lexerAction = downCast(other); + return getRuleIndex() == lexerAction.getRuleIndex() && getActionIndex() == lexerAction.getActionIndex(); } std::string LexerCustomAction::toString() const { - return antlrcpp::toString(this); + return "custom(" + std::to_string(getRuleIndex()) + ", " + std::to_string(getActionIndex()) + ")"; } diff --git a/runtime/Cpp/runtime/src/atn/LexerCustomAction.h b/runtime/Cpp/runtime/src/atn/LexerCustomAction.h old mode 100755 new mode 100644 index bd1c5d3566..4faf239e02 --- a/runtime/Cpp/runtime/src/atn/LexerCustomAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerCustomAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerAction.h" +#include "antlr4-common.h" #include "atn/LexerActionType.h" namespace antlr4 { @@ -26,6 +29,10 @@ namespace atn { /// class ANTLR4CPP_PUBLIC LexerCustomAction final : public LexerAction { public: + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::CUSTOM; } + + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } + /// /// Constructs a custom lexer action with 
the specified rule and action /// indexes. @@ -40,31 +47,13 @@ namespace atn { /// Gets the rule index to use for calls to . /// /// The rule index for the custom action. - size_t getRuleIndex() const; + size_t getRuleIndex() const { return _ruleIndex; } /// /// Gets the action index to use for calls to . /// /// The action index for the custom action. - size_t getActionIndex() const; - - /// - /// {@inheritDoc} - /// - /// This method returns . - virtual LexerActionType getActionType() const override; - - /// - /// Gets whether the lexer action is position-dependent. Position-dependent - /// actions may have different semantics depending on the - /// index at the time the action is executed. - /// - /// Custom actions are position-dependent since they may represent a - /// user-defined embedded action which makes calls to methods like - /// . - /// - /// This method returns {@code true}. - virtual bool isPositionDependent() const override; + size_t getActionIndex() const { return _actionIndex; } /// /// {@inheritDoc} @@ -72,11 +61,13 @@ namespace atn { /// Custom actions are implemented by calling with the /// appropriate rule and action indexes. 
/// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &other) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: const size_t _ruleIndex; diff --git a/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp b/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp old mode 100755 new mode 100644 index 9ea396a4c5..84d816e047 --- a/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp @@ -3,61 +3,53 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include "atn/HashUtils.h" +#include "antlr4-common.h" #include "misc/MurmurHash.h" #include "Lexer.h" #include "support/CPPUtils.h" +#include "support/Casts.h" #include "atn/LexerIndexedCustomAction.h" using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; +using namespace antlrcpp; -LexerIndexedCustomAction::LexerIndexedCustomAction(int offset, Ref const& action) - : _offset(offset), _action(action) { -} - -int LexerIndexedCustomAction::getOffset() const { - return _offset; -} +LexerIndexedCustomAction::LexerIndexedCustomAction(int offset, Ref action) + : LexerAction(LexerActionType::INDEXED_CUSTOM, true), _action(std::move(action)), _offset(offset) {} -Ref LexerIndexedCustomAction::getAction() const { - return _action; -} - -LexerActionType LexerIndexedCustomAction::getActionType() const { - return _action->getActionType(); -} - -bool LexerIndexedCustomAction::isPositionDependent() const { - return true; -} - -void LexerIndexedCustomAction::execute(Lexer *lexer) { +void LexerIndexedCustomAction::execute(Lexer *lexer) const { // assume the input stream position was 
properly set by the calling code - _action->execute(lexer); + getAction()->execute(lexer); } -size_t LexerIndexedCustomAction::hashCode() const { +size_t LexerIndexedCustomAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, _offset); - hash = MurmurHash::update(hash, _action); - return MurmurHash::finish(hash, 2); + hash = MurmurHash::update(hash, static_cast(getActionType())); + hash = MurmurHash::update(hash, getOffset()); + hash = MurmurHash::update(hash, getAction()); + return MurmurHash::finish(hash, 3); } -bool LexerIndexedCustomAction::operator == (const LexerAction &obj) const { - if (&obj == this) { +bool LexerIndexedCustomAction::equals(const LexerAction &other) const { + if (this == std::addressof(other)) { return true; } - - const LexerIndexedCustomAction *action = dynamic_cast(&obj); - if (action == nullptr) { + if (getActionType() != other.getActionType()) { return false; } - - return _offset == action->_offset && *_action == *action->_action; + const auto &lexerAction = downCast(other); + return getOffset() == lexerAction.getOffset() && + cachedHashCodeEqual(cachedHashCode(), lexerAction.cachedHashCode()) && + *getAction() == *lexerAction.getAction(); } std::string LexerIndexedCustomAction::toString() const { - return antlrcpp::toString(this); + return "indexedCustom(" + std::to_string(getOffset()) + ", " + getAction()->toString() + ")"; } diff --git a/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h b/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h old mode 100755 new mode 100644 index bb371f8e3a..87092c2664 --- a/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "RuleContext.h" +#include "antlr4-common.h" #include "atn/LexerAction.h" namespace antlr4 { @@ -26,6 +29,10 @@ namespace atn { /// class ANTLR4CPP_PUBLIC LexerIndexedCustomAction final : public 
LexerAction { public: + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::INDEXED_CUSTOM; } + + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } + /// /// Constructs a new indexed custom action by associating a character offset /// with a . @@ -38,7 +45,7 @@ namespace atn { /// executed. /// The lexer action to execute at a particular offset in the /// input . - LexerIndexedCustomAction(int offset, Ref const& action); + LexerIndexedCustomAction(int offset, Ref action); /// /// Gets the location in the input at which the lexer @@ -47,34 +54,24 @@ namespace atn { /// /// The location in the input at which the lexer /// action should be executed. - int getOffset() const; + int getOffset() const { return _offset; } /// /// Gets the lexer action to execute. /// /// A object which executes the lexer action. - Ref getAction() const; - - /// - /// {@inheritDoc} - /// - /// This method returns the result of calling - /// on the returned by . - virtual LexerActionType getActionType() const override; + const Ref& getAction() const { return _action; } - /// - /// {@inheritDoc} - /// This method returns {@code true}. 
- virtual bool isPositionDependent() const override; + void execute(Lexer *lexer) const override; + bool equals(const LexerAction &other) const override; + std::string toString() const override; - virtual void execute(Lexer *lexer) override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: + const Ref _action; const int _offset; - const Ref _action; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp old mode 100755 new mode 100644 index 0bda8b7afe..c3c6963360 --- a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp @@ -3,54 +3,44 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "misc/MurmurHash.h" #include "Lexer.h" +#include "support/Casts.h" #include "atn/LexerModeAction.h" using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; +using namespace antlrcpp; -LexerModeAction::LexerModeAction(int mode) : _mode(mode) { -} - -int LexerModeAction::getMode() { - return _mode; -} - -LexerActionType LexerModeAction::getActionType() const { - return LexerActionType::MODE; -} +LexerModeAction::LexerModeAction(int mode) : LexerAction(LexerActionType::MODE, false), _mode(mode) {} -bool LexerModeAction::isPositionDependent() const { - return false; +void LexerModeAction::execute(Lexer *lexer) const { + lexer->setMode(getMode()); } -void LexerModeAction::execute(Lexer *lexer) { - lexer->setMode(_mode); -} - -size_t LexerModeAction::hashCode() const { +size_t LexerModeAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); - hash = MurmurHash::update(hash, _mode); + hash = MurmurHash::update(hash, getMode()); return 
MurmurHash::finish(hash, 2); } -bool LexerModeAction::operator == (const LexerAction &obj) const { - if (&obj == this) { +bool LexerModeAction::equals(const LexerAction &other) const { + if (this == std::addressof(other)) { return true; } - - const LexerModeAction *action = dynamic_cast(&obj); - if (action == nullptr) { + if (getActionType() != other.getActionType()) { return false; } - - return _mode == action->_mode; + const auto &lexerAction = downCast(other); + return getMode() == lexerAction.getMode(); } std::string LexerModeAction::toString() const { - return "mode(" + std::to_string(_mode) + ")"; + return "mode(" + std::to_string(getMode()) + ")"; } diff --git a/runtime/Cpp/runtime/src/atn/LexerModeAction.h b/runtime/Cpp/runtime/src/atn/LexerModeAction.h old mode 100755 new mode 100644 index 49a858b371..0f2a3d5425 --- a/runtime/Cpp/runtime/src/atn/LexerModeAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerModeAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerAction.h" +#include "antlr4-common.h" #include "atn/LexerActionType.h" namespace antlr4 { @@ -20,26 +23,20 @@ namespace atn { /// class ANTLR4CPP_PUBLIC LexerModeAction final : public LexerAction { public: + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::MODE; } + + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } + /// /// Constructs a new {@code mode} action with the specified mode value. /// The mode value to pass to . - LexerModeAction(int mode); + explicit LexerModeAction(int mode); /// /// Get the lexer mode this action should transition the lexer to. /// /// The lexer mode for this {@code mode} command. - int getMode(); - - /// - /// {@inheritDoc} - /// This method returns . - virtual LexerActionType getActionType() const override; - - /// - /// {@inheritDoc} - /// This method returns {@code false}. 
- virtual bool isPositionDependent() const override; + int getMode() const { return _mode; } /// /// {@inheritDoc} @@ -47,11 +44,13 @@ namespace atn { /// This action is implemented by calling with the /// value provided by . /// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &obj) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: const int _mode; diff --git a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp old mode 100755 new mode 100644 index 99b2dd99bf..8dc7ed4e04 --- a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp @@ -3,7 +3,11 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "misc/MurmurHash.h" +#include "antlr4-common.h" #include "Lexer.h" #include "atn/LexerMoreAction.h" @@ -12,34 +16,23 @@ using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; -const Ref LexerMoreAction::getInstance() { - static Ref instance(new LexerMoreAction()); +const Ref& LexerMoreAction::getInstance() { + static const Ref instance(new LexerMoreAction()); return instance; } -LexerMoreAction::LexerMoreAction() { -} - -LexerActionType LexerMoreAction::getActionType() const { - return LexerActionType::MORE; -} - -bool LexerMoreAction::isPositionDependent() const { - return false; -} - -void LexerMoreAction::execute(Lexer *lexer) { +void LexerMoreAction::execute(Lexer *lexer) const { lexer->more(); } -size_t LexerMoreAction::hashCode() const { +size_t LexerMoreAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } -bool LexerMoreAction::operator == (const LexerAction &obj) const { - return &obj == this; +bool LexerMoreAction::equals(const LexerAction &other) const { + return this == std::addressof(other); } std::string LexerMoreAction::toString() const { diff --git a/runtime/Cpp/runtime/src/atn/LexerMoreAction.h b/runtime/Cpp/runtime/src/atn/LexerMoreAction.h old mode 100755 new mode 100644 index ee3b2aa617..3fbcc1bbf5 --- a/runtime/Cpp/runtime/src/atn/LexerMoreAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerMoreAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerAction.h" +#include "antlr4-common.h" #include "atn/LexerActionType.h" namespace antlr4 { @@ -22,35 +25,31 @@ namespace atn { ///
    class ANTLR4CPP_PUBLIC LexerMoreAction final : public LexerAction { public: - /// - /// Provides a singleton instance of this parameterless lexer action. - /// - static const Ref getInstance(); + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::MORE; } - /// - /// {@inheritDoc} - /// This method returns . - virtual LexerActionType getActionType() const override; + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } /// - /// {@inheritDoc} - /// This method returns {@code false}. - virtual bool isPositionDependent() const override; + /// Provides a singleton instance of this parameterless lexer action. + ///
    + static const Ref& getInstance(); /// /// {@inheritDoc} /// /// This action is implemented by calling . /// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &obj) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: /// Constructs the singleton instance of the lexer {@code more} command. - LexerMoreAction(); + LexerMoreAction() : LexerAction(LexerActionType::MORE, false) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp old mode 100755 new mode 100644 index cac0996f48..732eac527e --- a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp @@ -3,7 +3,11 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "misc/MurmurHash.h" +#include "antlr4-common.h" #include "Lexer.h" #include "atn/LexerPopModeAction.h" @@ -12,34 +16,23 @@ using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; -const Ref LexerPopModeAction::getInstance() { - static Ref instance(new LexerPopModeAction()); +const Ref& LexerPopModeAction::getInstance() { + static const Ref instance(new LexerPopModeAction()); return instance; } -LexerPopModeAction::LexerPopModeAction() { -} - -LexerActionType LexerPopModeAction::getActionType() const { - return LexerActionType::POP_MODE; -} - -bool LexerPopModeAction::isPositionDependent() const { - return false; -} - -void LexerPopModeAction::execute(Lexer *lexer) { +void LexerPopModeAction::execute(Lexer *lexer) const { lexer->popMode(); } -size_t LexerPopModeAction::hashCode() const { +size_t LexerPopModeAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } -bool LexerPopModeAction::operator == (const LexerAction &obj) const { - return &obj == this; +bool LexerPopModeAction::equals(const LexerAction &other) const { + return this == std::addressof(other); } std::string LexerPopModeAction::toString() const { diff --git a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h old mode 100755 new mode 100644 index 497305c963..4ab95bfea7 --- a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerAction.h" +#include "antlr4-common.h" #include "atn/LexerActionType.h" namespace antlr4 { @@ -22,35 +25,31 @@ namespace atn { ///
    class ANTLR4CPP_PUBLIC LexerPopModeAction final : public LexerAction { public: - /// - /// Provides a singleton instance of this parameterless lexer action. - /// - static const Ref getInstance(); + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::POP_MODE; } - /// - /// {@inheritDoc} - /// This method returns . - virtual LexerActionType getActionType() const override; + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } /// - /// {@inheritDoc} - /// This method returns {@code false}. - virtual bool isPositionDependent() const override; + /// Provides a singleton instance of this parameterless lexer action. + ///
    + static const Ref& getInstance(); /// /// {@inheritDoc} /// /// This action is implemented by calling . /// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &other) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: /// Constructs the singleton instance of the lexer {@code popMode} command. - LexerPopModeAction(); + LexerPopModeAction() : LexerAction(LexerActionType::POP_MODE, false) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp old mode 100755 new mode 100644 index 017abed04c..54f36d83cc --- a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp @@ -3,54 +3,44 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "misc/MurmurHash.h" #include "Lexer.h" +#include "support/Casts.h" #include "atn/LexerPushModeAction.h" using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; +using namespace antlrcpp; -LexerPushModeAction::LexerPushModeAction(int mode) : _mode(mode) { -} - -int LexerPushModeAction::getMode() const { - return _mode; -} - -LexerActionType LexerPushModeAction::getActionType() const { - return LexerActionType::PUSH_MODE; -} +LexerPushModeAction::LexerPushModeAction(int mode) : LexerAction(LexerActionType::PUSH_MODE, false), _mode(mode) {} -bool LexerPushModeAction::isPositionDependent() const { - return false; +void LexerPushModeAction::execute(Lexer *lexer) const { + lexer->pushMode(getMode()); } -void LexerPushModeAction::execute(Lexer *lexer) { - lexer->pushMode(_mode); -} - -size_t LexerPushModeAction::hashCode() const { +size_t LexerPushModeAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); - hash = MurmurHash::update(hash, _mode); + hash = MurmurHash::update(hash, getMode()); return MurmurHash::finish(hash, 2); } -bool LexerPushModeAction::operator == (const LexerAction &obj) const { - if (&obj == this) { +bool LexerPushModeAction::equals(const LexerAction &other) const { + if (this == std::addressof(other)) { return true; } - - const LexerPushModeAction *action = dynamic_cast(&obj); - if (action == nullptr) { + if (getActionType() != other.getActionType()) { return false; } - - return _mode == action->_mode; + const auto &lexerAction = downCast(other); + return getMode() == lexerAction.getMode(); } std::string LexerPushModeAction::toString() const { - return "pushMode(" + std::to_string(_mode) + ")"; + return "pushMode(" + std::to_string(getMode()) + ")"; } diff --git a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h old mode 100755 new mode 100644 index 
43cb888c79..de2f720653 --- a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerAction.h" +#include "antlr4-common.h" #include "atn/LexerActionType.h" namespace antlr4 { @@ -20,26 +23,20 @@ namespace atn { ///
    class ANTLR4CPP_PUBLIC LexerPushModeAction final : public LexerAction { public: + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::PUSH_MODE; } + + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } + /// /// Constructs a new {@code pushMode} action with the specified mode value. /// The mode value to pass to . - LexerPushModeAction(int mode); + explicit LexerPushModeAction(int mode); /// /// Get the lexer mode this action should transition the lexer to. /// /// The lexer mode for this {@code pushMode} command. - int getMode() const; - - /// - /// {@inheritDoc} - /// This method returns . - virtual LexerActionType getActionType() const override; - - /// - /// {@inheritDoc} - /// This method returns {@code false}. - virtual bool isPositionDependent() const override; + int getMode() const { return _mode; } /// /// {@inheritDoc} @@ -47,11 +44,13 @@ namespace atn { /// This action is implemented by calling with the /// value provided by . /// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &obj) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: const int _mode; diff --git a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp old mode 100755 new mode 100644 index 01947ce78c..bb2792995b --- a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp @@ -3,7 +3,11 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "misc/MurmurHash.h" +#include "antlr4-common.h" #include "Lexer.h" #include "atn/LexerSkipAction.h" @@ -12,34 +16,23 @@ using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; -const Ref LexerSkipAction::getInstance() { - static Ref instance(new LexerSkipAction()); +const Ref& LexerSkipAction::getInstance() { + static const Ref instance(new LexerSkipAction()); return instance; } -LexerSkipAction::LexerSkipAction() { -} - -LexerActionType LexerSkipAction::getActionType() const { - return LexerActionType::SKIP; -} - -bool LexerSkipAction::isPositionDependent() const { - return false; -} - -void LexerSkipAction::execute(Lexer *lexer) { +void LexerSkipAction::execute(Lexer *lexer) const { lexer->skip(); } -size_t LexerSkipAction::hashCode() const { +size_t LexerSkipAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } -bool LexerSkipAction::operator == (const LexerAction &obj) const { - return &obj == this; +bool LexerSkipAction::equals(const LexerAction &other) const { + return this == std::addressof(other); } std::string LexerSkipAction::toString() const { diff --git a/runtime/Cpp/runtime/src/atn/LexerSkipAction.h b/runtime/Cpp/runtime/src/atn/LexerSkipAction.h old mode 100755 new mode 100644 index 5bd2e1c166..3f0abca721 --- a/runtime/Cpp/runtime/src/atn/LexerSkipAction.h +++ b/runtime/Cpp/runtime/src/atn/LexerSkipAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerAction.h" +#include "antlr4-common.h" #include "atn/LexerActionType.h" namespace antlr4 { @@ -22,33 +25,29 @@ namespace atn { ///
    class ANTLR4CPP_PUBLIC LexerSkipAction final : public LexerAction { public: - /// Provides a singleton instance of this parameterless lexer action. - static const Ref getInstance(); + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::SKIP; } - /// - /// {@inheritDoc} - /// This method returns . - virtual LexerActionType getActionType() const override; + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } - /// - /// {@inheritDoc} - /// This method returns {@code false}. - virtual bool isPositionDependent() const override; + /// Provides a singleton instance of this parameterless lexer action. + static const Ref& getInstance(); /// /// {@inheritDoc} /// /// This action is implemented by calling . /// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &obj) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: /// Constructs the singleton instance of the lexer {@code skip} command. - LexerSkipAction(); + LexerSkipAction() : LexerAction(LexerActionType::SKIP, false) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp old mode 100755 new mode 100644 index 006778adc6..2a19512582 --- a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp @@ -3,54 +3,44 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "misc/MurmurHash.h" #include "Lexer.h" +#include "support/Casts.h" #include "atn/LexerTypeAction.h" using namespace antlr4; using namespace antlr4::atn; using namespace antlr4::misc; +using namespace antlrcpp; -LexerTypeAction::LexerTypeAction(int type) : _type(type) { -} - -int LexerTypeAction::getType() const { - return _type; -} - -LexerActionType LexerTypeAction::getActionType() const { - return LexerActionType::TYPE; -} +LexerTypeAction::LexerTypeAction(int type) : LexerAction(LexerActionType::TYPE, false), _type(type) {} -bool LexerTypeAction::isPositionDependent() const { - return false; +void LexerTypeAction::execute(Lexer *lexer) const { + lexer->setType(getType()); } -void LexerTypeAction::execute(Lexer *lexer) { - lexer->setType(_type); -} - -size_t LexerTypeAction::hashCode() const { +size_t LexerTypeAction::hashCodeImpl() const { size_t hash = MurmurHash::initialize(); hash = MurmurHash::update(hash, static_cast(getActionType())); - hash = MurmurHash::update(hash, _type); + hash = MurmurHash::update(hash, getType()); return MurmurHash::finish(hash, 2); } -bool LexerTypeAction::operator == (const LexerAction &obj) const { - if (&obj == this) { +bool LexerTypeAction::equals(const LexerAction &other) const { + if (this == std::addressof(other)) { return true; } - - const LexerTypeAction *action = dynamic_cast(&obj); - if (action == nullptr) { + if (getActionType() != other.getActionType()) { return false; } - - return _type == action->_type; + const auto &lexerAction = downCast(other); + return getType() == lexerAction.getType(); } std::string LexerTypeAction::toString() const { - return "type(" + std::to_string(_type) + ")"; + return "type(" + std::to_string(getType()) + ")"; } diff --git a/runtime/Cpp/runtime/src/atn/LexerTypeAction.h b/runtime/Cpp/runtime/src/atn/LexerTypeAction.h old mode 100755 new mode 100644 index 1c4a8a17c1..3cc28b31e0 --- a/runtime/Cpp/runtime/src/atn/LexerTypeAction.h +++ 
b/runtime/Cpp/runtime/src/atn/LexerTypeAction.h @@ -5,7 +5,10 @@ #pragma once +#include +#include #include "atn/LexerActionType.h" +#include "antlr4-common.h" #include "atn/LexerAction.h" namespace antlr4 { @@ -13,27 +16,21 @@ namespace atn { /// Implements the {@code type} lexer action by calling /// with the assigned type. - class ANTLR4CPP_PUBLIC LexerTypeAction : public LexerAction { + class ANTLR4CPP_PUBLIC LexerTypeAction final : public LexerAction { public: + static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::TYPE; } + + static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); } + /// /// Constructs a new {@code type} action with the specified token type value. /// The type to assign to the token using . - LexerTypeAction(int type); + explicit LexerTypeAction(int type); /// /// Gets the type to assign to a token created by the lexer. /// The type to assign to a token created by the lexer. - virtual int getType() const; - - /// - /// {@inheritDoc} - /// This method returns . - virtual LexerActionType getActionType() const override; - - /// - /// {@inheritDoc} - /// This method returns {@code false}. - virtual bool isPositionDependent() const override; + int getType() const { return _type; } /// /// {@inheritDoc} @@ -41,11 +38,13 @@ namespace atn { /// This action is implemented by calling with the /// value provided by . 
/// - virtual void execute(Lexer *lexer) override; + void execute(Lexer *lexer) const override; + + bool equals(const LexerAction &obj) const override; + std::string toString() const override; - virtual size_t hashCode() const override; - virtual bool operator == (const LexerAction &obj) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; private: const int _type; diff --git a/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp b/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp old mode 100755 new mode 100644 index aa3f9124c7..82539813ef --- a/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "atn/LookaheadEventInfo.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h b/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h old mode 100755 new mode 100644 index f5fc24fde2..a7c09b6b58 --- a/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h +++ b/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "atn/DecisionEventInfo.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/atn/LoopEndState.cpp b/runtime/Cpp/runtime/src/atn/LoopEndState.cpp deleted file mode 100755 index e00889856e..0000000000 --- a/runtime/Cpp/runtime/src/atn/LoopEndState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "atn/LoopEndState.h" - -using namespace antlr4::atn; - -size_t LoopEndState::getStateType() { - return LOOP_END; -} diff --git a/runtime/Cpp/runtime/src/atn/LoopEndState.h b/runtime/Cpp/runtime/src/atn/LoopEndState.h old mode 100755 new mode 100644 index c90efa398c..8441d29e0c --- a/runtime/Cpp/runtime/src/atn/LoopEndState.h +++ b/runtime/Cpp/runtime/src/atn/LoopEndState.h @@ -5,6 +5,8 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" #include "atn/ATNState.h" namespace antlr4 { @@ -13,9 +15,13 @@ namespace atn { /// Mark the end of a * or + loop. class ANTLR4CPP_PUBLIC LoopEndState final : public ATNState { public: + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::LOOP_END; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + ATNState *loopBackState = nullptr; - virtual size_t getStateType() override; + LoopEndState() : ATNState(ATNStateType::LOOP_END) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/Makefile b/runtime/Cpp/runtime/src/atn/Makefile deleted file mode 100644 index 480bd85929..0000000000 --- a/runtime/Cpp/runtime/src/atn/Makefile +++ /dev/null @@ -1,67 +0,0 @@ - -CXXFLAGS += -g -std=c++0x -Wall #-Wextra -CXXFLAGS += -I. -I../ -I../misc/ -I../tree/ -I../dfa/ \ - -I../../../../../antlrcpp/ - -#TODO LDFLAGS += ? 
- -ALL_CXXFLAGS = $(CPPFLAGS) $(CXXFLAGS) -ALL_LDFLAGS = $(LDFLAGS) - -# Escote's files -SRCS = \ - AbstractPredicateTransition.cpp \ - ActionTransition.cpp \ - ArrayPredictionContext.cpp \ - ATNDeserializationOptions.cpp \ - ATNDeserializer.cpp \ - ATNState.cpp \ - ATNType.cpp \ - AtomTransition.cpp \ - BasicBlockStartState.cpp \ - BasicState.cpp \ - BlockEndState.cpp \ - BlockStartState.cpp \ - DecisionState.cpp \ - EmptyPredictionContext.cpp \ - EpsilonTransition.cpp \ - LexerATNConfig.cpp \ - LoopEndState.cpp -# Escote's TODO: LL1Analyzer.cpp LexerATNSimulator.cpp ATNSimulator.cpp \ - ATNSerializer.cpp ATNConfigSet.cpp ATNConfig.cpp \ - ATN.cpp - -# Alejandro's files -SRCS += \ - NotSetTransition.cpp \ - OrderedATNConfigSet.cpp \ - PlusBlockStartState.cpp \ - PlusLoopbackState.cpp \ - PredicateTransition.cpp \ - PredictionMode.cpp \ - RangeTransition.cpp \ - RuleStartState.cpp \ - RuleStopState.cpp \ - RuleTransition.cpp \ - SemanticContext.cpp \ - SetTransition.cpp \ - SingletonPredictionContext.cpp \ - StarBlockStartState.cpp \ - StarLoopbackState.cpp \ - StarLoopEntryState.cpp \ - TokensStartState.cpp \ - Transition.cpp \ - WildcardTransition.cpp -# Alejandro's TODO: PredictionContext.cpp PredictionContextCache.cpp \ - PrecedencePredicateTransition.cpp ParserATNSimulator.cpp - -OBJS = $(SRCS:.cpp=.o) - -all: $(OBJS) - -%.o: %.cpp - $(CXX) -c $(ALL_CXXFLAGS) $< -o $@ - -clean: - $(RM) $(OBJS) - diff --git a/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp b/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp old mode 100755 new mode 100644 index b02910dd06..1d141a302a --- a/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp @@ -3,19 +3,18 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "atn/NotSetTransition.h" +#include "atn/TransitionType.h" #include "atn/ATNState.h" #include "misc/IntervalSet.h" using namespace antlr4; using namespace antlr4::atn; -NotSetTransition::NotSetTransition(ATNState *target, const misc::IntervalSet &set) : SetTransition(target, set) { -} - -Transition::SerializationType NotSetTransition::getSerializationType() const { - return NOT_SET; -} +NotSetTransition::NotSetTransition(ATNState *target, misc::IntervalSet set) : SetTransition(TransitionType::NOT_SET, target, std::move(set)) {} bool NotSetTransition::matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const { return symbol >= minVocabSymbol && symbol <= maxVocabSymbol diff --git a/runtime/Cpp/runtime/src/atn/NotSetTransition.h b/runtime/Cpp/runtime/src/atn/NotSetTransition.h old mode 100755 new mode 100644 index 214fb06031..f28f23bca7 --- a/runtime/Cpp/runtime/src/atn/NotSetTransition.h +++ b/runtime/Cpp/runtime/src/atn/NotSetTransition.h @@ -5,6 +5,12 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "misc/IntervalSet.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/SetTransition.h" namespace antlr4 { @@ -12,13 +18,15 @@ namespace atn { class ANTLR4CPP_PUBLIC NotSetTransition final : public SetTransition { public: - NotSetTransition(ATNState *target, const misc::IntervalSet &set); + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::NOT_SET; } - virtual SerializationType getSerializationType() const override; + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + NotSetTransition(ATNState *target, misc::IntervalSet set); - virtual std::string toString() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; 
+ + std::string toString() const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp b/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp old mode 100755 new mode 100644 index a731def936..5ccaed4978 --- a/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp +++ b/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp @@ -3,10 +3,15 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "atn/OrderedATNConfigSet.h" using namespace antlr4::atn; -size_t OrderedATNConfigSet::getHash(ATNConfig *c) { - return c->hashCode(); +size_t OrderedATNConfigSet::hashCode(const ATNConfig &atnConfig) const { + return atnConfig.hashCode(); +} + +bool OrderedATNConfigSet::equals(const ATNConfig &lhs, const ATNConfig &rhs) const { + return lhs == rhs; } diff --git a/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h b/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h old mode 100755 new mode 100644 index 4ce43bb965..cc546bb471 --- a/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h +++ b/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h @@ -5,15 +5,22 @@ #pragma once +#include #include "atn/ATNConfigSet.h" +#include "antlr4-common.h" #include "atn/ATNConfig.h" namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC OrderedATNConfigSet : public ATNConfigSet { - protected: - virtual size_t getHash(ATNConfig *c) override; + class ANTLR4CPP_PUBLIC OrderedATNConfigSet final : public ATNConfigSet { + public: + OrderedATNConfigSet() = default; + + private: + size_t hashCode(const ATNConfig &atnConfig) const override; + + bool equals(const ATNConfig &lhs, const ATNConfig &rhs) const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/ParseInfo.cpp b/runtime/Cpp/runtime/src/atn/ParseInfo.cpp old mode 100755 new mode 100644 index 95a89ac855..4d494922ea --- a/runtime/Cpp/runtime/src/atn/ParseInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/ParseInfo.cpp @@ -3,6 +3,8 @@ * can be found in the LICENSE.txt file 
in the project root. */ +#include +#include #include "atn/ProfilingATNSimulator.h" #include "dfa/DFA.h" diff --git a/runtime/Cpp/runtime/src/atn/ParseInfo.h b/runtime/Cpp/runtime/src/atn/ParseInfo.h old mode 100755 new mode 100644 index 7ced7de433..7f75849910 --- a/runtime/Cpp/runtime/src/atn/ParseInfo.h +++ b/runtime/Cpp/runtime/src/atn/ParseInfo.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "atn/DecisionInfo.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp old mode 100755 new mode 100644 index e0b85f695b..cf9f80bb97 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp @@ -3,24 +3,36 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include +#include +#include #include "dfa/DFA.h" +#include "atn/ATNStateType.h" +#include "atn/TransitionType.h" +#include "Token.h" +#include "antlr4-common.h" #include "NoViableAltException.h" #include "atn/DecisionState.h" #include "ParserRuleContext.h" #include "misc/IntervalSet.h" #include "Parser.h" #include "CommonTokenStream.h" -#include "atn/EmptyPredictionContext.h" #include "atn/NotSetTransition.h" #include "atn/AtomTransition.h" #include "atn/RuleTransition.h" #include "atn/PredicateTransition.h" #include "atn/PrecedencePredicateTransition.h" +#include "atn/SingletonPredictionContext.h" #include "atn/ActionTransition.h" #include "atn/EpsilonTransition.h" #include "atn/RuleStopState.h" #include "atn/ATNConfigSet.h" #include "atn/ATNConfig.h" +#include "internal/Synchronization.h" #include "atn/StarLoopEntryState.h" #include "atn/BlockStartState.h" @@ -31,17 +43,26 @@ #include "Vocabulary.h" #include "support/Arrays.h" +#include "support/Casts.h" #include "atn/ParserATNSimulator.h" +#ifndef DEBUG_ATN #define DEBUG_ATN 0 -#define DEBUG_LIST_ATN_DECISIONS 0 -#define DEBUG_DFA 0 
+#endif +#ifndef TRACE_ATN_SIM +#define TRACE_ATN_SIM 0 +#endif +#ifndef DFA_DEBUG +#define DFA_DEBUG 0 +#endif +#ifndef RETRY_DEBUG #define RETRY_DEBUG 0 +#endif using namespace antlr4; using namespace antlr4::atn; - +using namespace antlr4::internal; using namespace antlrcpp; const bool ParserATNSimulator::TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = ParserATNSimulator::getLrLoopSetting(); @@ -53,7 +74,13 @@ ParserATNSimulator::ParserATNSimulator(const ATN &atn, std::vector &de ParserATNSimulator::ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, PredictionContextCache &sharedContextCache) -: ATNSimulator(atn, sharedContextCache), decisionToDFA(decisionToDFA), parser(parser) { +: ParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache, ParserATNSimulatorOptions()) {} + +ParserATNSimulator::ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, + PredictionContextCache &sharedContextCache, + const ParserATNSimulatorOptions &options) +: ATNSimulator(atn, sharedContextCache), decisionToDFA(decisionToDFA), parser(parser), + mergeCache(options.getPredictionContextMergeCacheOptions()) { InitializeInstanceFields(); } @@ -70,7 +97,7 @@ void ParserATNSimulator::clearDFA() { size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext) { -#if DEBUG_ATN == 1 || DEBUG_LIST_ATN_DECISIONS == 1 +#if DEBUG_ATN == 1 || TRACE_ATN_SIM == 1 std::cout << "adaptivePredict decision " << decision << " exec LA(1)==" << getLookaheadName(input) << " line " << input->LT(1)->getLine() << ":" << input->LT(1)->getCharPositionInLine() << std::endl; #endif @@ -87,28 +114,37 @@ size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, // Now we are certain to have a specific decision's DFA // But, do we still need an initial state? 
auto onExit = finally([this, input, index, m] { - mergeCache.clear(); // wack cache after each prediction + if (mergeCache.getOptions().getClearEveryN() != 0) { + if (++_mergeCacheCounter == mergeCache.getOptions().getClearEveryN()) { + mergeCache.clear(); + _mergeCacheCounter = 0; + } + } _dfa = nullptr; input->seek(index); input->release(m); }); dfa::DFAState *s0; - if (dfa.isPrecedenceDfa()) { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(parser->getPrecedence()); - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0; + { + SharedLock stateLock(atn._stateMutex); + if (dfa.isPrecedenceDfa()) { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + SharedLock edgeLock(atn._edgeMutex); + s0 = dfa.getPrecedenceStartState(parser->getPrecedence()); + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0; + } } if (s0 == nullptr) { - bool fullCtx = false; - std::unique_ptr s0_closure = computeStartState(dynamic_cast(dfa.atnStartState), - &ParserRuleContext::EMPTY, fullCtx); - - _stateLock.writeLock(); + auto s0_closure = computeStartState(dfa.atnStartState, &ParserRuleContext::EMPTY, false); + std::unique_ptr newState; + std::unique_ptr oldState; + UniqueLock stateLock(atn._stateMutex); + dfa::DFAState* ds0 = dfa.s0; if (dfa.isPrecedenceDfa()) { /* If this is a precedence DFA, we use applyPrecedenceFilter * to convert the computed start state to a precedence start @@ -116,26 +152,22 @@ size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, * appropriate start state for the precedence level rather * than simply setting DFA.s0. 
*/ - dfa.s0->configs = std::move(s0_closure); // not used for prediction but useful to know start configs anyway - dfa::DFAState *newState = new dfa::DFAState(applyPrecedenceFilter(dfa.s0->configs.get())); /* mem-check: managed by the DFA or deleted below */ - s0 = addDFAState(dfa, newState); - dfa.setPrecedenceStartState(parser->getPrecedence(), s0, _edgeLock); - if (s0 != newState) { - delete newState; // If there was already a state with this config set we don't need the new one. - } + ds0->configs = std::move(s0_closure); // not used for prediction but useful to know start configs anyway + newState = std::make_unique(applyPrecedenceFilter(ds0->configs.get())); + s0 = addDFAState(dfa, newState.get()); + UniqueLock edgeLock(atn._edgeMutex); + dfa.setPrecedenceStartState(parser->getPrecedence(), s0); } else { - dfa::DFAState *newState = new dfa::DFAState(std::move(s0_closure)); /* mem-check: managed by the DFA or deleted below */ - s0 = addDFAState(dfa, newState); - - if (dfa.s0 != s0) { - delete dfa.s0; // Delete existing s0 DFA state, if there's any. + newState = std::make_unique(std::move(s0_closure)); + s0 = addDFAState(dfa, newState.get()); + if (ds0 != s0) { + oldState.reset(ds0); dfa.s0 = s0; } - if (s0 != newState) { - delete newState; // If there was already a state with this config set we don't need the new one. - } } - _stateLock.writeUnlock(); + if (s0 == newState.get()) { + newState.release(); + } } // We can start with an existing DFA. 
@@ -147,15 +179,16 @@ size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream *input, size_t startIndex, ParserRuleContext *outerContext) { -#if DEBUG_ATN == 1 || DEBUG_LIST_ATN_DECISIONS == 1 - std::cout << "execATN decision " << dfa.decision << " exec LA(1)==" << getLookaheadName(input) << +#if DEBUG_ATN == 1 || TRACE_ATN_SIM == 1 + std::cout << "execATN decision " << dfa.decision << ", DFA state " << s0->toString() << + ", LA(1)==" << getLookaheadName(input) << " line " << input->LT(1)->getLine() << ":" << input->LT(1)->getCharPositionInLine() << std::endl; #endif dfa::DFAState *previousD = s0; #if DEBUG_ATN == 1 - std::cout << "s0 = " << s0 << std::endl; + std::cout << "s0 = " << s0->toString() << std::endl; #endif size_t t = input->LA(1); @@ -215,12 +248,12 @@ size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream } } -#if DEBUG_DFA == 1 +#if DFA_DEBUG == 1 std::cout << "ctx sensitive state " << outerContext << " in " << D << std::endl; #endif bool fullCtx = true; - Ref s0_closure = computeStartState(dfa.atnStartState, outerContext, fullCtx); + std::unique_ptr s0_closure = computeStartState(dfa.atnStartState, outerContext, fullCtx); reportAttemptingFullContext(dfa, conflictingAlts, D->configs.get(), startIndex, input->index()); size_t alt = execATNWithFullContext(dfa, D, s0_closure.get(), input, startIndex, outerContext); return alt; @@ -260,10 +293,9 @@ size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream dfa::DFAState *ParserATNSimulator::getExistingTargetState(dfa::DFAState *previousD, size_t t) { dfa::DFAState* retval; - _edgeLock.readLock(); + SharedLock edgeLock(atn._edgeMutex); auto iterator = previousD->edges.find(t); retval = (iterator == previousD->edges.end()) ? 
nullptr : iterator->second; - _edgeLock.readUnlock(); return retval; } @@ -315,7 +347,7 @@ void ParserATNSimulator::predicateDFAState(dfa::DFAState *dfaState, DecisionStat // Update DFA so reach becomes accept state with (predicate,alt) // pairs if preds found for conflicting alts BitSet altsToCollectPredsFrom = getConflictingAltsOrUniqueAlt(dfaState->configs.get()); - std::vector> altToPred = getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState->configs.get(), nalts); + std::vector> altToPred = getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState->configs.get(), nalts); if (!altToPred.empty()) { dfaState->predicates = getPredicatePredictions(altsToCollectPredsFrom, altToPred); dfaState->prediction = ATN::INVALID_ALT_NUMBER; // make sure we use preds @@ -330,6 +362,10 @@ void ParserATNSimulator::predicateDFAState(dfa::DFAState *dfaState, DecisionStat size_t ParserATNSimulator::execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *D, ATNConfigSet *s0, TokenStream *input, size_t startIndex, ParserRuleContext *outerContext) { +#if TRACE_ATN_SIM == 1 + std::cout << "execATNWithFullContext " << s0->toString() << std::endl; +#endif + bool fullCtx = true; bool foundExactAmbig = false; @@ -452,8 +488,8 @@ std::unique_ptr ParserATNSimulator::computeReachSet(ATNConfigSet * std::vector> skippedStopStates; // First figure out where we can reach on input t - for (auto &c : closure_->configs) { - if (is(c->state)) { + for (const auto &c : closure_->configs) { + if (RuleStopState::is(c->state)) { assert(c->context->isEmpty()); if (fullCtx || t == Token::EOF) { @@ -465,10 +501,10 @@ std::unique_ptr ParserATNSimulator::computeReachSet(ATNConfigSet * size_t n = c->state->transitions.size(); for (size_t ti = 0; ti < n; ti++) { // for each transition - Transition *trans = c->state->transitions[ti]; + const Transition *trans = c->state->transitions[ti].get(); ATNState *target = getReachableTarget(trans, (int)t); if (target != nullptr) { - intermediate->add(std::make_shared(c, target), 
&mergeCache); + intermediate->add(std::make_shared(*c, target), &mergeCache); } } } @@ -507,7 +543,7 @@ std::unique_ptr ParserATNSimulator::computeReachSet(ATNConfigSet * ATNConfig::Set closureBusy; bool treatEofAsEpsilon = t == Token::EOF; - for (auto c : intermediate->configs) { + for (const auto &c : intermediate->configs) { closure(c, reach.get(), closureBusy, false, fullCtx, treatEofAsEpsilon); } } @@ -546,12 +582,16 @@ std::unique_ptr ParserATNSimulator::computeReachSet(ATNConfigSet * if (skippedStopStates.size() > 0 && (!fullCtx || !PredictionModeClass::hasConfigInRuleStopState(reach.get()))) { assert(!skippedStopStates.empty()); - for (auto c : skippedStopStates) { + for (const auto &c : skippedStopStates) { reach->add(c, &mergeCache); } } - if (reach->isEmpty()) { +#if DEBUG_ATN == 1 || TRACE_ATN_SIM == 1 + std::cout << "computeReachSet " << closure_->toString() << " -> " << reach->toString() << std::endl; +#endif + + if (reach->isEmpty()) { return nullptr; } return reach; @@ -565,8 +605,8 @@ ATNConfigSet* ParserATNSimulator::removeAllConfigsNotInRuleStopState(ATNConfigSe ATNConfigSet *result = new ATNConfigSet(configs->fullCtx); /* mem-check: released by caller */ - for (auto &config : configs->configs) { - if (is(config->state)) { + for (const auto &config : configs->configs) { + if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) { result->add(config, &mergeCache); continue; } @@ -575,7 +615,7 @@ ATNConfigSet* ParserATNSimulator::removeAllConfigsNotInRuleStopState(ATNConfigSe misc::IntervalSet nextTokens = atn.nextTokens(config->state); if (nextTokens.contains(Token::EPSILON)) { ATNState *endOfRuleState = atn.ruleToStopState[config->state->ruleIndex]; - result->add(std::make_shared(config, endOfRuleState), &mergeCache); + result->add(std::make_shared(*config, endOfRuleState), &mergeCache); } } } @@ -585,10 +625,14 @@ ATNConfigSet* ParserATNSimulator::removeAllConfigsNotInRuleStopState(ATNConfigSe std::unique_ptr 
ParserATNSimulator::computeStartState(ATNState *p, RuleContext *ctx, bool fullCtx) { // always at least the implicit call to start rule - Ref initialContext = PredictionContext::fromRuleContext(atn, ctx); + Ref initialContext = PredictionContext::fromRuleContext(atn, ctx); std::unique_ptr configs(new ATNConfigSet(fullCtx)); - for (size_t i = 0; i < p->transitions.size(); i++) { +#if DEBUG_ATN == 1 || TRACE_ATN_SIM == 1 + std::cout << "computeStartState from ATN state " << p->toString() << " initialContext=" << initialContext->toString() << std::endl; +#endif + + for (size_t i = 0; i < p->transitions.size(); i++) { ATNState *target = p->transitions[i]->target; Ref c = std::make_shared(target, (int)i + 1, initialContext); ATNConfig::Set closureBusy; @@ -599,15 +643,15 @@ std::unique_ptr ParserATNSimulator::computeStartState(ATNState *p, } std::unique_ptr ParserATNSimulator::applyPrecedenceFilter(ATNConfigSet *configs) { - std::map> statesFromAlt1; + std::map> statesFromAlt1; std::unique_ptr configSet(new ATNConfigSet(configs->fullCtx)); - for (Ref &config : configs->configs) { + for (const auto &config : configs->configs) { // handle alt 1 first if (config->alt != 1) { continue; } - Ref updatedContext = config->semanticContext->evalPrecedence(parser, _outerContext); + Ref updatedContext = config->semanticContext->evalPrecedence(parser, _outerContext); if (updatedContext == nullptr) { // the configuration was eliminated continue; @@ -615,14 +659,14 @@ std::unique_ptr ParserATNSimulator::applyPrecedenceFilter(ATNConfi statesFromAlt1[config->state->stateNumber] = config->context; if (updatedContext != config->semanticContext) { - configSet->add(std::make_shared(config, updatedContext), &mergeCache); + configSet->add(std::make_shared(*config, updatedContext), &mergeCache); } else { configSet->add(config, &mergeCache); } } - for (Ref &config : configs->configs) { + for (const auto &config : configs->configs) { if (config->alt == 1) { // already handled continue; @@ -646,7 
+690,7 @@ std::unique_ptr ParserATNSimulator::applyPrecedenceFilter(ATNConfi return configSet; } -atn::ATNState* ParserATNSimulator::getReachableTarget(Transition *trans, size_t ttype) { +atn::ATNState* ParserATNSimulator::getReachableTarget(const Transition *trans, size_t ttype) { if (trans->matches(ttype, 0, atn.maxTokenType)) { return trans->target; } @@ -655,7 +699,7 @@ atn::ATNState* ParserATNSimulator::getReachableTarget(Transition *trans, size_t } // Note that caller must memory manage the returned value from this function -std::vector> ParserATNSimulator::getPredsForAmbigAlts(const BitSet &ambigAlts, +std::vector> ParserATNSimulator::getPredsForAmbigAlts(const BitSet &ambigAlts, ATNConfigSet *configs, size_t nalts) { // REACH=[1|1|[]|0:0, 1|2|[]|0:1] /* altToPred starts as an array of all null contexts. The entry at index i @@ -669,9 +713,9 @@ std::vector> ParserATNSimulator::getPredsForAmbigAlts(const * * From this, it is clear that NONE||anything==NONE. */ - std::vector> altToPred(nalts + 1); + std::vector> altToPred(nalts + 1); - for (auto &c : configs->configs) { + for (const auto &c : configs->configs) { if (ambigAlts.test(c->alt)) { altToPred[c->alt] = SemanticContext::Or(altToPred[c->alt], c->semanticContext); } @@ -680,8 +724,8 @@ std::vector> ParserATNSimulator::getPredsForAmbigAlts(const size_t nPredAlts = 0; for (size_t i = 1; i <= nalts; i++) { if (altToPred[i] == nullptr) { - altToPred[i] = SemanticContext::NONE; - } else if (altToPred[i] != SemanticContext::NONE) { + altToPred[i] = SemanticContext::Empty::Instance; + } else if (altToPred[i] != SemanticContext::Empty::Instance) { nPredAlts++; } } @@ -697,21 +741,19 @@ std::vector> ParserATNSimulator::getPredsForAmbigAlts(const return altToPred; } -std::vector ParserATNSimulator::getPredicatePredictions(const antlrcpp::BitSet &ambigAlts, - std::vector> const& altToPred) { - bool containsPredicate = std::find_if(altToPred.begin(), altToPred.end(), [](Ref const context) { - return context != 
SemanticContext::NONE; +std::vector ParserATNSimulator::getPredicatePredictions(const antlrcpp::BitSet &ambigAlts, + const std::vector> &altToPred) { + bool containsPredicate = std::find_if(altToPred.begin(), altToPred.end(), [](const Ref &context) { + return context != SemanticContext::Empty::Instance; }) != altToPred.end(); - if (!containsPredicate) - return {}; - - std::vector pairs; - for (size_t i = 1; i < altToPred.size(); ++i) { - Ref const& pred = altToPred[i]; - assert(pred != nullptr); // unpredicted is indicated by SemanticContext.NONE - - if (ambigAlts.test(i)) { - pairs.push_back(new dfa::DFAState::PredPrediction(pred, (int)i)); /* mem-check: managed by the DFAState it will be assigned to after return */ + std::vector pairs; + if (containsPredicate) { + for (size_t i = 1; i < altToPred.size(); i++) { + const auto &pred = altToPred[i]; + assert(pred != nullptr); // unpredicted is indicated by SemanticContext.NONE + if (ambigAlts.test(i)) { + pairs.emplace_back(pred, static_cast(i)); + } } } return pairs; @@ -739,8 +781,8 @@ size_t ParserATNSimulator::getSynValidOrSemInvalidAltThatFinishedDecisionEntryRu size_t ParserATNSimulator::getAltThatFinishedDecisionEntryRule(ATNConfigSet *configs) { misc::IntervalSet alts; - for (auto &c : configs->configs) { - if (c->getOuterContextDepth() > 0 || (is(c->state) && c->context->hasEmptyPath())) { + for (const auto &c : configs->configs) { + if (c->getOuterContextDepth() > 0 || (c->state != nullptr && c->state->getStateType() == ATNStateType::RULE_STOP && c->context->hasEmptyPath())) { alts.add(c->alt); } } @@ -756,8 +798,8 @@ std::pair ParserATNSimulator::splitAccordingToSe // mem-check: both pointers must be freed by the caller. 
ATNConfigSet *succeeded(new ATNConfigSet(configs->fullCtx)); ATNConfigSet *failed(new ATNConfigSet(configs->fullCtx)); - for (Ref &c : configs->configs) { - if (c->semanticContext != SemanticContext::NONE) { + for (const auto &c : configs->configs) { + if (c->semanticContext != SemanticContext::Empty::Instance) { bool predicateEvaluationResult = evalSemanticContext(c->semanticContext, outerContext, c->alt, configs->fullCtx); if (predicateEvaluationResult) { succeeded->add(c); @@ -771,12 +813,12 @@ std::pair ParserATNSimulator::splitAccordingToSe return { succeeded, failed }; } -BitSet ParserATNSimulator::evalSemanticContext(std::vector predPredictions, +BitSet ParserATNSimulator::evalSemanticContext(const std::vector &predPredictions, ParserRuleContext *outerContext, bool complete) { BitSet predictions; - for (auto prediction : predPredictions) { - if (prediction->pred == SemanticContext::NONE) { - predictions.set(prediction->alt); + for (const auto &prediction : predPredictions) { + if (prediction.pred == SemanticContext::Empty::Instance) { + predictions.set(prediction.alt); if (!complete) { break; } @@ -784,17 +826,17 @@ BitSet ParserATNSimulator::evalSemanticContext(std::vectorpred, outerContext, prediction->alt, fullCtx); -#if DEBUG_ATN == 1 || DEBUG_DFA == 1 - std::cout << "eval pred " << prediction->toString() << " = " << predicateEvaluationResult << std::endl; + bool predicateEvaluationResult = evalSemanticContext(prediction.pred, outerContext, prediction.alt, fullCtx); +#if DEBUG_ATN == 1 || DFA_DEBUG == 1 + std::cout << "eval pred " << prediction.toString() << " = " << predicateEvaluationResult << std::endl; #endif if (predicateEvaluationResult) { -#if DEBUG_ATN == 1 || DEBUG_DFA == 1 - std::cout << "PREDICT " << prediction->alt << std::endl; +#if DEBUG_ATN == 1 || DFA_DEBUG == 1 + std::cout << "PREDICT " << prediction.alt << std::endl; #endif - predictions.set(prediction->alt); + predictions.set(prediction.alt); if (!complete) { break; } @@ -804,7 +846,7 
@@ BitSet ParserATNSimulator::evalSemanticContext(std::vector const& pred, ParserRuleContext *parserCallStack, +bool ParserATNSimulator::evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, size_t /*alt*/, bool /*fullCtx*/) { return pred->eval(parser, parserCallStack); } @@ -820,18 +862,18 @@ void ParserATNSimulator::closure(Ref const& config, ATNConfigSet *con void ParserATNSimulator::closureCheckingStopState(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon) { -#if DEBUG_ATN == 1 +#if TRACE_ATN_SIM == 1 std::cout << "closure(" << config->toString(true) << ")" << std::endl; #endif - if (is(config->state)) { + if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) { // We hit rule end. If we have context info, use it // run thru all possible stack tops in ctx if (!config->context->isEmpty()) { for (size_t i = 0; i < config->context->size(); i++) { if (config->context->getReturnState(i) == PredictionContext::EMPTY_RETURN_STATE) { if (fullCtx) { - configs->add(std::make_shared(config, config->state, PredictionContext::EMPTY), &mergeCache); + configs->add(std::make_shared(*config, config->state, PredictionContext::EMPTY), &mergeCache); continue; } else { // we have no context info, just chase follow links (if greedy) @@ -843,8 +885,8 @@ void ParserATNSimulator::closureCheckingStopState(Ref const& config, continue; } ATNState *returnState = atn.states[config->context->getReturnState(i)]; - std::weak_ptr newContext = config->context->getParent(i); // "pop" return state - Ref c = std::make_shared(returnState, config->alt, newContext.lock(), config->semanticContext); + Ref newContext = config->context->getParent(i); // "pop" return state + Ref c = std::make_shared(returnState, config->alt, newContext, config->semanticContext); // While we have context to pop back from, we may have // gotten that context AFTER having falling 
off a rule. // Make sure we track that we are now out of context. @@ -884,12 +926,12 @@ void ParserATNSimulator::closure_(Ref const& config, ATNConfigSet *co if (i == 0 && canDropLoopEntryEdgeInLeftRecursiveRule(config.get())) continue; - Transition *t = p->transitions[i]; - bool continueCollecting = !is(t) && collectPredicates; + const Transition *t = p->transitions[i].get(); + bool continueCollecting = !(t != nullptr && t->getTransitionType() == TransitionType::ACTION) && collectPredicates; Ref c = getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon); if (c != nullptr) { int newDepth = depth; - if (is(config->state)) { + if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) { assert(!fullCtx); // target fell off end of rule; mark resulting c as having dipped into outer context @@ -905,7 +947,7 @@ void ParserATNSimulator::closure_(Ref const& config, ATNConfigSet *co closureBusy.insert(c); if (_dfa != nullptr && _dfa->isPrecedenceDfa()) { - size_t outermostPrecedenceReturn = dynamic_cast(t)->outermostPrecedenceReturn(); + size_t outermostPrecedenceReturn = downCast(t)->outermostPrecedenceReturn(); if (outermostPrecedenceReturn == _dfa->atnStartState->ruleIndex) { c->setPrecedenceFilterSuppressed(true); } @@ -926,7 +968,7 @@ void ParserATNSimulator::closure_(Ref const& config, ATNConfigSet *co assert(newDepth > INT_MIN); newDepth--; -#if DEBUG_DFA == 1 +#if DFA_DEBUG == 1 std::cout << "dips into outer ctx: " << c << std::endl; #endif @@ -939,7 +981,7 @@ void ParserATNSimulator::closure_(Ref const& config, ATNConfigSet *co } } - if (is(t)) { + if (t != nullptr && t->getTransitionType() == TransitionType::RULE) { // latch when newDepth goes negative - once we step out of the entry context we can't return if (newDepth >= 0) { newDepth++; @@ -961,7 +1003,7 @@ bool ParserATNSimulator::canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *conf // left-recursion elimination. 
For efficiency, also check if // the context has an empty stack case. If so, it would mean // global FOLLOW so we can't perform optimization - if (p->getStateType() != ATNState::STAR_LOOP_ENTRY || + if (p->getStateType() != ATNStateType::STAR_LOOP_ENTRY || !((StarLoopEntryState *)p)->isPrecedenceDecision || // Are we the special loop entry/exit state? config->context->isEmpty() || // If SLL wildcard config->context->hasEmptyPath()) @@ -995,7 +1037,7 @@ bool ParserATNSimulator::canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *conf // Look for prefix op case like 'not expr', (' type ')' expr ATNState *returnStateTarget = returnState->transitions[0]->target; - if (returnState->getStateType() == ATNState::BLOCK_END && returnStateTarget == p) { + if (returnState->getStateType() == ATNStateType::BLOCK_END && returnStateTarget == p) { continue; } @@ -1014,7 +1056,7 @@ bool ParserATNSimulator::canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *conf // Look for complex prefix 'between expr and expr' case where 2nd expr's // return state points at block end state of (...)* internal block - if (returnStateTarget->getStateType() == ATNState::BLOCK_END && + if (returnStateTarget->getStateType() == ATNStateType::BLOCK_END && returnStateTarget->transitions.size() == 1 && returnStateTarget->transitions[0]->isEpsilon() && returnStateTarget->transitions[0]->target == p) @@ -1036,32 +1078,32 @@ std::string ParserATNSimulator::getRuleName(size_t index) { return ""; } -Ref ParserATNSimulator::getEpsilonTarget(Ref const& config, Transition *t, bool collectPredicates, +Ref ParserATNSimulator::getEpsilonTarget(Ref const& config, const Transition *t, bool collectPredicates, bool inContext, bool fullCtx, bool treatEofAsEpsilon) { - switch (t->getSerializationType()) { - case Transition::RULE: - return ruleTransition(config, static_cast(t)); + switch (t->getTransitionType()) { + case TransitionType::RULE: + return ruleTransition(config, static_cast(t)); - case Transition::PRECEDENCE: - 
return precedenceTransition(config, static_cast(t), collectPredicates, inContext, fullCtx); + case TransitionType::PRECEDENCE: + return precedenceTransition(config, static_cast(t), collectPredicates, inContext, fullCtx); - case Transition::PREDICATE: - return predTransition(config, static_cast(t), collectPredicates, inContext, fullCtx); + case TransitionType::PREDICATE: + return predTransition(config, static_cast(t), collectPredicates, inContext, fullCtx); - case Transition::ACTION: - return actionTransition(config, static_cast(t)); + case TransitionType::ACTION: + return actionTransition(config, static_cast(t)); - case Transition::EPSILON: - return std::make_shared(config, t->target); + case TransitionType::EPSILON: + return std::make_shared(*config, t->target); - case Transition::ATOM: - case Transition::RANGE: - case Transition::SET: + case TransitionType::ATOM: + case TransitionType::RANGE: + case TransitionType::SET: // EOF transitions act like epsilon transitions after the first EOF // transition is traversed if (treatEofAsEpsilon) { if (t->matches(Token::EOF, 0, 1)) { - return std::make_shared(config, t->target); + return std::make_shared(*config, t->target); } } @@ -1072,18 +1114,18 @@ Ref ParserATNSimulator::getEpsilonTarget(Ref const& config } } -Ref ParserATNSimulator::actionTransition(Ref const& config, ActionTransition *t) { -#if DEBUG_DFA == 1 +Ref ParserATNSimulator::actionTransition(Ref const& config, const ActionTransition *t) { +#if DFA_DEBUG == 1 std::cout << "ACTION edge " << t->ruleIndex << ":" << t->actionIndex << std::endl; #endif - return std::make_shared(config, t->target); + return std::make_shared(*config, t->target); } -Ref ParserATNSimulator::precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt, +Ref ParserATNSimulator::precedenceTransition(Ref const& config, const PrecedencePredicateTransition *pt, bool collectPredicates, bool inContext, bool fullCtx) { -#if DEBUG_DFA == 1 - std::cout << "PRED (collectPredicates=" 
<< collectPredicates << ") " << pt->precedence << ">=_p" << ", ctx dependent=true" << std::endl; +#if DFA_DEBUG == 1 + std::cout << "PRED (collectPredicates=" << collectPredicates << ") " << pt->getPrecedence() << ">=_p" << ", ctx dependent=true" << std::endl; if (parser != nullptr) { std::cout << "context surrounding pred is " << Arrays::listToString(parser->getRuleInvocationStack(), ", ") << std::endl; } @@ -1091,7 +1133,7 @@ Ref ParserATNSimulator::precedenceTransition(Ref const& co Ref c; if (collectPredicates && inContext) { - Ref predicate = pt->getPredicate(); + const auto &predicate = pt->getPredicate(); if (fullCtx) { // In full context mode, we can evaluate predicates on-the-fly @@ -1100,38 +1142,38 @@ Ref ParserATNSimulator::precedenceTransition(Ref const& co // later during conflict resolution. size_t currentPosition = _input->index(); _input->seek(_startIndex); - bool predSucceeds = evalSemanticContext(pt->getPredicate(), _outerContext, config->alt, fullCtx); + bool predSucceeds = evalSemanticContext(predicate, _outerContext, config->alt, fullCtx); _input->seek(currentPosition); if (predSucceeds) { - c = std::make_shared(config, pt->target); // no pred context + c = std::make_shared(*config, pt->target); // no pred context } } else { - Ref newSemCtx = SemanticContext::And(config->semanticContext, predicate); - c = std::make_shared(config, pt->target, newSemCtx); + Ref newSemCtx = SemanticContext::And(config->semanticContext, predicate); + c = std::make_shared(*config, pt->target, std::move(newSemCtx)); } } else { - c = std::make_shared(config, pt->target); + c = std::make_shared(*config, pt->target); } -#if DEBUG_DFA == 1 +#if DFA_DEBUG == 1 std::cout << "config from pred transition=" << c << std::endl; #endif return c; } -Ref ParserATNSimulator::predTransition(Ref const& config, PredicateTransition *pt, +Ref ParserATNSimulator::predTransition(Ref const& config, const PredicateTransition *pt, bool collectPredicates, bool inContext, bool fullCtx) { -#if 
DEBUG_DFA == 1 - std::cout << "PRED (collectPredicates=" << collectPredicates << ") " << pt->ruleIndex << ":" << pt->predIndex << ", ctx dependent=" << pt->isCtxDependent << std::endl; +#if DFA_DEBUG == 1 + std::cout << "PRED (collectPredicates=" << collectPredicates << ") " << pt->getRuleIndex() << ":" << pt->getPredIndex() << ", ctx dependent=" << pt->isCtxDependent() << std::endl; if (parser != nullptr) { std::cout << "context surrounding pred is " << Arrays::listToString(parser->getRuleInvocationStack(), ", ") << std::endl; } #endif Ref c = nullptr; - if (collectPredicates && (!pt->isCtxDependent || (pt->isCtxDependent && inContext))) { - Ref predicate = pt->getPredicate(); + if (collectPredicates && (!pt->isCtxDependent() || (pt->isCtxDependent() && inContext))) { + const auto &predicate = pt->getPredicate(); if (fullCtx) { // In full context mode, we can evaluate predicates on-the-fly // during closure, which dramatically reduces the size of @@ -1139,34 +1181,34 @@ Ref ParserATNSimulator::predTransition(Ref const& config, // later during conflict resolution. 
size_t currentPosition = _input->index(); _input->seek(_startIndex); - bool predSucceeds = evalSemanticContext(pt->getPredicate(), _outerContext, config->alt, fullCtx); + bool predSucceeds = evalSemanticContext(predicate, _outerContext, config->alt, fullCtx); _input->seek(currentPosition); if (predSucceeds) { - c = std::make_shared(config, pt->target); // no pred context + c = std::make_shared(*config, pt->target); // no pred context } } else { - Ref newSemCtx = SemanticContext::And(config->semanticContext, predicate); - c = std::make_shared(config, pt->target, newSemCtx); + Ref newSemCtx = SemanticContext::And(config->semanticContext, predicate); + c = std::make_shared(*config, pt->target, std::move(newSemCtx)); } } else { - c = std::make_shared(config, pt->target); + c = std::make_shared(*config, pt->target); } -#if DEBUG_DFA == 1 +#if DFA_DEBUG == 1 std::cout << "config from pred transition=" << c << std::endl; #endif return c; } -Ref ParserATNSimulator::ruleTransition(Ref const& config, RuleTransition *t) { -#if DEBUG_DFA == 1 +Ref ParserATNSimulator::ruleTransition(Ref const& config, const RuleTransition *t) { +#if DFA_DEBUG == 1 std::cout << "CALL rule " << getRuleName(t->target->ruleIndex) << ", ctx=" << config->context << std::endl; #endif atn::ATNState *returnState = t->followState; - Ref newContext = SingletonPredictionContext::create(config->context, returnState->stateNumber); - return std::make_shared(config, t->target, newContext); + Ref newContext = SingletonPredictionContext::create(config->context, returnState->stateNumber); + return std::make_shared(*config, t->target, newContext); } BitSet ParserATNSimulator::getConflictingAlts(ATNConfigSet *configs) { @@ -1189,7 +1231,7 @@ std::string ParserATNSimulator::getTokenName(size_t t) { return "EOF"; } - const dfa::Vocabulary &vocabulary = parser != nullptr ? parser->getVocabulary() : dfa::Vocabulary::EMPTY_VOCABULARY; + const dfa::Vocabulary &vocabulary = parser != nullptr ? 
parser->getVocabulary() : dfa::Vocabulary(); std::string displayName = vocabulary.getDisplayName(t); if (displayName == std::to_string(t)) { return displayName; @@ -1204,18 +1246,20 @@ std::string ParserATNSimulator::getLookaheadName(TokenStream *input) { void ParserATNSimulator::dumpDeadEndConfigs(NoViableAltException &nvae) { std::cerr << "dead end configs: "; - for (auto c : nvae.getDeadEndConfigs()->configs) { + for (const auto &c : nvae.getDeadEndConfigs()->configs) { std::string trans = "no edges"; if (c->state->transitions.size() > 0) { - Transition *t = c->state->transitions[0]; - if (is(t)) { - AtomTransition *at = static_cast(t); + const Transition *t = c->state->transitions[0].get(); + if (t != nullptr && t->getTransitionType() == TransitionType::ATOM) { + const AtomTransition *at = static_cast(t); trans = "Atom " + getTokenName(at->_label); - } else if (is(t)) { - SetTransition *st = static_cast(t); - bool is_not = is(st); - trans = (is_not ? "~" : ""); - trans += "Set "; + } else if (t != nullptr && t->getTransitionType() == TransitionType::SET) { + const SetTransition *st = static_cast(t); + trans = "Set "; + trans += st->set.toString(); + } else if (t != nullptr && t->getTransitionType() == TransitionType::NOT_SET) { + const SetTransition *st = static_cast(t); + trans = "~Set "; trans += st->set.toString(); } } @@ -1230,7 +1274,7 @@ NoViableAltException ParserATNSimulator::noViableAlt(TokenStream *input, ParserR size_t ParserATNSimulator::getUniqueAlt(ATNConfigSet *configs) { size_t alt = ATN::INVALID_ALT_NUMBER; - for (auto &c : configs->configs) { + for (const auto &c : configs->configs) { if (alt == ATN::INVALID_ALT_NUMBER) { alt = c->alt; // found first alt } else if (c->alt != alt) { @@ -1241,7 +1285,7 @@ size_t ParserATNSimulator::getUniqueAlt(ATNConfigSet *configs) { } dfa::DFAState *ParserATNSimulator::addDFAEdge(dfa::DFA &dfa, dfa::DFAState *from, ssize_t t, dfa::DFAState *to) { -#if DEBUG_DFA == 1 +#if DFA_DEBUG == 1 std::cout << "EDGE " << 
from << " -> " << to << " upon " << getTokenName(t) << std::endl; #endif @@ -1249,25 +1293,25 @@ dfa::DFAState *ParserATNSimulator::addDFAEdge(dfa::DFA &dfa, dfa::DFAState *from return nullptr; } - _stateLock.writeLock(); - to = addDFAState(dfa, to); // used existing if possible not incoming - _stateLock.writeUnlock(); + { + UniqueLock stateLock(atn._stateMutex); + to = addDFAState(dfa, to); // used existing if possible not incoming + } if (from == nullptr || t > (int)atn.maxTokenType) { return to; } { - _edgeLock.writeLock(); + UniqueLock edgeLock(atn._edgeMutex); from->edges[t] = to; // connect - _edgeLock.writeUnlock(); } -#if DEBUG_DFA == 1 +#if DFA_DEBUG == 1 std::string dfaText; if (parser != nullptr) { dfaText = dfa.toString(parser->getVocabulary()); } else { - dfaText = dfa.toString(dfa::Vocabulary::EMPTY_VOCABULARY); + dfaText = dfa.toString(dfa::Vocabulary()); } std::cout << "DFA=\n" << dfaText << std::endl; #endif @@ -1280,20 +1324,30 @@ dfa::DFAState *ParserATNSimulator::addDFAState(dfa::DFA &dfa, dfa::DFAState *D) return D; } - auto existing = dfa.states.find(D); - if (existing != dfa.states.end()) { + // Optimizing the configs below should not alter the hash code. Thus we can just do an insert + // which will only succeed if an equivalent DFAState does not already exist. + auto [existing, inserted] = dfa.states.insert(D); + if (!inserted) { +#if TRACE_ATN_SIM == 1 + std::cout << "addDFAState " << D->toString() << " exists" << std::endl; +#endif return *existing; } - D->stateNumber = (int)dfa.states.size(); + // Previously we did a lookup, then set fields, then inserted. It was `dfa.states.size()`, since + // we already inserted we need to subtract one. 
+ D->stateNumber = static_cast(dfa.states.size() - 1); + +#if TRACE_ATN_SIM == 1 + std::cout << "addDFAState new " << D->toString() << std::endl; +#endif + if (!D->configs->isReadonly()) { D->configs->optimizeConfigs(this); D->configs->setReadonly(true); } - dfa.states.insert(D); - -#if DEBUG_DFA == 1 +#if DFA_DEBUG == 1 std::cout << "adding new DFA state: " << D << std::endl; #endif @@ -1302,7 +1356,7 @@ dfa::DFAState *ParserATNSimulator::addDFAState(dfa::DFA &dfa, dfa::DFAState *D) void ParserATNSimulator::reportAttemptingFullContext(dfa::DFA &dfa, const antlrcpp::BitSet &conflictingAlts, ATNConfigSet *configs, size_t startIndex, size_t stopIndex) { -#if DEBUG_DFA == 1 || RETRY_DEBUG == 1 +#if DFA_DEBUG == 1 || RETRY_DEBUG == 1 misc::Interval interval = misc::Interval((int)startIndex, (int)stopIndex); std::cout << "reportAttemptingFullContext decision=" << dfa.decision << ":" << configs << ", input=" << parser->getTokenStream()->getText(interval) << std::endl; #endif @@ -1314,7 +1368,7 @@ void ParserATNSimulator::reportAttemptingFullContext(dfa::DFA &dfa, const antlrc void ParserATNSimulator::reportContextSensitivity(dfa::DFA &dfa, size_t prediction, ATNConfigSet *configs, size_t startIndex, size_t stopIndex) { -#if DEBUG_DFA == 1 || RETRY_DEBUG == 1 +#if DFA_DEBUG == 1 || RETRY_DEBUG == 1 misc::Interval interval = misc::Interval(startIndex, stopIndex); std::cout << "reportContextSensitivity decision=" << dfa.decision << ":" << configs << ", input=" << parser->getTokenStream()->getText(interval) << std::endl; #endif @@ -1326,7 +1380,7 @@ void ParserATNSimulator::reportContextSensitivity(dfa::DFA &dfa, size_t predicti void ParserATNSimulator::reportAmbiguity(dfa::DFA &dfa, dfa::DFAState * /*D*/, size_t startIndex, size_t stopIndex, bool exact, const antlrcpp::BitSet &ambigAlts, ATNConfigSet *configs) { -#if DEBUG_DFA == 1 || RETRY_DEBUG == 1 +#if DFA_DEBUG == 1 || RETRY_DEBUG == 1 misc::Interval interval = misc::Interval((int)startIndex, (int)stopIndex); std::cout 
<< "reportAmbiguity " << ambigAlts << ":" << configs << ", input=" << parser->getTokenStream()->getText(interval) << std::endl; #endif @@ -1348,7 +1402,9 @@ Parser* ParserATNSimulator::getParser() { return parser; } -#pragma warning (disable:4996) // 'getenv': This function or variable may be unsafe. Consider using _dupenv_s instead. +#ifdef _MSC_VER +#pragma warning (disable:4996) // 'getenv': This function or variable may be unsafe. Consider using _dupenv_s instead. +#endif bool ParserATNSimulator::getLrLoopSetting() { char *var = std::getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT"); @@ -1358,7 +1414,9 @@ bool ParserATNSimulator::getLrLoopSetting() { return value == "true" || value == "1"; } +#ifdef _MSC_VER #pragma warning (default:4996) +#endif void ParserATNSimulator::InitializeInstanceFields() { _mode = PredictionMode::LL; diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h old mode 100755 new mode 100644 index 6520a44bde..720b20fa8a --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h @@ -5,10 +5,19 @@ #pragma once +#include +#include +#include +#include +#include #include "PredictionMode.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" #include "dfa/DFAState.h" #include "atn/ATNSimulator.h" #include "atn/PredictionContext.h" +#include "atn/PredictionContextMergeCache.h" +#include "atn/ParserATNSimulatorOptions.h" #include "SemanticContext.h" #include "atn/ATNConfig.h" @@ -251,14 +260,18 @@ namespace atn { ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, PredictionContextCache &sharedContextCache); - virtual void reset() override; - virtual void clearDFA() override; + ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, + PredictionContextCache &sharedContextCache, + const ParserATNSimulatorOptions &options); + + void reset() override; + void clearDFA() override; virtual size_t 
adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext); - + static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT; std::vector &decisionToDFA; - + /** Implements first-edge (loop entry) elimination as an optimization * during closure operations. See antlr/antlr4#1398. * @@ -348,14 +361,14 @@ namespace atn { bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const; virtual std::string getRuleName(size_t index); - virtual Ref precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt, + virtual Ref precedenceTransition(Ref const& config, const PrecedencePredicateTransition *pt, bool collectPredicates, bool inContext, bool fullCtx); void setPredictionMode(PredictionMode newMode); PredictionMode getPredictionMode(); Parser* getParser(); - + virtual std::string getTokenName(size_t t); virtual std::string getLookaheadName(TokenStream *input); @@ -366,7 +379,7 @@ namespace atn { /// "dead" code for a bit. ///
    virtual void dumpDeadEndConfigs(NoViableAltException &nvae); - + protected: Parser *const parser; @@ -380,13 +393,14 @@ namespace atn { /// also be examined during cache lookup. ///
    PredictionContextMergeCache mergeCache; + size_t _mergeCacheCounter = 0; // LAME globals to avoid parameters!!!!! I need these down deep in predTransition TokenStream *_input; size_t _startIndex; ParserRuleContext *_outerContext; dfa::DFA *_dfa; // Reference into the decisionToDFA vector. - + /// /// Performs ATN simulation to compute a predicted alternative based /// upon the remaining input, but also updates the DFA cache to avoid @@ -646,13 +660,13 @@ namespace atn { */ std::unique_ptr applyPrecedenceFilter(ATNConfigSet *configs); - virtual ATNState *getReachableTarget(Transition *trans, size_t ttype); + virtual ATNState *getReachableTarget(const Transition *trans, size_t ttype); - virtual std::vector> getPredsForAmbigAlts(const antlrcpp::BitSet &ambigAlts, + virtual std::vector> getPredsForAmbigAlts(const antlrcpp::BitSet &ambigAlts, ATNConfigSet *configs, size_t nalts); - virtual std::vector getPredicatePredictions(const antlrcpp::BitSet &ambigAlts, - std::vector> const& altToPred); + std::vector getPredicatePredictions(const antlrcpp::BitSet &ambigAlts, + const std::vector> &altToPred); /** * This method is used to improve the localization of error messages by @@ -724,8 +738,8 @@ namespace atn { /// then we stop at the first predicate that evaluates to true. This /// includes pairs with null predicates. /// - virtual antlrcpp::BitSet evalSemanticContext(std::vector predPredictions, - ParserRuleContext *outerContext, bool complete); + antlrcpp::BitSet evalSemanticContext(const std::vector &predPredictions, + ParserRuleContext *outerContext, bool complete); /** * Evaluate a semantic context within a specific parser context. 
@@ -757,7 +771,7 @@ namespace atn { * * @since 4.3 */ - virtual bool evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, + virtual bool evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, size_t alt, bool fullCtx); /* TODO: If we are doing predicates, there is no point in pursuing @@ -771,19 +785,19 @@ namespace atn { virtual void closureCheckingStopState(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon); - + /// Do the actual work of walking epsilon edges. virtual void closure_(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon); - - virtual Ref getEpsilonTarget(Ref const& config, Transition *t, bool collectPredicates, + + virtual Ref getEpsilonTarget(Ref const& config, const Transition *t, bool collectPredicates, bool inContext, bool fullCtx, bool treatEofAsEpsilon); - virtual Ref actionTransition(Ref const& config, ActionTransition *t); + virtual Ref actionTransition(Ref const& config, const ActionTransition *t); - virtual Ref predTransition(Ref const& config, PredicateTransition *pt, bool collectPredicates, + virtual Ref predTransition(Ref const& config, const PredicateTransition *pt, bool collectPredicates, bool inContext, bool fullCtx); - virtual Ref ruleTransition(Ref const& config, RuleTransition *t); + virtual Ref ruleTransition(Ref const& config, const RuleTransition *t); /** * Gets a {@link BitSet} containing the alternatives in {@code configs} diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulatorOptions.h b/runtime/Cpp/runtime/src/atn/ParserATNSimulatorOptions.h new file mode 100644 index 0000000000..86d1a4b346 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulatorOptions.h @@ -0,0 +1,52 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without 
modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#pragma once + +#include +#include "antlr4-common.h" +#include "atn/PredictionContextMergeCacheOptions.h" + +namespace antlr4 { +namespace atn { + + class ANTLR4CPP_PUBLIC ParserATNSimulatorOptions final { + public: + ParserATNSimulatorOptions& setPredictionContextMergeCacheOptions( + PredictionContextMergeCacheOptions predictionContextMergeCacheOptions) { + _predictionContextMergeCacheOptions = std::move(predictionContextMergeCacheOptions); + return *this; + } + + const PredictionContextMergeCacheOptions& getPredictionContextMergeCacheOptions() const { + return _predictionContextMergeCacheOptions; + } + + private: + PredictionContextMergeCacheOptions _predictionContextMergeCacheOptions; + }; + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp b/runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp deleted file mode 100755 index b0ee12aee4..0000000000 --- a/runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "atn/PlusBlockStartState.h" - -using namespace antlr4::atn; - -size_t PlusBlockStartState::getStateType() { - return PLUS_BLOCK_START; -} diff --git a/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h b/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h old mode 100755 new mode 100644 index a3affb823e..fe5ccdebf8 --- a/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h +++ b/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h @@ -5,6 +5,9 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" +#include "atn/ATNState.h" #include "atn/BlockStartState.h" namespace antlr4 { @@ -16,9 +19,13 @@ namespace atn { /// real decision-making note for {@code A+}. 
class ANTLR4CPP_PUBLIC PlusBlockStartState final : public BlockStartState { public: + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::PLUS_BLOCK_START; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + PlusLoopbackState *loopBackState = nullptr; - virtual size_t getStateType() override; + PlusBlockStartState() : BlockStartState(ATNStateType::PLUS_BLOCK_START) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp b/runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp deleted file mode 100755 index 1edab24b41..0000000000 --- a/runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "atn/PlusLoopbackState.h" - -using namespace antlr4::atn; - -size_t PlusLoopbackState::getStateType() { - return PLUS_LOOP_BACK; -} diff --git a/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h b/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h old mode 100755 new mode 100644 index ba7a4b64e3..91c07cbfa8 --- a/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h +++ b/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h @@ -5,6 +5,9 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" +#include "atn/ATNState.h" #include "atn/DecisionState.h" namespace antlr4 { @@ -13,9 +16,12 @@ namespace atn { /// Decision state for {@code A+} and {@code (A|B)+}. It has two transitions: /// one to the loop back to start of the block and one to exit. 
class ANTLR4CPP_PUBLIC PlusLoopbackState final : public DecisionState { - public: - virtual size_t getStateType() override; + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::PLUS_LOOP_BACK; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + + PlusLoopbackState() : DecisionState(ATNStateType::PLUS_LOOP_BACK) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp b/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp old mode 100755 new mode 100644 index 9aedc9de4a..45e4e72068 --- a/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp @@ -3,17 +3,17 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include "atn/ATNState.h" +#include "atn/TransitionType.h" #include "atn/PrecedencePredicateTransition.h" using namespace antlr4::atn; PrecedencePredicateTransition::PrecedencePredicateTransition(ATNState *target, int precedence) - : AbstractPredicateTransition(target), precedence(precedence) { -} - -Transition::SerializationType PrecedencePredicateTransition::getSerializationType() const { - return PRECEDENCE; -} + : Transition(TransitionType::PRECEDENCE, target), _predicate(std::make_shared(precedence)) {} bool PrecedencePredicateTransition::isEpsilon() const { return true; @@ -23,10 +23,6 @@ bool PrecedencePredicateTransition::matches(size_t /*symbol*/, size_t /*minVocab return false; } -Ref PrecedencePredicateTransition::getPredicate() const { - return std::make_shared(precedence); -} - std::string PrecedencePredicateTransition::toString() const { - return "PRECEDENCE " + Transition::toString() + " { precedence: " + std::to_string(precedence) + " }"; + return "PRECEDENCE " + Transition::toString() + " { precedence: " + std::to_string(getPrecedence()) + " }"; } diff --git 
a/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h b/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h old mode 100755 new mode 100644 index bc22146e7e..ab72d2246e --- a/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h +++ b/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h @@ -5,24 +5,36 @@ #pragma once -#include "atn/AbstractPredicateTransition.h" -#include "SemanticContext.h" +#include +#include +#include +#include "atn/Transition.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" +#include "atn/SemanticContext.h" namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC PrecedencePredicateTransition final : public AbstractPredicateTransition { + class ANTLR4CPP_PUBLIC PrecedencePredicateTransition final : public Transition { public: - const int precedence; + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::PRECEDENCE; } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } PrecedencePredicateTransition(ATNState *target, int precedence); - virtual SerializationType getSerializationType() const override; - virtual bool isEpsilon() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; - Ref getPredicate() const; - virtual std::string toString() const override; + int getPrecedence() const { return _predicate->precedence; } + + bool isEpsilon() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + std::string toString() const override; + + const Ref& getPredicate() const { return _predicate; } + private: + const std::shared_ptr _predicate; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp b/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp old mode 100755 new mode 100644 index 3d86bfee0b..93952c81a5 --- 
a/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp +++ b/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp @@ -3,15 +3,18 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include #include "SemanticContext.h" +#include "antlr4-common.h" #include "atn/PredicateEvalInfo.h" using namespace antlr4; using namespace antlr4::atn; PredicateEvalInfo::PredicateEvalInfo(size_t decision, TokenStream *input, size_t startIndex, size_t stopIndex, - Ref const& semctx, bool evalResult, size_t predictedAlt, bool fullCtx) + Ref semctx, bool evalResult, size_t predictedAlt, bool fullCtx) : DecisionEventInfo(decision, nullptr, input, startIndex, stopIndex, fullCtx), - semctx(semctx), predictedAlt(predictedAlt), evalResult(evalResult) { + semctx(std::move(semctx)), predictedAlt(predictedAlt), evalResult(evalResult) { } diff --git a/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h b/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h old mode 100755 new mode 100644 index b0513aea2b..736972bb2f --- a/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h +++ b/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "atn/DecisionEventInfo.h" namespace antlr4 { @@ -20,7 +22,7 @@ namespace atn { class ANTLR4CPP_PUBLIC PredicateEvalInfo : public DecisionEventInfo { public: /// The semantic context which was evaluated. 
- const Ref semctx; + const Ref semctx; /// /// The alternative number for the decision which is guarded by the semantic @@ -55,7 +57,7 @@ namespace atn { /// /// PredicateEvalInfo(size_t decision, TokenStream *input, size_t startIndex, size_t stopIndex, - Ref const& semctx, bool evalResult, size_t predictedAlt, bool fullCtx); + Ref semctx, bool evalResult, size_t predictedAlt, bool fullCtx); }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp b/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp old mode 100755 new mode 100644 index 984fc20562..ac936415a4 --- a/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp @@ -3,16 +3,17 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include "atn/ATNState.h" +#include "atn/TransitionType.h" #include "atn/PredicateTransition.h" using namespace antlr4::atn; -PredicateTransition::PredicateTransition(ATNState *target, size_t ruleIndex, size_t predIndex, bool isCtxDependent) : AbstractPredicateTransition(target), ruleIndex(ruleIndex), predIndex(predIndex), isCtxDependent(isCtxDependent) { -} - -Transition::SerializationType PredicateTransition::getSerializationType() const { - return PREDICATE; -} +PredicateTransition::PredicateTransition(ATNState *target, size_t ruleIndex, size_t predIndex, bool isCtxDependent) + : Transition(TransitionType::PREDICATE, target), _predicate(std::make_shared(ruleIndex, predIndex, isCtxDependent)) {} bool PredicateTransition::isEpsilon() const { return true; @@ -22,13 +23,7 @@ bool PredicateTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/, return false; } -Ref PredicateTransition::getPredicate() const { - return std::make_shared(ruleIndex, predIndex, isCtxDependent); -} - std::string PredicateTransition::toString() const { - return "PREDICATE " + Transition::toString() + " { ruleIndex: " + std::to_string(ruleIndex) + - ", predIndex: " + 
std::to_string(predIndex) + ", isCtxDependent: " + std::to_string(isCtxDependent) + " }"; - - // Generate and add a predicate context here? + return "PREDICATE " + Transition::toString() + " { ruleIndex: " + std::to_string(getRuleIndex()) + + ", predIndex: " + std::to_string(getPredIndex()) + ", isCtxDependent: " + std::to_string(isCtxDependent()) + " }"; } diff --git a/runtime/Cpp/runtime/src/atn/PredicateTransition.h b/runtime/Cpp/runtime/src/atn/PredicateTransition.h old mode 100755 new mode 100644 index 4d9b4205dc..1229470df7 --- a/runtime/Cpp/runtime/src/atn/PredicateTransition.h +++ b/runtime/Cpp/runtime/src/atn/PredicateTransition.h @@ -5,8 +5,14 @@ #pragma once -#include "atn/AbstractPredicateTransition.h" -#include "SemanticContext.h" +#include +#include +#include +#include "atn/Transition.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" +#include "atn/SemanticContext.h" namespace antlr4 { namespace atn { @@ -16,23 +22,34 @@ namespace atn { /// In the ATN, labels will always be exactly one predicate, but the DFA /// may have to combine a bunch of them as it collects predicates from /// multiple ATN configurations into a single DFA state. 
- class ANTLR4CPP_PUBLIC PredicateTransition final : public AbstractPredicateTransition { + class ANTLR4CPP_PUBLIC PredicateTransition final : public Transition { public: - const size_t ruleIndex; - const size_t predIndex; - const bool isCtxDependent; // e.g., $i ref in pred + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::PREDICATE; } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } PredicateTransition(ATNState *target, size_t ruleIndex, size_t predIndex, bool isCtxDependent); - virtual SerializationType getSerializationType() const override; + size_t getRuleIndex() const { + return _predicate->ruleIndex; + } + + size_t getPredIndex() const { + return _predicate->predIndex; + } - virtual bool isEpsilon() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + bool isCtxDependent() const { + return _predicate->isCtxDependent; + } - Ref getPredicate() const; + bool isEpsilon() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + std::string toString() const override; - virtual std::string toString() const override; + const Ref& getPredicate() const { return _predicate; } + private: + const std::shared_ptr _predicate; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.cpp b/runtime/Cpp/runtime/src/atn/PredictionContext.cpp old mode 100755 new mode 100644 index 860a18056d..96e487b74d --- a/runtime/Cpp/runtime/src/atn/PredictionContext.cpp +++ b/runtime/Cpp/runtime/src/atn/PredictionContext.cpp @@ -3,35 +3,147 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -#include "atn/EmptyPredictionContext.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "atn/SingletonPredictionContext.h" +#include "antlr4-common.h" #include "misc/MurmurHash.h" #include "atn/ArrayPredictionContext.h" +#include "atn/PredictionContextCache.h" +#include "atn/PredictionContextMergeCache.h" #include "RuleContext.h" #include "ParserRuleContext.h" #include "atn/RuleTransition.h" #include "support/Arrays.h" #include "support/CPPUtils.h" +#include "support/Casts.h" #include "atn/PredictionContext.h" using namespace antlr4; using namespace antlr4::misc; using namespace antlr4::atn; - using namespace antlrcpp; -size_t PredictionContext::globalNodeCount = 0; -const Ref PredictionContext::EMPTY = std::make_shared(); +namespace { -//----------------- PredictionContext ---------------------------------------------------------------------------------- + void combineCommonParents(std::vector> &parents) { + std::unordered_set> uniqueParents; + uniqueParents.reserve(parents.size()); + for (const auto &parent : parents) { + uniqueParents.insert(parent); + } + for (auto &parent : parents) { + parent = *uniqueParents.find(parent); + } + } -PredictionContext::PredictionContext(size_t cachedHashCode) : id(globalNodeCount++), cachedHashCode(cachedHashCode) { -} + Ref getCachedContextImpl(const Ref &context, + PredictionContextCache &contextCache, + std::unordered_map, + Ref> &visited) { + if (context->isEmpty()) { + return context; + } + + { + auto iterator = visited.find(context); + if (iterator != visited.end()) { + return iterator->second; // Not necessarly the same as context. 
+ } + } + + auto cached = contextCache.get(context); + if (cached) { + visited[context] = cached; + return cached; + } + + bool changed = false; + + std::vector> parents(context->size()); + for (size_t i = 0; i < parents.size(); i++) { + auto parent = getCachedContextImpl(context->getParent(i), contextCache, visited); + if (changed || parent != context->getParent(i)) { + if (!changed) { + parents.clear(); + for (size_t j = 0; j < context->size(); j++) { + parents.push_back(context->getParent(j)); + } + + changed = true; + } + + parents[i] = std::move(parent); + } + } + + if (!changed) { + visited[context] = context; + contextCache.put(context); + return context; + } + + Ref updated; + if (parents.empty()) { + updated = PredictionContext::EMPTY; + } else if (parents.size() == 1) { + updated = SingletonPredictionContext::create(std::move(parents[0]), context->getReturnState(0)); + contextCache.put(updated); + } else { + updated = std::make_shared(std::move(parents), downCast(context.get())->returnStates); + contextCache.put(updated); + } + + visited[updated] = updated; + visited[context] = updated; + + return updated; + } + + void getAllContextNodesImpl(const Ref &context, + std::vector> &nodes, + std::unordered_set &visited) { + + if (visited.find(context.get()) != visited.end()) { + return; // Already done. 
+ } + + visited.insert(context.get()); + nodes.push_back(context); + + for (size_t i = 0; i < context->size(); i++) { + getAllContextNodesImpl(context->getParent(i), nodes, visited); + } + } + + size_t insertOrAssignNodeId(std::unordered_map &nodeIds, size_t &nodeId, const PredictionContext *node) { + auto existing = nodeIds.find(node); + if (existing != nodeIds.end()) { + return existing->second; + } + return nodeIds.insert({node, nodeId++}).first->second; + } -PredictionContext::~PredictionContext() { } -Ref PredictionContext::fromRuleContext(const ATN &atn, RuleContext *outerContext) { +const Ref PredictionContext::EMPTY = std::make_shared(nullptr, PredictionContext::EMPTY_RETURN_STATE); + +//----------------- PredictionContext ---------------------------------------------------------------------------------- + +PredictionContext::PredictionContext(PredictionContextType contextType) : _contextType(contextType), _hashCode(0) {} + +PredictionContext::PredictionContext(PredictionContext&& other) : _contextType(other._contextType), _hashCode(other._hashCode.exchange(0, std::memory_order_relaxed)) {} + +Ref PredictionContext::fromRuleContext(const ATN &atn, RuleContext *outerContext) { if (outerContext == nullptr) { return PredictionContext::EMPTY; } @@ -43,15 +155,9 @@ Ref PredictionContext::fromRuleContext(const ATN &atn, RuleCo } // If we have a parent, convert it to a PredictionContext graph - Ref parent = PredictionContext::fromRuleContext(atn, dynamic_cast(outerContext->parent)); - - ATNState *state = atn.states.at(outerContext->invokingState); - RuleTransition *transition = (RuleTransition *)state->transitions[0]; - return SingletonPredictionContext::create(parent, transition->followState->stateNumber); -} - -bool PredictionContext::isEmpty() const { - return this == EMPTY.get(); + auto parent = PredictionContext::fromRuleContext(atn, RuleContext::is(outerContext->parent) ? 
downCast(outerContext->parent) : nullptr); + const auto *transition = downCast(atn.states[outerContext->invokingState]->transitions[0].get()); + return SingletonPredictionContext::create(std::move(parent), transition->followState->stateNumber); } bool PredictionContext::hasEmptyPath() const { @@ -60,40 +166,19 @@ bool PredictionContext::hasEmptyPath() const { } size_t PredictionContext::hashCode() const { - return cachedHashCode; -} - -size_t PredictionContext::calculateEmptyHashCode() { - size_t hash = MurmurHash::initialize(INITIAL_HASH); - hash = MurmurHash::finish(hash, 0); - return hash; -} - -size_t PredictionContext::calculateHashCode(Ref parent, size_t returnState) { - size_t hash = MurmurHash::initialize(INITIAL_HASH); - hash = MurmurHash::update(hash, parent); - hash = MurmurHash::update(hash, returnState); - hash = MurmurHash::finish(hash, 2); - return hash; -} - -size_t PredictionContext::calculateHashCode(const std::vector> &parents, - const std::vector &returnStates) { - size_t hash = MurmurHash::initialize(INITIAL_HASH); - - for (auto parent : parents) { - hash = MurmurHash::update(hash, parent); - } - - for (auto returnState : returnStates) { - hash = MurmurHash::update(hash, returnState); + auto hash = cachedHashCode(); + if (hash == 0) { + hash = hashCodeImpl(); + if (hash == 0) { + hash = std::numeric_limits::max(); + } + _hashCode.store(hash, std::memory_order_relaxed); } - - return MurmurHash::finish(hash, parents.size() + returnStates.size()); + return hash; } -Ref PredictionContext::merge(const Ref &a, - const Ref &b, bool rootIsWildcard, PredictionContextMergeCache *mergeCache) { +Ref PredictionContext::merge(Ref a, Ref b, + bool rootIsWildcard, PredictionContextMergeCache *mergeCache) { assert(a && b); // share same graph if both same @@ -101,42 +186,45 @@ Ref PredictionContext::merge(const Ref &a, return a; } - if (is(a) && is(b)) { - return mergeSingletons(std::dynamic_pointer_cast(a), - std::dynamic_pointer_cast(b), rootIsWildcard, 
mergeCache); + const auto aType = a->getContextType(); + const auto bType = b->getContextType(); + + if (aType == PredictionContextType::SINGLETON && bType == PredictionContextType::SINGLETON) { + return mergeSingletons(std::static_pointer_cast(std::move(a)), + std::static_pointer_cast(std::move(b)), rootIsWildcard, mergeCache); } // At least one of a or b is array. // If one is $ and rootIsWildcard, return $ as * wildcard. if (rootIsWildcard) { - if (is(a)) { + if (a == PredictionContext::EMPTY) { return a; } - if (is(b)) { + if (b == PredictionContext::EMPTY) { return b; } } // convert singleton so both are arrays to normalize - Ref left; - if (is(a)) { - left = std::make_shared(std::dynamic_pointer_cast(a)); + Ref left; + if (aType == PredictionContextType::SINGLETON) { + left = std::make_shared(downCast(*a)); } else { - left = std::dynamic_pointer_cast(a); + left = std::static_pointer_cast(std::move(a)); } - Ref right; - if (is(b)) { - right = std::make_shared(std::dynamic_pointer_cast(b)); + Ref right; + if (bType == PredictionContextType::SINGLETON) { + right = std::make_shared(downCast(*b)); } else { - right = std::dynamic_pointer_cast(b); + right = std::static_pointer_cast(std::move(b)); } - return mergeArrays(left, right, rootIsWildcard, mergeCache); + return mergeArrays(std::move(left), std::move(right), rootIsWildcard, mergeCache); } -Ref PredictionContext::mergeSingletons(const Ref &a, - const Ref &b, bool rootIsWildcard, PredictionContextMergeCache *mergeCache) { +Ref PredictionContext::mergeSingletons(Ref a, Ref b, + bool rootIsWildcard, PredictionContextMergeCache *mergeCache) { - if (mergeCache != nullptr) { // Can be null if not given to the ATNState from which this call originates. 
+ if (mergeCache) { auto existing = mergeCache->get(a, b); if (existing) { return existing; @@ -147,18 +235,18 @@ Ref PredictionContext::mergeSingletons(const Ref rootMerge = mergeRoot(a, b, rootIsWildcard); + auto rootMerge = mergeRoot(a, b, rootIsWildcard); if (rootMerge) { - if (mergeCache != nullptr) { - mergeCache->put(a, b, rootMerge); + if (mergeCache) { + return mergeCache->put(a, b, std::move(rootMerge)); } return rootMerge; } - Ref parentA = a->parent; - Ref parentB = b->parent; + const auto& parentA = a->parent; + const auto& parentB = b->parent; if (a->returnState == b->returnState) { // a == b - Ref parent = merge(parentA, parentB, rootIsWildcard, mergeCache); + auto parent = merge(parentA, parentB, rootIsWildcard, mergeCache); // If parent is same as existing a or b parent or reduced to a parent, return it. if (parent == parentA) { // ax + bx = ax, if a=b @@ -172,55 +260,55 @@ Ref PredictionContext::mergeSingletons(const Ref a_ = SingletonPredictionContext::create(parent, a->returnState); - if (mergeCache != nullptr) { - mergeCache->put(a, b, a_); - } - return a_; - } else { - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - Ref singleParent; - if (a == b || (*parentA == *parentB)) { // ax + bx = [a,b]x - singleParent = parentA; - } - if (singleParent) { // parents are same, sort payloads and use same parent - std::vector payloads = { a->returnState, b->returnState }; - if (a->returnState > b->returnState) { - payloads[0] = b->returnState; - payloads[1] = a->returnState; - } - std::vector> parents = { singleParent, singleParent }; - Ref a_ = std::make_shared(parents, payloads); - if (mergeCache != nullptr) { - mergeCache->put(a, b, a_); - } - return a_; - } - - // parents differ and can't merge them. Just pack together - // into array; can't merge. 
- // ax + by = [ax,by] - Ref a_; - if (a->returnState > b->returnState) { // sort by payload - std::vector payloads = { b->returnState, a->returnState }; - std::vector> parents = { b->parent, a->parent }; - a_ = std::make_shared(parents, payloads); - } else { - std::vector payloads = {a->returnState, b->returnState}; - std::vector> parents = { a->parent, b->parent }; - a_ = std::make_shared(parents, payloads); - } - - if (mergeCache != nullptr) { - mergeCache->put(a, b, a_); - } - return a_; - } + auto c = SingletonPredictionContext::create(std::move(parent), a->returnState); + if (mergeCache) { + return mergeCache->put(a, b, std::move(c)); + } + return c; + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + Ref singleParent; + if (a == b || (*parentA == *parentB)) { // ax + bx = [a,b]x + singleParent = parentA; + } + if (singleParent) { // parents are same, sort payloads and use same parent + std::vector payloads = { a->returnState, b->returnState }; + if (a->returnState > b->returnState) { + payloads[0] = b->returnState; + payloads[1] = a->returnState; + } + std::vector> parents = { singleParent, singleParent }; + auto c = std::make_shared(std::move(parents), std::move(payloads)); + if (mergeCache) { + return mergeCache->put(a, b, std::move(c)); + } + return c; + } + + // parents differ and can't merge them. Just pack together + // into array; can't merge. 
+ // ax + by = [ax,by] + if (a->returnState > b->returnState) { // sort by payload + std::vector payloads = { b->returnState, a->returnState }; + std::vector> parents = { b->parent, a->parent }; + auto c = std::make_shared(std::move(parents), std::move(payloads)); + if (mergeCache) { + return mergeCache->put(a, b, std::move(c)); + } + return c; + } + std::vector payloads = {a->returnState, b->returnState}; + std::vector> parents = { a->parent, b->parent }; + auto c = std::make_shared(std::move(parents), std::move(payloads)); + if (mergeCache) { + return mergeCache->put(a, b, std::move(c)); + } + return c; } -Ref PredictionContext::mergeRoot(const Ref &a, - const Ref &b, bool rootIsWildcard) { +Ref PredictionContext::mergeRoot(Ref a, Ref b, + bool rootIsWildcard) { if (rootIsWildcard) { if (a == EMPTY) { // * + b = * return EMPTY; @@ -234,30 +322,33 @@ Ref PredictionContext::mergeRoot(const Ref payloads = { b->returnState, EMPTY_RETURN_STATE }; - std::vector> parents = { b->parent, nullptr }; - Ref joined = std::make_shared(parents, payloads); - return joined; + std::vector> parents = { b->parent, nullptr }; + return std::make_shared(std::move(parents), std::move(payloads)); } if (b == EMPTY) { // x + $ = [$,x] ($ is always first if present) std::vector payloads = { a->returnState, EMPTY_RETURN_STATE }; - std::vector> parents = { a->parent, nullptr }; - Ref joined = std::make_shared(parents, payloads); - return joined; + std::vector> parents = { a->parent, nullptr }; + return std::make_shared(std::move(parents), std::move(payloads)); } } return nullptr; } -Ref PredictionContext::mergeArrays(const Ref &a, - const Ref &b, bool rootIsWildcard, PredictionContextMergeCache *mergeCache) { - - if (mergeCache != nullptr) { +Ref PredictionContext::mergeArrays(Ref a, Ref b, + bool rootIsWildcard, PredictionContextMergeCache *mergeCache) { + if (mergeCache) { auto existing = mergeCache->get(a, b); if (existing) { +#if TRACE_ATN_SIM == 1 + std::cout << "mergeArrays a=" << 
a->toString() << ",b=" << b->toString() << " -> previous" << std::endl; +#endif return existing; } existing = mergeCache->get(b, a); if (existing) { +#if TRACE_ATN_SIM == 1 + std::cout << "mergeArrays a=" << a->toString() << ",b=" << b->toString() << " -> previous" << std::endl; +#endif return existing; } } @@ -268,36 +359,33 @@ Ref PredictionContext::mergeArrays(const Ref mergedReturnStates(a->returnStates.size() + b->returnStates.size()); - std::vector> mergedParents(a->returnStates.size() + b->returnStates.size()); + std::vector> mergedParents(a->returnStates.size() + b->returnStates.size()); // walk and merge to yield mergedParents, mergedReturnStates while (i < a->returnStates.size() && j < b->returnStates.size()) { - Ref a_parent = a->parents[i]; - Ref b_parent = b->parents[j]; + const auto& parentA = a->parents[i]; + const auto& parentB = b->parents[j]; if (a->returnStates[i] == b->returnStates[j]) { // same payload (stack tops are equal), must yield merged singleton size_t payload = a->returnStates[i]; // $+$ = $ - bool both$ = payload == EMPTY_RETURN_STATE && !a_parent && !b_parent; - bool ax_ax = (a_parent && b_parent) && *a_parent == *b_parent; // ax+ax -> ax + bool both$ = payload == EMPTY_RETURN_STATE && !parentA && !parentB; + bool ax_ax = (parentA && parentB) && *parentA == *parentB; // ax+ax -> ax if (both$ || ax_ax) { - mergedParents[k] = a_parent; // choose left + mergedParents[k] = parentA; // choose left mergedReturnStates[k] = payload; - } - else { // ax+ay -> a'[x,y] - Ref mergedParent = merge(a_parent, b_parent, rootIsWildcard, mergeCache); - mergedParents[k] = mergedParent; + } else { // ax+ay -> a'[x,y] + mergedParents[k] = merge(parentA, parentB, rootIsWildcard, mergeCache); mergedReturnStates[k] = payload; } i++; // hop over left one as usual j++; // but also skip one in right side since we merge } else if (a->returnStates[i] < b->returnStates[j]) { // copy a[i] to M - mergedParents[k] = a_parent; + mergedParents[k] = parentA; 
mergedReturnStates[k] = a->returnStates[i]; i++; - } - else { // b > a, copy b[j] to M - mergedParents[k] = b_parent; + } else { // b > a, copy b[j] to M + mergedParents[k] = parentB; mergedReturnStates[k] = b->returnStates[j]; j++; } @@ -306,13 +394,13 @@ Ref PredictionContext::mergeArrays(const RefreturnStates.size()) { - for (std::vector::size_type p = i; p < a->returnStates.size(); p++) { + for (auto p = i; p < a->returnStates.size(); p++) { mergedParents[k] = a->parents[p]; mergedReturnStates[k] = a->returnStates[p]; k++; } } else { - for (std::vector::size_type p = j; p < b->returnStates.size(); p++) { + for (auto p = j; p < b->returnStates.size(); p++) { mergedParents[k] = b->parents[p]; mergedReturnStates[k] = b->returnStates[p]; k++; @@ -322,63 +410,59 @@ Ref PredictionContext::mergeArrays(const Ref a_ = SingletonPredictionContext::create(mergedParents[0], mergedReturnStates[0]); - if (mergeCache != nullptr) { - mergeCache->put(a, b, a_); + auto c = SingletonPredictionContext::create(std::move(mergedParents[0]), mergedReturnStates[0]); + if (mergeCache) { + return mergeCache->put(a, b, std::move(c)); } - return a_; + return c; } mergedParents.resize(k); mergedReturnStates.resize(k); } - Ref M = std::make_shared(mergedParents, mergedReturnStates); + ArrayPredictionContext m(std::move(mergedParents), std::move(mergedReturnStates)); // if we created same array as a or b, return that instead // TODO: track whether this is possible above during merge sort for speed - if (*M == *a) { - if (mergeCache != nullptr) { - mergeCache->put(a, b, a); - } + if (m == *a) { + if (mergeCache) { +#if TRACE_ATN_SIM == 1 + std::cout << "mergeArrays a=" << a->toString() << ",b=" << b->toString() << " -> a" << std::endl; +#endif + return mergeCache->put(a, b, a); + } +#if TRACE_ATN_SIM == 1 + std::cout << "mergeArrays a=" << a->toString() << ",b=" << b->toString() << " -> a" << std::endl; +#endif return a; } - if (*M == *b) { - if (mergeCache != nullptr) { - mergeCache->put(a, b, 
b); + if (m == *b) { + if (mergeCache) { +#if TRACE_ATN_SIM == 1 + std::cout << "mergeArrays a=" << a->toString() << ",b=" << b->toString() << " -> b" << std::endl; +#endif + return mergeCache->put(a, b, b); } +#if TRACE_ATN_SIM == 1 + std::cout << "mergeArrays a=" << a->toString() << ",b=" << b->toString() << " -> b" << std::endl; +#endif return b; } - // ml: this part differs from Java code. We have to recreate the context as the parents array is copied on creation. - if (combineCommonParents(mergedParents)) { - mergedReturnStates.resize(mergedParents.size()); - M = std::make_shared(mergedParents, mergedReturnStates); - } - - if (mergeCache != nullptr) { - mergeCache->put(a, b, M); - } - return M; -} - -bool PredictionContext::combineCommonParents(std::vector> &parents) { + combineCommonParents(m.parents); + auto c = std::make_shared(std::move(m)); - std::set> uniqueParents; - for (size_t p = 0; p < parents.size(); ++p) { - Ref parent = parents[p]; - if (uniqueParents.find(parent) == uniqueParents.end()) { // don't replace - uniqueParents.insert(parent); - } - } +#if TRACE_ATN_SIM == 1 + std::cout << "mergeArrays a=" << a->toString() << ",b=" << b->toString() << " -> " << c->toString() << std::endl; +#endif - for (size_t p = 0; p < parents.size(); ++p) { - parents[p] = *uniqueParents.find(parents[p]); + if (mergeCache) { + return mergeCache->put(a, b, std::move(c)); } - - return true; + return c; } -std::string PredictionContext::toDOTString(const Ref &context) { +std::string PredictionContext::toDOTString(const Ref &context) { if (context == nullptr) { return ""; } @@ -386,24 +470,23 @@ std::string PredictionContext::toDOTString(const Ref &context std::stringstream ss; ss << "digraph G {\n" << "rankdir=LR;\n"; - std::vector> nodes = getAllContextNodes(context); - std::sort(nodes.begin(), nodes.end(), [](const Ref &o1, const Ref &o2) { - return o1->id - o2->id; - }); + std::vector> nodes = getAllContextNodes(context); + std::unordered_map nodeIds; + size_t nodeId 
= 0; - for (auto current : nodes) { - if (is(current)) { - std::string s = std::to_string(current->id); + for (const auto ¤t : nodes) { + if (current->getContextType() == PredictionContextType::SINGLETON) { + std::string s = std::to_string(insertOrAssignNodeId(nodeIds, nodeId, current.get())); ss << " s" << s; std::string returnState = std::to_string(current->getReturnState(0)); - if (is(current)) { + if (current == PredictionContext::EMPTY) { returnState = "$"; } ss << " [label=\"" << returnState << "\"];\n"; continue; } - Ref arr = std::static_pointer_cast(current); - ss << " s" << arr->id << " [shape=box, label=\"" << "["; + Ref arr = std::static_pointer_cast(current); + ss << " s" << insertOrAssignNodeId(nodeIds, nodeId, arr.get()) << " [shape=box, label=\"" << "["; bool first = true; for (auto inv : arr->returnStates) { if (!first) { @@ -420,7 +503,7 @@ std::string PredictionContext::toDOTString(const Ref &context ss << "\"];\n"; } - for (auto current : nodes) { + for (const auto ¤t : nodes) { if (current == EMPTY) { continue; } @@ -428,7 +511,7 @@ std::string PredictionContext::toDOTString(const Ref &context if (!current->getParent(i)) { continue; } - ss << " s" << current->id << "->" << "s" << current->getParent(i)->id; + ss << " s" << insertOrAssignNodeId(nodeIds, nodeId, current.get()) << "->" << "s" << insertOrAssignNodeId(nodeIds, nodeId, current->getParent(i).get()); if (current->size() > 1) { ss << " [label=\"parent[" << i << "]\"];\n"; } else { @@ -442,112 +525,31 @@ std::string PredictionContext::toDOTString(const Ref &context } // The "visited" map is just a temporary structure to control the retrieval process (which is recursive). -Ref PredictionContext::getCachedContext(const Ref &context, - PredictionContextCache &contextCache, std::map, Ref> &visited) { - if (context->isEmpty()) { - return context; - } - - { - auto iterator = visited.find(context); - if (iterator != visited.end()) - return iterator->second; // Not necessarly the same as context. 
- } - - auto iterator = contextCache.find(context); - if (iterator != contextCache.end()) { - visited[context] = *iterator; - - return *iterator; - } - - bool changed = false; - - std::vector> parents(context->size()); - for (size_t i = 0; i < parents.size(); i++) { - Ref parent = getCachedContext(context->getParent(i), contextCache, visited); - if (changed || parent != context->getParent(i)) { - if (!changed) { - parents.clear(); - for (size_t j = 0; j < context->size(); j++) { - parents.push_back(context->getParent(j)); - } - - changed = true; - } - - parents[i] = parent; - } - } - - if (!changed) { - contextCache.insert(context); - visited[context] = context; - - return context; - } - - Ref updated; - if (parents.empty()) { - updated = EMPTY; - } else if (parents.size() == 1) { - updated = SingletonPredictionContext::create(parents[0], context->getReturnState(0)); - contextCache.insert(updated); - } else { - updated = std::make_shared(parents, std::dynamic_pointer_cast(context)->returnStates); - contextCache.insert(updated); - } - - visited[updated] = updated; - visited[context] = updated; - - return updated; +Ref PredictionContext::getCachedContext(const Ref &context, + PredictionContextCache &contextCache) { + std::unordered_map, Ref> visited; + return getCachedContextImpl(context, contextCache, visited); } -std::vector> PredictionContext::getAllContextNodes(const Ref &context) { - std::vector> nodes; - std::set visited; - getAllContextNodes_(context, nodes, visited); +std::vector> PredictionContext::getAllContextNodes(const Ref &context) { + std::vector> nodes; + std::unordered_set visited; + getAllContextNodesImpl(context, nodes, visited); return nodes; } - -void PredictionContext::getAllContextNodes_(const Ref &context, std::vector> &nodes, - std::set &visited) { - - if (visited.find(context.get()) != visited.end()) { - return; // Already done. 
- } - - visited.insert(context.get()); - nodes.push_back(context); - - for (size_t i = 0; i < context->size(); i++) { - getAllContextNodes_(context->getParent(i), nodes, visited); - } -} - -std::string PredictionContext::toString() const { - - return antlrcpp::toString(this); -} - -std::string PredictionContext::toString(Recognizer * /*recog*/) const { - return toString(); -} - -std::vector PredictionContext::toStrings(Recognizer *recognizer, int currentState) { +std::vector PredictionContext::toStrings(Recognizer *recognizer, int currentState) const { return toStrings(recognizer, EMPTY, currentState); } -std::vector PredictionContext::toStrings(Recognizer *recognizer, const Ref &stop, int currentState) { +std::vector PredictionContext::toStrings(Recognizer *recognizer, const Ref &stop, int currentState) const { std::vector result; for (size_t perm = 0; ; perm++) { size_t offset = 0; bool last = true; - PredictionContext *p = this; + const PredictionContext *p = this; size_t stateNumber = currentState; std::stringstream ss; @@ -608,55 +610,3 @@ std::vector PredictionContext::toStrings(Recognizer *recognizer, co return result; } - -//----------------- PredictionContextMergeCache ------------------------------------------------------------------------ - -Ref PredictionContextMergeCache::put(Ref const& key1, Ref const& key2, - Ref const& value) { - Ref previous; - - auto iterator = _data.find(key1); - if (iterator == _data.end()) - _data[key1][key2] = value; - else { - auto iterator2 = iterator->second.find(key2); - if (iterator2 != iterator->second.end()) - previous = iterator2->second; - iterator->second[key2] = value; - } - - return previous; -} - -Ref PredictionContextMergeCache::get(Ref const& key1, Ref const& key2) { - auto iterator = _data.find(key1); - if (iterator == _data.end()) - return nullptr; - - auto iterator2 = iterator->second.find(key2); - if (iterator2 == iterator->second.end()) - return nullptr; - - return iterator2->second; -} - -void 
PredictionContextMergeCache::clear() { - _data.clear(); -} - -std::string PredictionContextMergeCache::toString() const { - std::string result; - for (auto pair : _data) - for (auto pair2 : pair.second) - result += pair2.second->toString() + "\n"; - - return result; -} - -size_t PredictionContextMergeCache::count() const { - size_t result = 0; - for (auto entry : _data) - result += entry.second.size(); - return result; -} - diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.h b/runtime/Cpp/runtime/src/atn/PredictionContext.h old mode 100755 new mode 100644 index 9a52e00e5b..2ae5712387 --- a/runtime/Cpp/runtime/src/atn/PredictionContext.h +++ b/runtime/Cpp/runtime/src/atn/PredictionContext.h @@ -5,24 +5,35 @@ #pragma once +#include +#include +#include +#include +#include + #include "Recognizer.h" +#include "antlr4-common.h" #include "atn/ATN.h" #include "atn/ATNState.h" +#include "atn/PredictionContextType.h" namespace antlr4 { + + class RuleContext; + namespace atn { - struct PredictionContextHasher; - struct PredictionContextComparer; + class ATN; + class ArrayPredictionContext; + class SingletonPredictionContext; + class PredictionContextCache; class PredictionContextMergeCache; - typedef std::unordered_set, PredictionContextHasher, PredictionContextComparer> PredictionContextCache; - class ANTLR4CPP_PUBLIC PredictionContext { public: /// Represents $ in local context prediction, which means wildcard. /// *+x = *. - static const Ref EMPTY; + static const Ref EMPTY; /// Represents $ in an array in full context mode, when $ /// doesn't mean wildcard: $ + x = [$,x]. Here, @@ -30,68 +41,13 @@ namespace atn { // ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where // -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't // conflict with real return states. 
- static const size_t EMPTY_RETURN_STATE = static_cast(-10); // std::numeric_limits::max() - 9; - - private: - static const size_t INITIAL_HASH = 1; - - public: - static size_t globalNodeCount; - const size_t id; - - /// - /// Stores the computed hash code of this . The hash - /// code is computed in parts to match the following reference algorithm. - /// - ///
    -    ///  private int referenceHashCode() {
    -    ///      int hash = ();
    -    ///
    -    ///      for (int i = 0; i < ; i++) {
    -    ///          hash = (hash, (i));
    -    ///      }
    -    ///
    -    ///      for (int i = 0; i < ; i++) {
    -    ///          hash = (hash, (i));
    -    ///      }
    -    ///
    -    ///      hash = (hash, 2 * );
    -    ///      return hash;
    -    ///  }
    -    /// 
    - ///
    - const size_t cachedHashCode; + static constexpr size_t EMPTY_RETURN_STATE = std::numeric_limits::max() - 9; - protected: - PredictionContext(size_t cachedHashCode); - ~PredictionContext(); - - public: - /// Convert a RuleContext tree to a PredictionContext graph. - /// Return EMPTY if outerContext is empty. - static Ref fromRuleContext(const ATN &atn, RuleContext *outerContext); - - virtual size_t size() const = 0; - virtual Ref getParent(size_t index) const = 0; - virtual size_t getReturnState(size_t index) const = 0; - - virtual bool operator == (const PredictionContext &o) const = 0; - - /// This means only the EMPTY (wildcard? not sure) context is in set. - virtual bool isEmpty() const; - virtual bool hasEmptyPath() const; - virtual size_t hashCode() const; - - protected: - static size_t calculateEmptyHashCode(); - static size_t calculateHashCode(Ref parent, size_t returnState); - static size_t calculateHashCode(const std::vector> &parents, - const std::vector &returnStates); - - public: // dispatch - static Ref merge(const Ref &a, const Ref &b, - bool rootIsWildcard, PredictionContextMergeCache *mergeCache); + static Ref merge(Ref a, + Ref b, + bool rootIsWildcard, + PredictionContextMergeCache *mergeCache); /// /// Merge two instances. 
@@ -127,8 +83,10 @@ namespace atn { /// {@code true} if this is a local-context merge, /// otherwise false to indicate a full-context merge /// - static Ref mergeSingletons(const Ref &a, - const Ref &b, bool rootIsWildcard, PredictionContextMergeCache *mergeCache); + static Ref mergeSingletons(Ref a, + Ref b, + bool rootIsWildcard, + PredictionContextMergeCache *mergeCache); /** * Handle case where at least one of {@code a} or {@code b} is @@ -168,8 +126,9 @@ namespace atn { * @param rootIsWildcard {@code true} if this is a local-context merge, * otherwise false to indicate a full-context merge */ - static Ref mergeRoot(const Ref &a, - const Ref &b, bool rootIsWildcard); + static Ref mergeRoot(Ref a, + Ref b, + bool rootIsWildcard); /** * Merge two {@link ArrayPredictionContext} instances. @@ -190,65 +149,82 @@ namespace atn { * {@link SingletonPredictionContext}.
    *

    */ - static Ref mergeArrays(const Ref &a, - const Ref &b, bool rootIsWildcard, PredictionContextMergeCache *mergeCache); + static Ref mergeArrays(Ref a, + Ref b, + bool rootIsWildcard, + PredictionContextMergeCache *mergeCache); - protected: - /// Make pass over all M parents; merge any equal() ones. - /// @returns true if the list has been changed (i.e. duplicates where found). - static bool combineCommonParents(std::vector> &parents); + static std::string toDOTString(const Ref &context); - public: - static std::string toDOTString(const Ref &context); + static Ref getCachedContext(const Ref &context, + PredictionContextCache &contextCache); - static Ref getCachedContext(const Ref &context, - PredictionContextCache &contextCache, - std::map, Ref> &visited); + static std::vector> getAllContextNodes(const Ref &context); - // ter's recursive version of Sam's getAllNodes() - static std::vector> getAllContextNodes(const Ref &context); - static void getAllContextNodes_(const Ref &context, - std::vector> &nodes, std::set &visited); + /// Convert a RuleContext tree to a PredictionContext graph. + /// Return EMPTY if outerContext is empty. + static Ref fromRuleContext(const ATN &atn, RuleContext *outerContext); - virtual std::string toString() const; - virtual std::string toString(Recognizer *recog) const; + PredictionContext(const PredictionContext&) = delete; - std::vector toStrings(Recognizer *recognizer, int currentState); - std::vector toStrings(Recognizer *recognizer, const Ref &stop, int currentState); - }; + virtual ~PredictionContext() = default; - struct PredictionContextHasher { - size_t operator () (const Ref &k) const { - return k->hashCode(); - } - }; + PredictionContext& operator=(const PredictionContext&) = delete; + PredictionContext& operator=(PredictionContext&&) = delete; - struct PredictionContextComparer { - bool operator () (const Ref &lhs, const Ref &rhs) const - { - if (lhs == rhs) // Object identity. 
- return true; - return (lhs->hashCode() == rhs->hashCode()) && (*lhs == *rhs); - } - }; + PredictionContextType getContextType() const { return _contextType; } - class PredictionContextMergeCache { - public: - Ref put(Ref const& key1, Ref const& key2, - Ref const& value); - Ref get(Ref const& key1, Ref const& key2); + virtual size_t size() const = 0; + virtual const Ref& getParent(size_t index) const = 0; + virtual size_t getReturnState(size_t index) const = 0; - void clear(); - std::string toString() const; - size_t count() const; + /// This means only the EMPTY (wildcard? not sure) context is in set. + virtual bool isEmpty() const = 0; + bool hasEmptyPath() const; - private: - std::unordered_map, - std::unordered_map, Ref, PredictionContextHasher, PredictionContextComparer>, - PredictionContextHasher, PredictionContextComparer> _data; + size_t hashCode() const; + + virtual bool equals(const PredictionContext &other) const = 0; + virtual std::string toString() const = 0; + + std::vector toStrings(Recognizer *recognizer, int currentState) const; + std::vector toStrings(Recognizer *recognizer, + const Ref &stop, + int currentState) const; + + protected: + explicit PredictionContext(PredictionContextType contextType); + + PredictionContext(PredictionContext&& other); + + virtual size_t hashCodeImpl() const = 0; + + size_t cachedHashCode() const { return _hashCode.load(std::memory_order_relaxed); } + + private: + const PredictionContextType _contextType; + mutable std::atomic _hashCode; }; -} // namespace atn -} // namespace antlr4 + inline bool operator==(const PredictionContext &lhs, const PredictionContext &rhs) { + return lhs.equals(rhs); + } + + inline bool operator!=(const PredictionContext &lhs, const PredictionContext &rhs) { + return !operator==(lhs, rhs); + } + +} // namespace atn +} // namespace antlr4 + +namespace std { + + template <> + struct hash<::antlr4::atn::PredictionContext> { + size_t operator()(const ::antlr4::atn::PredictionContext 
&predictionContext) const { + return predictionContext.hashCode(); + } + }; +} // namespace std diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextCache.cpp b/runtime/Cpp/runtime/src/atn/PredictionContextCache.cpp new file mode 100644 index 0000000000..3db124306c --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/PredictionContextCache.cpp @@ -0,0 +1,59 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include +#include +#include "antlr4-common.h" +#include "atn/PredictionContextCache.h" + +using namespace antlr4::atn; + +void PredictionContextCache::put(const Ref &value) { + assert(value); + + _data.insert(value); +} + +Ref PredictionContextCache::get( + const Ref &value) const { + assert(value); + + auto iterator = _data.find(value); + if (iterator == _data.end()) { + return nullptr; + } + return *iterator; +} + +size_t PredictionContextCache::PredictionContextHasher::operator()( + const Ref &predictionContext) const { + return predictionContext->hashCode(); +} + +bool PredictionContextCache::PredictionContextComparer::operator()( + const Ref &lhs, + const Ref &rhs) const { + return *lhs == *rhs; +} diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextCache.h b/runtime/Cpp/runtime/src/atn/PredictionContextCache.h new file mode 100644 index 0000000000..e38f1f30fd --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/PredictionContextCache.h @@ -0,0 +1,65 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include +#include "atn/PredictionContext.h" +#include "antlr4-common.h" +#include "FlatHashSet.h" + +namespace antlr4 { +namespace atn { + + class ANTLR4CPP_PUBLIC PredictionContextCache final { + public: + PredictionContextCache() = default; + + PredictionContextCache(const PredictionContextCache&) = delete; + PredictionContextCache(PredictionContextCache&&) = delete; + + PredictionContextCache& operator=(const PredictionContextCache&) = delete; + PredictionContextCache& operator=(PredictionContextCache&&) = delete; + + void put(const Ref &value); + + Ref get(const Ref &value) const; + + private: + struct ANTLR4CPP_PUBLIC PredictionContextHasher final { + size_t operator()(const Ref &predictionContext) const; + }; + + struct ANTLR4CPP_PUBLIC PredictionContextComparer final { + bool operator()(const Ref &lhs, + const Ref &rhs) const; + }; + + FlatHashSet, + PredictionContextHasher, PredictionContextComparer> _data; + }; + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.cpp b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.cpp new file mode 100644 index 0000000000..978e3a9715 --- /dev/null +++ 
b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.cpp @@ -0,0 +1,171 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include +#include +#include +#include "atn/PredictionContextMergeCache.h" + +#include "antlr4-common.h" +#include "misc/MurmurHash.h" + +using namespace antlr4::atn; +using namespace antlr4::misc; + +PredictionContextMergeCache::PredictionContextMergeCache( + const PredictionContextMergeCacheOptions &options) : _options(options) {} + +Ref PredictionContextMergeCache::put( + const Ref &key1, + const Ref &key2, + Ref value) { + assert(key1); + assert(key2); + + if (getOptions().getMaxSize() == 0) { + // Cache is effectively disabled. + return value; + } + + auto [existing, inserted] = _entries.try_emplace(std::make_pair(key1.get(), key2.get())); + if (inserted) { + try { + existing->second.reset(new Entry()); + } catch (...) { + _entries.erase(existing); + throw; + } + existing->second->key = std::make_pair(key1, key2); + existing->second->value = std::move(value); + pushToFront(existing->second.get()); + } else { + if (existing->second->value != value) { + existing->second->value = std::move(value); + } + moveToFront(existing->second.get()); + } + compact(existing->second.get()); + return existing->second->value; +} + +Ref PredictionContextMergeCache::get( + const Ref &key1, + const Ref &key2) const { + assert(key1); + assert(key2); + + if (getOptions().getMaxSize() == 0) { + // Cache is effectively disabled. 
+ return nullptr; + } + + auto iterator = _entries.find(std::make_pair(key1.get(), key2.get())); + if (iterator == _entries.end()) { + return nullptr; + } + moveToFront(iterator->second.get()); + return iterator->second->value; +} + +void PredictionContextMergeCache::clear() { + Container().swap(_entries); + _head = _tail = nullptr; + _size = 0; +} + +void PredictionContextMergeCache::moveToFront(Entry *entry) const { + if (entry->prev == nullptr) { + assert(entry == _head); + return; + } + entry->prev->next = entry->next; + if (entry->next != nullptr) { + entry->next->prev = entry->prev; + } else { + assert(entry == _tail); + _tail = entry->prev; + } + entry->prev = nullptr; + entry->next = _head; + _head->prev = entry; + _head = entry; + assert(entry->prev == nullptr); +} + +void PredictionContextMergeCache::pushToFront(Entry *entry) { + ++_size; + entry->prev = nullptr; + entry->next = _head; + if (_head != nullptr) { + _head->prev = entry; + _head = entry; + } else { + assert(entry->next == nullptr); + _head = entry; + _tail = entry; + } + assert(entry->prev == nullptr); +} + +void PredictionContextMergeCache::remove(Entry *entry) { + if (entry->prev != nullptr) { + entry->prev->next = entry->next; + } else { + assert(entry == _head); + _head = entry->next; + } + if (entry->next != nullptr) { + entry->next->prev = entry->prev; + } else { + assert(entry == _tail); + _tail = entry->prev; + } + --_size; + _entries.erase(std::make_pair(entry->key.first.get(), entry->key.second.get())); +} + +void PredictionContextMergeCache::compact(const Entry *preserve) { + Entry *entry = _tail; + while (entry != nullptr && _size > getOptions().getMaxSize()) { + Entry *next = entry->prev; + if (entry != preserve) { + remove(entry); + } + entry = next; + } +} + +size_t PredictionContextMergeCache::PredictionContextHasher::operator()( + const PredictionContextPair &value) const { + size_t hash = MurmurHash::initialize(); + hash = MurmurHash::update(hash, value.first->hashCode()); + 
hash = MurmurHash::update(hash, value.second->hashCode()); + return MurmurHash::finish(hash, 2); +} + +bool PredictionContextMergeCache::PredictionContextComparer::operator()( + const PredictionContextPair &lhs, const PredictionContextPair &rhs) const { + return *lhs.first == *rhs.first && *lhs.second == *rhs.second; +} diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.h b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.h new file mode 100644 index 0000000000..69ea7ee27c --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.h @@ -0,0 +1,104 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include +#include +#include + +#include "atn/PredictionContext.h" +#include "antlr4-common.h" +#include "atn/PredictionContextMergeCacheOptions.h" +#include "FlatHashMap.h" + +namespace antlr4 { +namespace atn { + + class ANTLR4CPP_PUBLIC PredictionContextMergeCache final { + public: + PredictionContextMergeCache() + : PredictionContextMergeCache(PredictionContextMergeCacheOptions()) {} + + explicit PredictionContextMergeCache(const PredictionContextMergeCacheOptions &options); + + PredictionContextMergeCache(const PredictionContextMergeCache&) = delete; + PredictionContextMergeCache(PredictionContextMergeCache&&) = delete; + + PredictionContextMergeCache& operator=(const PredictionContextMergeCache&) = delete; + PredictionContextMergeCache& operator=(PredictionContextMergeCache&&) = delete; + + Ref put(const Ref &key1, + const Ref &key2, + Ref value); + + Ref get(const Ref &key1, + const Ref &key2) const; + + const PredictionContextMergeCacheOptions& getOptions() const { return _options; } + + void clear(); + + private: + using PredictionContextPair = std::pair; + + struct ANTLR4CPP_PUBLIC PredictionContextHasher final { + size_t operator()(const PredictionContextPair &value) const; + }; + + struct ANTLR4CPP_PUBLIC PredictionContextComparer final { + bool operator()(const PredictionContextPair &lhs, const PredictionContextPair &rhs) const; + }; + + struct ANTLR4CPP_PUBLIC Entry final { + std::pair, Ref> key; + Ref 
value; + Entry *prev = nullptr; + Entry *next = nullptr; + }; + + void moveToFront(Entry *entry) const; + + void pushToFront(Entry *entry); + + void remove(Entry *entry); + + void compact(const Entry *preserve); + + using Container = FlatHashMap, + PredictionContextHasher, PredictionContextComparer>; + + const PredictionContextMergeCacheOptions _options; + + Container _entries; + + mutable Entry *_head = nullptr; + mutable Entry *_tail = nullptr; + + size_t _size = 0; + }; + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextMergeCacheOptions.h b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCacheOptions.h new file mode 100644 index 0000000000..7331cc17e0 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCacheOptions.h @@ -0,0 +1,71 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include +#include +#include + +#include "antlr4-common.h" + +namespace antlr4 { +namespace atn { + + class ANTLR4CPP_PUBLIC PredictionContextMergeCacheOptions final { + public: + PredictionContextMergeCacheOptions() = default; + + size_t getMaxSize() const { return _maxSize; } + + bool hasMaxSize() const { return getMaxSize() != std::numeric_limits::max(); } + + PredictionContextMergeCacheOptions& setMaxSize(size_t maxSize) { + _maxSize = maxSize; + return *this; + } + + size_t getClearEveryN() const { + return _clearEveryN; + } + + bool hasClearEveryN() const { return getClearEveryN() != 0; } + + PredictionContextMergeCacheOptions& setClearEveryN(uint64_t clearEveryN) { + _clearEveryN = clearEveryN; + return *this; + } + + PredictionContextMergeCacheOptions& neverClear() { + return setClearEveryN(0); + } + + private: + size_t _maxSize = std::numeric_limits::max(); + uint64_t _clearEveryN = 1; + }; + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextType.h b/runtime/Cpp/runtime/src/atn/PredictionContextType.h new file mode 100644 index 0000000000..c8c4473e13 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/PredictionContextType.h @@ -0,0 +1,21 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +#pragma once + +#include + +#include "antlr4-common.h" + +namespace antlr4 { +namespace atn { + + enum class PredictionContextType : size_t { + SINGLETON = 1, + ARRAY = 2, + }; + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/PredictionMode.cpp b/runtime/Cpp/runtime/src/atn/PredictionMode.cpp old mode 100755 new mode 100644 index d15a8268f4..efecc50cc7 --- a/runtime/Cpp/runtime/src/atn/PredictionMode.cpp +++ b/runtime/Cpp/runtime/src/atn/PredictionMode.cpp @@ -1,9 +1,14 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "atn/RuleStopState.h" +#include "antlr4-common.h" #include "atn/ATNConfigSet.h" #include "atn/ATNConfig.h" #include "misc/MurmurHash.h" @@ -62,7 +67,7 @@ bool PredictionModeClass::hasSLLConflictTerminatingPrediction(PredictionMode mod // dup configs, tossing out semantic predicates ATNConfigSet dup(true); for (auto &config : configs->configs) { - Ref c = std::make_shared(config, SemanticContext::NONE); + Ref c = std::make_shared(*config, SemanticContext::Empty::Instance); dup.add(c); } std::vector altsets = getConflictingAltSubsets(&dup); @@ -73,8 +78,8 @@ bool PredictionModeClass::hasSLLConflictTerminatingPrediction(PredictionMode mod } bool PredictionModeClass::hasConfigInRuleStopState(ATNConfigSet *configs) { - for (auto &c : configs->configs) { - if (is(c->state)) { + for (const auto &config : configs->configs) { + if (RuleStopState::is(config->state)) { return true; } } @@ -83,8 +88,8 @@ bool PredictionModeClass::hasConfigInRuleStopState(ATNConfigSet *configs) { } bool PredictionModeClass::allConfigsInRuleStopStates(ATNConfigSet *configs) { - for (auto &config : configs->configs) { - if (!is(config->state)) { + for (const auto &config : 
configs->configs) { + if (!RuleStopState::is(config->state)) { return false; } } @@ -142,7 +147,7 @@ size_t PredictionModeClass::getUniqueAlt(const std::vector& al antlrcpp::BitSet PredictionModeClass::getAlts(const std::vector& altsets) { antlrcpp::BitSet all; - for (antlrcpp::BitSet alts : altsets) { + for (const auto &alts : altsets) { all |= alts; } @@ -151,43 +156,44 @@ antlrcpp::BitSet PredictionModeClass::getAlts(const std::vectorconfigs) { + for (const auto &config : configs->configs) { alts.set(config->alt); } return alts; } std::vector PredictionModeClass::getConflictingAltSubsets(ATNConfigSet *configs) { - std::unordered_map configToAlts; + std::unordered_map configToAlts; for (auto &config : configs->configs) { configToAlts[config.get()].set(config->alt); } std::vector values; - for (auto it : configToAlts) { - values.push_back(it.second); + values.reserve(configToAlts.size()); + for (const auto &pair : configToAlts) { + values.push_back(pair.second); } return values; } -std::map PredictionModeClass::getStateToAltMap(ATNConfigSet *configs) { - std::map m; - for (auto &c : configs->configs) { +std::unordered_map PredictionModeClass::getStateToAltMap(ATNConfigSet *configs) { + std::unordered_map m; + for (const auto &c : configs->configs) { m[c->state].set(c->alt); } return m; } bool PredictionModeClass::hasStateAssociatedWithOneAlt(ATNConfigSet *configs) { - std::map x = getStateToAltMap(configs); - for (std::map::iterator it = x.begin(); it != x.end(); it++){ - if (it->second.count() == 1) return true; + auto x = getStateToAltMap(configs); + for (const auto &pair : x){ + if (pair.second.count() == 1) return true; } return false; } size_t PredictionModeClass::getSingleViableAlt(const std::vector& altsets) { antlrcpp::BitSet viableAlts; - for (antlrcpp::BitSet alts : altsets) { + for (const auto &alts : altsets) { size_t minAlt = alts.nextSetBit(0); viableAlts.set(minAlt); diff --git a/runtime/Cpp/runtime/src/atn/PredictionMode.h 
b/runtime/Cpp/runtime/src/atn/PredictionMode.h old mode 100755 new mode 100644 index 726f4cf40f..faef431d40 --- a/runtime/Cpp/runtime/src/atn/PredictionMode.h +++ b/runtime/Cpp/runtime/src/atn/PredictionMode.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "atn/ATNState.h" #include "support/BitSet.h" namespace antlr4 { @@ -425,7 +430,7 @@ namespace atn { /// cref="ATNConfig#alt alt"/> /// ///
    - static std::map getStateToAltMap(ATNConfigSet *configs); + static std::unordered_map getStateToAltMap(ATNConfigSet *configs); static bool hasStateAssociatedWithOneAlt(ATNConfigSet *configs); diff --git a/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp old mode 100755 new mode 100644 index 62fc12f0ac..a386d4856b --- a/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp @@ -10,6 +10,10 @@ #include "support/CPPUtils.h" #include "atn/ProfilingATNSimulator.h" +#include +#include +#include +#include using namespace antlr4; using namespace antlr4::atn; @@ -112,10 +116,10 @@ std::unique_ptr ProfilingATNSimulator::computeReachSet(ATNConfigSe return reachConfigs; } -bool ProfilingATNSimulator::evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, +bool ProfilingATNSimulator::evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, size_t alt, bool fullCtx) { bool result = ParserATNSimulator::evalSemanticContext(pred, parserCallStack, alt, fullCtx); - if (!(std::dynamic_pointer_cast(pred) != nullptr)) { + if (!(std::dynamic_pointer_cast(pred) != nullptr)) { bool fullContext = _llStopIndex >= 0; int stopIndex = fullContext ? 
_llStopIndex : _sllStopIndex; _decisions[_currentDecision].predicateEvals.push_back( diff --git a/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h b/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h old mode 100755 new mode 100644 index 79ecd00b48..94663a555f --- a/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h @@ -5,7 +5,11 @@ #pragma once +#include +#include +#include #include "atn/ParserATNSimulator.h" +#include "antlr4-common.h" #include "atn/DecisionInfo.h" namespace antlr4 { @@ -13,9 +17,9 @@ namespace atn { class ANTLR4CPP_PUBLIC ProfilingATNSimulator : public ParserATNSimulator { public: - ProfilingATNSimulator(Parser *parser); + explicit ProfilingATNSimulator(Parser *parser); - virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext) override; + size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext) override; virtual std::vector getDecisionInfo() const; virtual dfa::DFAState* getCurrentState() const; @@ -43,16 +47,16 @@ namespace atn { ///
    size_t conflictingAltResolvedBySLL = 0; - virtual dfa::DFAState* getExistingTargetState(dfa::DFAState *previousD, size_t t) override; - virtual dfa::DFAState* computeTargetState(dfa::DFA &dfa, dfa::DFAState *previousD, size_t t) override; - virtual std::unique_ptr computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx) override; - virtual bool evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, + dfa::DFAState* getExistingTargetState(dfa::DFAState *previousD, size_t t) override; + dfa::DFAState* computeTargetState(dfa::DFA &dfa, dfa::DFAState *previousD, size_t t) override; + std::unique_ptr computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx) override; + bool evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, size_t alt, bool fullCtx) override; - virtual void reportAttemptingFullContext(dfa::DFA &dfa, const antlrcpp::BitSet &conflictingAlts, ATNConfigSet *configs, + void reportAttemptingFullContext(dfa::DFA &dfa, const antlrcpp::BitSet &conflictingAlts, ATNConfigSet *configs, size_t startIndex, size_t stopIndex) override; - virtual void reportContextSensitivity(dfa::DFA &dfa, size_t prediction, ATNConfigSet *configs, + void reportContextSensitivity(dfa::DFA &dfa, size_t prediction, ATNConfigSet *configs, size_t startIndex, size_t stopIndex) override; - virtual void reportAmbiguity(dfa::DFA &dfa, dfa::DFAState *D, size_t startIndex, size_t stopIndex, bool exact, + void reportAmbiguity(dfa::DFA &dfa, dfa::DFAState *D, size_t startIndex, size_t stopIndex, bool exact, const antlrcpp::BitSet &ambigAlts, ATNConfigSet *configs) override; }; diff --git a/runtime/Cpp/runtime/src/atn/RangeTransition.cpp b/runtime/Cpp/runtime/src/atn/RangeTransition.cpp old mode 100755 new mode 100644 index 58d668c45e..6db19e3e57 --- a/runtime/Cpp/runtime/src/atn/RangeTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/RangeTransition.cpp @@ -3,18 +3,18 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "misc/IntervalSet.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/RangeTransition.h" using namespace antlr4; using namespace antlr4::atn; -RangeTransition::RangeTransition(ATNState *target, size_t from, size_t to) : Transition(target), from(from), to(to) { -} - -Transition::SerializationType RangeTransition::getSerializationType() const { - return RANGE; +RangeTransition::RangeTransition(ATNState *target, size_t from, size_t to) : Transition(TransitionType::RANGE, target), from(from), to(to) { } misc::IntervalSet RangeTransition::label() const { diff --git a/runtime/Cpp/runtime/src/atn/RangeTransition.h b/runtime/Cpp/runtime/src/atn/RangeTransition.h old mode 100755 new mode 100644 index 14093e2f91..9a5c72b049 --- a/runtime/Cpp/runtime/src/atn/RangeTransition.h +++ b/runtime/Cpp/runtime/src/atn/RangeTransition.h @@ -5,6 +5,12 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "misc/IntervalSet.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" namespace antlr4 { @@ -12,17 +18,19 @@ namespace atn { class ANTLR4CPP_PUBLIC RangeTransition final : public Transition { public: + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::RANGE; } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } + const size_t from; const size_t to; RangeTransition(ATNState *target, size_t from, size_t to); - virtual SerializationType getSerializationType() const override; - - virtual misc::IntervalSet label() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + misc::IntervalSet label() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; - virtual std::string toString() const override; + std::string toString() const override; }; } // namespace atn diff 
--git a/runtime/Cpp/runtime/src/atn/RuleStartState.cpp b/runtime/Cpp/runtime/src/atn/RuleStartState.cpp deleted file mode 100755 index 555f8c2e93..0000000000 --- a/runtime/Cpp/runtime/src/atn/RuleStartState.cpp +++ /dev/null @@ -1,16 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "atn/RuleStartState.h" - -using namespace antlr4::atn; - -RuleStartState::RuleStartState() { - isLeftRecursiveRule = false; -} - -size_t RuleStartState::getStateType() { - return RULE_START; -} diff --git a/runtime/Cpp/runtime/src/atn/RuleStartState.h b/runtime/Cpp/runtime/src/atn/RuleStartState.h old mode 100755 new mode 100644 index 94ab0e4138..fa615b5b4e --- a/runtime/Cpp/runtime/src/atn/RuleStartState.h +++ b/runtime/Cpp/runtime/src/atn/RuleStartState.h @@ -5,6 +5,8 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" #include "atn/ATNState.h" namespace antlr4 { @@ -12,13 +14,14 @@ namespace atn { class ANTLR4CPP_PUBLIC RuleStartState final : public ATNState { public: - RuleStartState(); + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::RULE_START; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } RuleStopState *stopState = nullptr; bool isLeftRecursiveRule = false; - virtual size_t getStateType() override; - + RuleStartState() : ATNState(ATNStateType::RULE_START) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/RuleStopState.cpp b/runtime/Cpp/runtime/src/atn/RuleStopState.cpp deleted file mode 100755 index 3ceece4c77..0000000000 --- a/runtime/Cpp/runtime/src/atn/RuleStopState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "atn/RuleStopState.h" - -using namespace antlr4::atn; - -size_t RuleStopState::getStateType() { - return RULE_STOP; -} diff --git a/runtime/Cpp/runtime/src/atn/RuleStopState.h b/runtime/Cpp/runtime/src/atn/RuleStopState.h old mode 100755 new mode 100644 index 8a4a580f6a..e8f8b8a7f9 --- a/runtime/Cpp/runtime/src/atn/RuleStopState.h +++ b/runtime/Cpp/runtime/src/atn/RuleStopState.h @@ -5,6 +5,8 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" #include "atn/ATNState.h" namespace antlr4 { @@ -15,10 +17,12 @@ namespace atn { /// references to all calls to this rule to compute FOLLOW sets for /// error handling. class ANTLR4CPP_PUBLIC RuleStopState final : public ATNState { - public: - virtual size_t getStateType() override; + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::RULE_STOP; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + RuleStopState() : ATNState(ATNStateType::RULE_STOP) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/RuleTransition.cpp b/runtime/Cpp/runtime/src/atn/RuleTransition.cpp old mode 100755 new mode 100644 index c52f16d187..438b63441f --- a/runtime/Cpp/runtime/src/atn/RuleTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/RuleTransition.cpp @@ -3,7 +3,12 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "atn/RuleStartState.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/RuleTransition.h" using namespace antlr4::atn; @@ -13,14 +18,10 @@ RuleTransition::RuleTransition(RuleStartState *ruleStart, size_t ruleIndex, ATNS } RuleTransition::RuleTransition(RuleStartState *ruleStart, size_t ruleIndex, int precedence, ATNState *followState) - : Transition(ruleStart), ruleIndex(ruleIndex), precedence(precedence) { + : Transition(TransitionType::RULE, ruleStart), ruleIndex(ruleIndex), precedence(precedence) { this->followState = followState; } -Transition::SerializationType RuleTransition::getSerializationType() const { - return RULE; -} - bool RuleTransition::isEpsilon() const { return true; } diff --git a/runtime/Cpp/runtime/src/atn/RuleTransition.h b/runtime/Cpp/runtime/src/atn/RuleTransition.h old mode 100755 new mode 100644 index 50d3d29dee..94dcbbee83 --- a/runtime/Cpp/runtime/src/atn/RuleTransition.h +++ b/runtime/Cpp/runtime/src/atn/RuleTransition.h @@ -5,13 +5,22 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC RuleTransition : public Transition { + class ANTLR4CPP_PUBLIC RuleTransition final : public Transition { public: + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::RULE; } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } + /// Ptr to the rule definition object for this rule ref. 
const size_t ruleIndex; // no Rule object at runtime @@ -28,12 +37,10 @@ namespace atn { RuleTransition(RuleTransition const&) = delete; RuleTransition& operator=(RuleTransition const&) = delete; - virtual SerializationType getSerializationType() const override; - - virtual bool isEpsilon() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + bool isEpsilon() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; - virtual std::string toString() const override; + std::string toString() const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp old mode 100755 new mode 100644 index 0531e37f8c..8fbff68d9d --- a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp +++ b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp @@ -1,9 +1,19 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include +#include +#include + #include "misc/MurmurHash.h" +#include "antlr4-common.h" +#include "support/Casts.h" #include "support/CPPUtils.h" #include "support/Arrays.h" @@ -13,41 +23,109 @@ using namespace antlr4; using namespace antlr4::atn; using namespace antlrcpp; -//------------------ Predicate ----------------------------------------------------------------------------------------- +namespace { -SemanticContext::Predicate::Predicate() : Predicate(INVALID_INDEX, INVALID_INDEX, false) { -} + struct SemanticContextHasher final { + size_t operator()(const SemanticContext *semanticContext) const { + return semanticContext->hashCode(); + } + }; + + struct SemanticContextComparer final { + bool operator()(const SemanticContext *lhs, const SemanticContext *rhs) const { + return *lhs == *rhs; + } + }; + + template + void insertSemanticContext(const Ref &semanticContext, + std::unordered_set &operandSet, + std::vector> &operandList, + Ref &precedencePredicate, + Comparer comparer) { + if (semanticContext != nullptr) { + if (semanticContext->getContextType() == SemanticContextType::PRECEDENCE) { + if (precedencePredicate == nullptr || comparer(downCast(semanticContext.get())->precedence, precedencePredicate->precedence)) { + precedencePredicate = std::static_pointer_cast(semanticContext); + } + } else { + auto [existing, inserted] = operandSet.insert(semanticContext.get()); + if (inserted) { + operandList.push_back(semanticContext); + } + } + } + } + + template + void insertSemanticContext(Ref &&semanticContext, + std::unordered_set &operandSet, + std::vector> &operandList, + Ref &precedencePredicate, + Comparer comparer) { + if (semanticContext != nullptr) { + if (semanticContext->getContextType() == SemanticContextType::PRECEDENCE) { + if (precedencePredicate == nullptr || comparer(downCast(semanticContext.get())->precedence, precedencePredicate->precedence)) { + precedencePredicate = 
std::static_pointer_cast(std::move(semanticContext)); + } + } else { + auto [existing, inserted] = operandSet.insert(semanticContext.get()); + if (inserted) { + operandList.push_back(std::move(semanticContext)); + } + } + } + } + + size_t predictOperandCapacity(const Ref &x) { + switch (x->getContextType()) { + case SemanticContextType::AND: + return downCast(*x).getOperands().size(); + case SemanticContextType::OR: + return downCast(*x).getOperands().size(); + default: + return 1; + } + } + + size_t predictOperandCapacity(const Ref &a, const Ref &b) { + return predictOperandCapacity(a) + predictOperandCapacity(b); + } -SemanticContext::Predicate::Predicate(size_t ruleIndex, size_t predIndex, bool isCtxDependent) -: ruleIndex(ruleIndex), predIndex(predIndex), isCtxDependent(isCtxDependent) { } +//------------------ Predicate ----------------------------------------------------------------------------------------- -bool SemanticContext::Predicate::eval(Recognizer *parser, RuleContext *parserCallStack) { +SemanticContext::Predicate::Predicate(size_t ruleIndex, size_t predIndex, bool isCtxDependent) + : SemanticContext(SemanticContextType::PREDICATE), ruleIndex(ruleIndex), predIndex(predIndex), isCtxDependent(isCtxDependent) {} + +bool SemanticContext::Predicate::eval(Recognizer *parser, RuleContext *parserCallStack) const { RuleContext *localctx = nullptr; - if (isCtxDependent) + if (isCtxDependent) { localctx = parserCallStack; + } return parser->sempred(localctx, ruleIndex, predIndex); } size_t SemanticContext::Predicate::hashCode() const { size_t hashCode = misc::MurmurHash::initialize(); + hashCode = misc::MurmurHash::update(hashCode, static_cast(getContextType())); hashCode = misc::MurmurHash::update(hashCode, ruleIndex); hashCode = misc::MurmurHash::update(hashCode, predIndex); hashCode = misc::MurmurHash::update(hashCode, isCtxDependent ? 
1 : 0); - hashCode = misc::MurmurHash::finish(hashCode, 3); + hashCode = misc::MurmurHash::finish(hashCode, 4); return hashCode; } -bool SemanticContext::Predicate::operator == (const SemanticContext &other) const { - if (this == &other) +bool SemanticContext::Predicate::equals(const SemanticContext &other) const { + if (this == &other) { return true; - - const Predicate *p = dynamic_cast(&other); - if (p == nullptr) + } + if (getContextType() != other.getContextType()) { return false; - - return ruleIndex == p->ruleIndex && predIndex == p->predIndex && isCtxDependent == p->isCtxDependent; + } + const Predicate &p = downCast(other); + return ruleIndex == p.ruleIndex && predIndex == p.predIndex && isCtxDependent == p.isCtxDependent; } std::string SemanticContext::Predicate::toString() const { @@ -56,45 +134,36 @@ std::string SemanticContext::Predicate::toString() const { //------------------ PrecedencePredicate ------------------------------------------------------------------------------- -SemanticContext::PrecedencePredicate::PrecedencePredicate() : precedence(0) { -} +SemanticContext::PrecedencePredicate::PrecedencePredicate(int precedence) : SemanticContext(SemanticContextType::PRECEDENCE), precedence(precedence) {} -SemanticContext::PrecedencePredicate::PrecedencePredicate(int precedence) : precedence(precedence) { -} - -bool SemanticContext::PrecedencePredicate::eval(Recognizer *parser, RuleContext *parserCallStack) { +bool SemanticContext::PrecedencePredicate::eval(Recognizer *parser, RuleContext *parserCallStack) const { return parser->precpred(parserCallStack, precedence); } -Ref SemanticContext::PrecedencePredicate::evalPrecedence(Recognizer *parser, - RuleContext *parserCallStack) { +Ref SemanticContext::PrecedencePredicate::evalPrecedence(Recognizer *parser, + RuleContext *parserCallStack) const { if (parser->precpred(parserCallStack, precedence)) { - return SemanticContext::NONE; - } - else { - return nullptr; + return SemanticContext::Empty::Instance; 
} -} - -int SemanticContext::PrecedencePredicate::compareTo(PrecedencePredicate *o) { - return precedence - o->precedence; + return nullptr; } size_t SemanticContext::PrecedencePredicate::hashCode() const { - size_t hashCode = 1; - hashCode = 31 * hashCode + static_cast(precedence); - return hashCode; + size_t hashCode = misc::MurmurHash::initialize(); + hashCode = misc::MurmurHash::update(hashCode, static_cast(getContextType())); + hashCode = misc::MurmurHash::update(hashCode, static_cast(precedence)); + return misc::MurmurHash::finish(hashCode, 2); } -bool SemanticContext::PrecedencePredicate::operator == (const SemanticContext &other) const { - if (this == &other) +bool SemanticContext::PrecedencePredicate::equals(const SemanticContext &other) const { + if (this == &other) { return true; - - const PrecedencePredicate *predicate = dynamic_cast(&other); - if (predicate == nullptr) + } + if (getContextType() != other.getContextType()) { return false; - - return precedence == predicate->precedence; + } + const PrecedencePredicate &predicate = downCast(other); + return precedence == predicate.precedence; } std::string SemanticContext::PrecedencePredicate::toString() const { @@ -103,61 +172,60 @@ std::string SemanticContext::PrecedencePredicate::toString() const { //------------------ AND ----------------------------------------------------------------------------------------------- -SemanticContext::AND::AND(Ref const& a, Ref const& b) { - Set operands; +SemanticContext::AND::AND(Ref a, Ref b) : Operator(SemanticContextType::AND) { + std::unordered_set operands; + Ref precedencePredicate; - if (is(a)) { - for (auto operand : std::dynamic_pointer_cast(a)->opnds) { - operands.insert(operand); + _opnds.reserve(predictOperandCapacity(a, b) + 1); + + if (a->getContextType() == SemanticContextType::AND) { + for (const auto &operand : downCast(a.get())->getOperands()) { + insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::less{}); } } else { - 
operands.insert(a); + insertSemanticContext(std::move(a), operands, _opnds, precedencePredicate, std::less{}); } - if (is(b)) { - for (auto operand : std::dynamic_pointer_cast(b)->opnds) { - operands.insert(operand); + if (b->getContextType() == SemanticContextType::AND) { + for (const auto &operand : downCast(b.get())->getOperands()) { + insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::less{}); } } else { - operands.insert(b); + insertSemanticContext(std::move(b), operands, _opnds, precedencePredicate, std::less{}); } - std::vector> precedencePredicates = filterPrecedencePredicates(operands); - - if (!precedencePredicates.empty()) { + if (precedencePredicate != nullptr) { // interested in the transition with the lowest precedence - auto predicate = [](Ref const& a, Ref const& b) { - return a->precedence < b->precedence; - }; - - auto reduced = std::min_element(precedencePredicates.begin(), precedencePredicates.end(), predicate); - operands.insert(*reduced); + auto [existing, inserted] = operands.insert(precedencePredicate.get()); + if (inserted) { + _opnds.push_back(std::move(precedencePredicate)); + } } - - std::copy(operands.begin(), operands.end(), std::back_inserter(opnds)); } -std::vector> SemanticContext::AND::getOperands() const { - return opnds; +const std::vector>& SemanticContext::AND::getOperands() const { + return _opnds; } -bool SemanticContext::AND::operator == (const SemanticContext &other) const { - if (this == &other) +bool SemanticContext::AND::equals(const SemanticContext &other) const { + if (this == &other) { return true; - - const AND *context = dynamic_cast(&other); - if (context == nullptr) + } + if (getContextType() != other.getContextType()) { return false; - - return Arrays::equals(opnds, context->opnds); + } + const AND &context = downCast(other); + return Arrays::equals(getOperands(), context.getOperands()); } size_t SemanticContext::AND::hashCode() const { - return misc::MurmurHash::hashCode(opnds, 
typeid(AND).hash_code()); + size_t hash = misc::MurmurHash::initialize(); + hash = misc::MurmurHash::update(hash, static_cast(getContextType())); + return misc::MurmurHash::hashCode(getOperands(), hash); } -bool SemanticContext::AND::eval(Recognizer *parser, RuleContext *parserCallStack) { - for (auto opnd : opnds) { +bool SemanticContext::AND::eval(Recognizer *parser, RuleContext *parserCallStack) const { + for (const auto &opnd : getOperands()) { if (!opnd->eval(parser, parserCallStack)) { return false; } @@ -165,18 +233,19 @@ bool SemanticContext::AND::eval(Recognizer *parser, RuleContext *parserCallStack return true; } -Ref SemanticContext::AND::evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) { +Ref SemanticContext::AND::evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const { bool differs = false; - std::vector> operands; - for (auto context : opnds) { - Ref evaluated = context->evalPrecedence(parser, parserCallStack); + std::vector> operands; + for (const auto &context : getOperands()) { + auto evaluated = context->evalPrecedence(parser, parserCallStack); differs |= (evaluated != context); if (evaluated == nullptr) { // The AND context is false if any element is false. return nullptr; - } else if (evaluated != NONE) { + } + if (evaluated != Empty::Instance) { // Reduce the result by skipping true elements. - operands.push_back(evaluated); + operands.push_back(std::move(evaluated)); } } @@ -186,12 +255,12 @@ Ref SemanticContext::AND::evalPrecedence(Recognizer *parser, Ru if (operands.empty()) { // All elements were true, so the AND context is true. 
- return NONE; + return Empty::Instance; } - Ref result = operands[0]; + Ref result = std::move(operands[0]); for (size_t i = 1; i < operands.size(); ++i) { - result = SemanticContext::And(result, operands[i]); + result = SemanticContext::And(std::move(result), std::move(operands[i])); } return result; @@ -199,7 +268,7 @@ Ref SemanticContext::AND::evalPrecedence(Recognizer *parser, Ru std::string SemanticContext::AND::toString() const { std::string tmp; - for (auto var : opnds) { + for (const auto &var : getOperands()) { tmp += var->toString() + " && "; } return tmp; @@ -207,59 +276,60 @@ std::string SemanticContext::AND::toString() const { //------------------ OR ------------------------------------------------------------------------------------------------ -SemanticContext::OR::OR(Ref const& a, Ref const& b) { - Set operands; +SemanticContext::OR::OR(Ref a, Ref b) : Operator(SemanticContextType::OR) { + std::unordered_set operands; + Ref precedencePredicate; + + _opnds.reserve(predictOperandCapacity(a, b) + 1); - if (is(a)) { - for (auto operand : std::dynamic_pointer_cast(a)->opnds) { - operands.insert(operand); + if (a->getContextType() == SemanticContextType::OR) { + for (const auto &operand : downCast(a.get())->getOperands()) { + insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::greater{}); } } else { - operands.insert(a); + insertSemanticContext(std::move(a), operands, _opnds, precedencePredicate, std::greater{}); } - if (is(b)) { - for (auto operand : std::dynamic_pointer_cast(b)->opnds) { - operands.insert(operand); + if (b->getContextType() == SemanticContextType::OR) { + for (const auto &operand : downCast(b.get())->getOperands()) { + insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::greater{}); } } else { - operands.insert(b); + insertSemanticContext(std::move(b), operands, _opnds, precedencePredicate, std::greater{}); } - std::vector> precedencePredicates = filterPrecedencePredicates(operands); - if 
(!precedencePredicates.empty()) { + if (precedencePredicate != nullptr) { // interested in the transition with the highest precedence - auto predicate = [](Ref const& a, Ref const& b) { - return a->precedence < b->precedence; - }; - auto reduced = std::max_element(precedencePredicates.begin(), precedencePredicates.end(), predicate); - operands.insert(*reduced); + auto [existing, inserted] = operands.insert(precedencePredicate.get()); + if (inserted) { + _opnds.push_back(std::move(precedencePredicate)); + } } - - std::copy(operands.begin(), operands.end(), std::back_inserter(opnds)); } -std::vector> SemanticContext::OR::getOperands() const { - return opnds; +const std::vector>& SemanticContext::OR::getOperands() const { + return _opnds; } -bool SemanticContext::OR::operator == (const SemanticContext &other) const { - if (this == &other) +bool SemanticContext::OR::equals(const SemanticContext &other) const { + if (this == &other) { return true; - - const OR *context = dynamic_cast(&other); - if (context == nullptr) + } + if (getContextType() != other.getContextType()) { return false; - - return Arrays::equals(opnds, context->opnds); + } + const OR &context = downCast(other); + return Arrays::equals(getOperands(), context.getOperands()); } size_t SemanticContext::OR::hashCode() const { - return misc::MurmurHash::hashCode(opnds, typeid(OR).hash_code()); + size_t hash = misc::MurmurHash::initialize(); + hash = misc::MurmurHash::update(hash, static_cast(getContextType())); + return misc::MurmurHash::hashCode(getOperands(), hash); } -bool SemanticContext::OR::eval(Recognizer *parser, RuleContext *parserCallStack) { - for (auto opnd : opnds) { +bool SemanticContext::OR::eval(Recognizer *parser, RuleContext *parserCallStack) const { + for (const auto &opnd : getOperands()) { if (opnd->eval(parser, parserCallStack)) { return true; } @@ -267,18 +337,19 @@ bool SemanticContext::OR::eval(Recognizer *parser, RuleContext *parserCallStack) return false; } -Ref 
SemanticContext::OR::evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) { +Ref SemanticContext::OR::evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const { bool differs = false; - std::vector> operands; - for (auto context : opnds) { - Ref evaluated = context->evalPrecedence(parser, parserCallStack); + std::vector> operands; + for (const auto &context : getOperands()) { + auto evaluated = context->evalPrecedence(parser, parserCallStack); differs |= (evaluated != context); - if (evaluated == NONE) { + if (evaluated == Empty::Instance) { // The OR context is true if any element is true. - return NONE; - } else if (evaluated != nullptr) { + return Empty::Instance; + } + if (evaluated != nullptr) { // Reduce the result by skipping false elements. - operands.push_back(evaluated); + operands.push_back(std::move(evaluated)); } } @@ -291,9 +362,9 @@ Ref SemanticContext::OR::evalPrecedence(Recognizer *parser, Rul return nullptr; } - Ref result = operands[0]; + Ref result = std::move(operands[0]); for (size_t i = 1; i < operands.size(); ++i) { - result = SemanticContext::Or(result, operands[i]); + result = SemanticContext::Or(std::move(result), std::move(operands[i])); } return result; @@ -301,7 +372,7 @@ Ref SemanticContext::OR::evalPrecedence(Recognizer *parser, Rul std::string SemanticContext::OR::toString() const { std::string tmp; - for(auto var : opnds) { + for(const auto &var : getOperands()) { tmp += var->toString() + " || "; } return tmp; @@ -309,37 +380,30 @@ std::string SemanticContext::OR::toString() const { //------------------ SemanticContext ----------------------------------------------------------------------------------- -const Ref SemanticContext::NONE = std::make_shared(INVALID_INDEX, INVALID_INDEX, false); - -SemanticContext::~SemanticContext() { -} +const Ref SemanticContext::Empty::Instance = std::make_shared(INVALID_INDEX, INVALID_INDEX, false); -bool SemanticContext::operator != (const SemanticContext &other) const { - 
return !(*this == other); -} - -Ref SemanticContext::evalPrecedence(Recognizer * /*parser*/, RuleContext * /*parserCallStack*/) { +Ref SemanticContext::evalPrecedence(Recognizer * /*parser*/, RuleContext * /*parserCallStack*/) const { return shared_from_this(); } -Ref SemanticContext::And(Ref const& a, Ref const& b) { - if (!a || a == NONE) { +Ref SemanticContext::And(Ref a, Ref b) { + if (!a || a == Empty::Instance) { return b; } - if (!b || b == NONE) { + if (!b || b == Empty::Instance) { return a; } - Ref result = std::make_shared(a, b); - if (result->opnds.size() == 1) { - return result->opnds[0]; + Ref result = std::make_shared(std::move(a), std::move(b)); + if (result->getOperands().size() == 1) { + return result->getOperands()[0]; } return result; } -Ref SemanticContext::Or(Ref const& a, Ref const& b) { +Ref SemanticContext::Or(Ref a, Ref b) { if (!a) { return b; } @@ -347,31 +411,14 @@ Ref SemanticContext::Or(Ref const& a, Ref result = std::make_shared(a, b); - if (result->opnds.size() == 1) { - return result->opnds[0]; + Ref result = std::make_shared(std::move(a), std::move(b)); + if (result->getOperands().size() == 1) { + return result->getOperands()[0]; } return result; } - -std::vector> SemanticContext::filterPrecedencePredicates(const Set &collection) { - std::vector> result; - for (auto context : collection) { - if (antlrcpp::is(context)) { - result.push_back(std::dynamic_pointer_cast(context)); - } - } - - return result; -} - - -//------------------ Operator ----------------------------------------------------------------------------------------- - -SemanticContext::Operator::~Operator() { -} diff --git a/runtime/Cpp/runtime/src/atn/SemanticContext.h b/runtime/Cpp/runtime/src/atn/SemanticContext.h old mode 100755 new mode 100644 index 7ccc16c841..cc5fcb0389 --- a/runtime/Cpp/runtime/src/atn/SemanticContext.h +++ b/runtime/Cpp/runtime/src/atn/SemanticContext.h @@ -1,12 +1,17 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ #pragma once +#include +#include +#include #include "Recognizer.h" +#include "antlr4-common.h" #include "support/CPPUtils.h" +#include "atn/SemanticContextType.h" namespace antlr4 { namespace atn { @@ -19,36 +24,9 @@ namespace atn { /// SemanticContext within the scope of this outer class. class ANTLR4CPP_PUBLIC SemanticContext : public std::enable_shared_from_this { public: - struct Hasher - { - size_t operator()(Ref const& k) const { - return k->hashCode(); - } - }; + virtual ~SemanticContext() = default; - struct Comparer { - bool operator()(Ref const& lhs, Ref const& rhs) const { - if (lhs == rhs) - return true; - return (lhs->hashCode() == rhs->hashCode()) && (*lhs == *rhs); - } - }; - - - using Set = std::unordered_set, Hasher, Comparer>; - - /** - * The default {@link SemanticContext}, which is semantically equivalent to - * a predicate of the form {@code {true}?}. - */ - static const Ref NONE; - - virtual ~SemanticContext(); - - virtual size_t hashCode() const = 0; - virtual std::string toString() const = 0; - virtual bool operator == (const SemanticContext &other) const = 0; - virtual bool operator != (const SemanticContext &other) const; + SemanticContextType getContextType() const { return _contextType; } /// /// For context independent predicates, we evaluate them without a local @@ -63,7 +41,7 @@ namespace atn { /// prediction, so we passed in the outer context here in case of context /// dependent predicate evaluation. /// - virtual bool eval(Recognizer *parser, RuleContext *parserCallStack) = 0; + virtual bool eval(Recognizer *parser, RuleContext *parserCallStack) const = 0; /** * Evaluate the precedence predicates for the context and reduce the result. @@ -83,57 +61,83 @@ namespace atn { * semantic context after precedence predicates are evaluated. 
* */ - virtual Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack); + virtual Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const; + + virtual size_t hashCode() const = 0; + + virtual bool equals(const SemanticContext &other) const = 0; + + virtual std::string toString() const = 0; - static Ref And(Ref const& a, Ref const& b); + static Ref And(Ref a, Ref b); /// See also: ParserATNSimulator::getPredsForAmbigAlts. - static Ref Or(Ref const& a, Ref const& b); + static Ref Or(Ref a, Ref b); + class Empty; class Predicate; class PrecedencePredicate; class Operator; class AND; class OR; + protected: + explicit SemanticContext(SemanticContextType contextType) : _contextType(contextType) {} + private: - static std::vector> filterPrecedencePredicates(const Set &collection); + const SemanticContextType _contextType; + }; + + inline bool operator==(const SemanticContext &lhs, const SemanticContext &rhs) { + return lhs.equals(rhs); + } + + inline bool operator!=(const SemanticContext &lhs, const SemanticContext &rhs) { + return !operator==(lhs, rhs); + } + + class ANTLR4CPP_PUBLIC SemanticContext::Empty : public SemanticContext{ + public: + /** + * The default {@link SemanticContext}, which is semantically equivalent to + * a predicate of the form {@code {true}?}. 
+ */ + static const Ref Instance; }; - class ANTLR4CPP_PUBLIC SemanticContext::Predicate : public SemanticContext { + class ANTLR4CPP_PUBLIC SemanticContext::Predicate final : public SemanticContext { public: + static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::PREDICATE; } + + static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); } + const size_t ruleIndex; const size_t predIndex; const bool isCtxDependent; // e.g., $i ref in pred - protected: - Predicate(); - - public: Predicate(size_t ruleIndex, size_t predIndex, bool isCtxDependent); - virtual bool eval(Recognizer *parser, RuleContext *parserCallStack) override; - virtual size_t hashCode() const override; - virtual bool operator == (const SemanticContext &other) const override; - virtual std::string toString() const override; + bool eval(Recognizer *parser, RuleContext *parserCallStack) const override; + size_t hashCode() const override; + bool equals(const SemanticContext &other) const override; + std::string toString() const override; }; - class ANTLR4CPP_PUBLIC SemanticContext::PrecedencePredicate : public SemanticContext { + class ANTLR4CPP_PUBLIC SemanticContext::PrecedencePredicate final : public SemanticContext { public: + static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::PRECEDENCE; } + + static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); } + const int precedence; - protected: - PrecedencePredicate(); + explicit PrecedencePredicate(int precedence); - public: - PrecedencePredicate(int precedence); - - virtual bool eval(Recognizer *parser, RuleContext *parserCallStack) override; - virtual Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) override; - virtual int compareTo(PrecedencePredicate *o); - virtual size_t hashCode() 
const override; - virtual bool operator == (const SemanticContext &other) const override; - virtual std::string toString() const override; + bool eval(Recognizer *parser, RuleContext *parserCallStack) const override; + Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const override; + size_t hashCode() const override; + bool equals(const SemanticContext &other) const override; + std::string toString() const override; }; /** @@ -144,7 +148,12 @@ namespace atn { */ class ANTLR4CPP_PUBLIC SemanticContext::Operator : public SemanticContext { public: - virtual ~Operator() override; + static bool is(const SemanticContext &semanticContext) { + const auto contextType = semanticContext.getContextType(); + return contextType == SemanticContextType::AND || contextType == SemanticContextType::OR; + } + + static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); } /** * Gets the operands for the semantic context operator. @@ -155,68 +164,78 @@ namespace atn { * @since 4.3 */ - virtual std::vector> getOperands() const = 0; + virtual const std::vector>& getOperands() const = 0; + + protected: + using SemanticContext::SemanticContext; }; /** * A semantic context which is true whenever none of the contained contexts * is false. 
*/ - class ANTLR4CPP_PUBLIC SemanticContext::AND : public SemanticContext::Operator { + class ANTLR4CPP_PUBLIC SemanticContext::AND final : public SemanticContext::Operator { public: - std::vector> opnds; + static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::AND; } + + static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); } - AND(Ref const& a, Ref const& b) ; + AND(Ref a, Ref b) ; - virtual std::vector> getOperands() const override; - virtual bool operator == (const SemanticContext &other) const override; - virtual size_t hashCode() const override; + const std::vector>& getOperands() const override; /** * The evaluation of predicates by this context is short-circuiting, but * unordered.

    */ - virtual bool eval(Recognizer *parser, RuleContext *parserCallStack) override; - virtual Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) override; - virtual std::string toString() const override; + bool eval(Recognizer *parser, RuleContext *parserCallStack) const override; + Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const override; + size_t hashCode() const override; + bool equals(const SemanticContext &other) const override; + std::string toString() const override; + + private: + std::vector> _opnds; }; /** * A semantic context which is true whenever at least one of the contained * contexts is true. */ - class ANTLR4CPP_PUBLIC SemanticContext::OR : public SemanticContext::Operator { + class ANTLR4CPP_PUBLIC SemanticContext::OR final : public SemanticContext::Operator { public: - std::vector> opnds; + static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::OR; } + + static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); } - OR(Ref const& a, Ref const& b); + OR(Ref a, Ref b); - virtual std::vector> getOperands() const override; - virtual bool operator == (const SemanticContext &other) const override; - virtual size_t hashCode() const override; + const std::vector>& getOperands() const override; /** * The evaluation of predicates by this context is short-circuiting, but * unordered. 
*/ - virtual bool eval(Recognizer *parser, RuleContext *parserCallStack) override; - virtual Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) override; - virtual std::string toString() const override; - }; + bool eval(Recognizer *parser, RuleContext *parserCallStack) const override; + Ref evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const override; + size_t hashCode() const override; + bool equals(const SemanticContext &other) const override; + std::string toString() const override; -} // namespace atn -} // namespace antlr4 + private: + std::vector> _opnds; + }; -// Hash function for SemanticContext, used in the MurmurHash::update function +} // namespace atn +} // namespace antlr4 namespace std { - using antlr4::atn::SemanticContext; - template <> struct hash - { - size_t operator () (SemanticContext &x) const - { - return x.hashCode(); + template <> + struct hash<::antlr4::atn::SemanticContext> { + size_t operator()(const ::antlr4::atn::SemanticContext &semanticContext) const { + return semanticContext.hashCode(); } }; -} + +} // namespace std diff --git a/runtime/Cpp/runtime/src/atn/SemanticContextType.h b/runtime/Cpp/runtime/src/atn/SemanticContextType.h new file mode 100644 index 0000000000..bca6e421d2 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/SemanticContextType.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +#pragma once + +#include + +#include "antlr4-common.h" + +namespace antlr4 { +namespace atn { + + enum class SemanticContextType : size_t { + PREDICATE = 1, + PRECEDENCE = 2, + AND = 3, + OR = 4, + }; + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/SerializedATNView.h b/runtime/Cpp/runtime/src/atn/SerializedATNView.h new file mode 100644 index 0000000000..a723589bc3 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/SerializedATNView.h @@ -0,0 +1,101 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "antlr4-common.h" +#include "misc/MurmurHash.h" + +namespace antlr4 { +namespace atn { + + class ANTLR4CPP_PUBLIC SerializedATNView final { + public: + using value_type = int32_t; + using size_type = size_t; + using difference_type = ptrdiff_t; + using reference = int32_t&; + using const_reference = const int32_t&; + using pointer = int32_t*; + using const_pointer = const int32_t*; + using iterator = const_pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + SerializedATNView() = default; + + SerializedATNView(const_pointer data, size_type size) : _data(data), _size(size) {} + + SerializedATNView(const std::vector &serializedATN) : _data(serializedATN.data()), _size(serializedATN.size()) {} + + SerializedATNView(const SerializedATNView&) = default; + + SerializedATNView& operator=(const SerializedATNView&) = default; + + const_iterator begin() const { return data(); } + + const_iterator cbegin() const { return data(); } + + const_iterator end() const { return data() + size(); } + + const_iterator cend() const { return data() + size(); } + + const_reverse_iterator rbegin() const { return 
const_reverse_iterator(end()); } + + const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); } + + const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } + + const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); } + + bool empty() const { return size() == 0; } + + const_pointer data() const { return _data; } + + size_type size() const { return _size; } + + size_type size_bytes() const { return size() * sizeof(value_type); } + + const_reference operator[](size_type index) const { return _data[index]; } + + private: + const_pointer _data = nullptr; + size_type _size = 0; + }; + + inline bool operator==(const SerializedATNView &lhs, const SerializedATNView &rhs) { + return (lhs.data() == rhs.data() && lhs.size() == rhs.size()) || + (lhs.size() == rhs.size() && std::memcmp(lhs.data(), rhs.data(), lhs.size_bytes()) == 0); + } + + inline bool operator!=(const SerializedATNView &lhs, const SerializedATNView &rhs) { + return !operator==(lhs, rhs); + } + + inline bool operator<(const SerializedATNView &lhs, const SerializedATNView &rhs) { + int diff = std::memcmp(lhs.data(), rhs.data(), std::min(lhs.size_bytes(), rhs.size_bytes())); + return diff < 0 || (diff == 0 && lhs.size() < rhs.size()); + } + +} // namespace atn +} // namespace antlr4 + +namespace std { + + template <> + struct hash<::antlr4::atn::SerializedATNView> { + size_t operator()(const ::antlr4::atn::SerializedATNView &serializedATNView) const { + return ::antlr4::misc::MurmurHash::hashCode(serializedATNView.data(), serializedATNView.size()); + } + }; + +} // namespace std diff --git a/runtime/Cpp/runtime/src/atn/SetTransition.cpp b/runtime/Cpp/runtime/src/atn/SetTransition.cpp old mode 100755 new mode 100644 index 35d6905be4..b468108bbc --- a/runtime/Cpp/runtime/src/atn/SetTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/SetTransition.cpp @@ -3,7 +3,12 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "Token.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "misc/IntervalSet.h" #include "atn/SetTransition.h" @@ -11,12 +16,8 @@ using namespace antlr4; using namespace antlr4::atn; -SetTransition::SetTransition(ATNState *target, const misc::IntervalSet &aSet) - : Transition(target), set(aSet.isEmpty() ? misc::IntervalSet::of(Token::INVALID_TYPE) : aSet) { -} - -Transition::SerializationType SetTransition::getSerializationType() const { - return SET; +SetTransition::SetTransition(TransitionType transitionType, ATNState *target, misc::IntervalSet aSet) + : Transition(transitionType, target), set(aSet.isEmpty() ? misc::IntervalSet::of(Token::INVALID_TYPE) : std::move(aSet)) { } misc::IntervalSet SetTransition::label() const { diff --git a/runtime/Cpp/runtime/src/atn/SetTransition.h b/runtime/Cpp/runtime/src/atn/SetTransition.h old mode 100755 new mode 100644 index 044d41a6a3..2451a4df3d --- a/runtime/Cpp/runtime/src/atn/SetTransition.h +++ b/runtime/Cpp/runtime/src/atn/SetTransition.h @@ -5,6 +5,13 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "misc/IntervalSet.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" namespace antlr4 { @@ -14,16 +21,24 @@ namespace atn { /// A transition containing a set of values.
    class ANTLR4CPP_PUBLIC SetTransition : public Transition { public: + static bool is(const Transition &transition) { + const auto transitionType = transition.getTransitionType(); + return transitionType == TransitionType::SET || transitionType == TransitionType::NOT_SET; + } + + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } + const misc::IntervalSet set; - SetTransition(ATNState *target, const misc::IntervalSet &set); + SetTransition(ATNState *target, misc::IntervalSet set) : SetTransition(TransitionType::SET, target, std::move(set)) {} - virtual SerializationType getSerializationType() const override; + misc::IntervalSet label() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; - virtual misc::IntervalSet label() const override; - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + std::string toString() const override; - virtual std::string toString() const override; + protected: + SetTransition(TransitionType transitionType, ATNState *target, misc::IntervalSet set); }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp b/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp old mode 100755 new mode 100644 index 39ad9fb835..4dfb208e69 --- a/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp +++ b/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp @@ -3,69 +3,73 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -#include "atn/EmptyPredictionContext.h" - +#include +#include +#include +#include +#include #include "atn/SingletonPredictionContext.h" +#include "antlr4-common.h" +#include "support/Casts.h" +#include "misc/MurmurHash.h" +#include "atn/HashUtils.h" + using namespace antlr4::atn; +using namespace antlrcpp; -SingletonPredictionContext::SingletonPredictionContext(Ref const& parent, size_t returnState) - : PredictionContext(parent ? calculateHashCode(parent, returnState) : calculateEmptyHashCode()), - parent(parent), returnState(returnState) { +SingletonPredictionContext::SingletonPredictionContext(Ref parent, size_t returnState) + : PredictionContext(PredictionContextType::SINGLETON), parent(std::move(parent)), returnState(returnState) { assert(returnState != ATNState::INVALID_STATE_NUMBER); } -SingletonPredictionContext::~SingletonPredictionContext() { -} - -Ref SingletonPredictionContext::create(Ref const& parent, size_t returnState) { - - if (returnState == EMPTY_RETURN_STATE && parent) { +Ref SingletonPredictionContext::create(Ref parent, size_t returnState) { + if (returnState == EMPTY_RETURN_STATE && parent == nullptr) { // someone can pass in the bits of an array ctx that mean $ - return std::dynamic_pointer_cast(EMPTY); + return std::dynamic_pointer_cast(EMPTY); } - return std::make_shared(parent, returnState); + return std::make_shared(std::move(parent), returnState); +} + +bool SingletonPredictionContext::isEmpty() const { + return parent == nullptr && returnState == EMPTY_RETURN_STATE; } size_t SingletonPredictionContext::size() const { return 1; } -Ref SingletonPredictionContext::getParent(size_t index) const { +const Ref& SingletonPredictionContext::getParent(size_t index) const { assert(index == 0); - ((void)(index)); // Make Release build happy. + static_cast(index); return parent; } size_t SingletonPredictionContext::getReturnState(size_t index) const { assert(index == 0); - ((void)(index)); // Make Release build happy. 
+ static_cast(index); return returnState; } -bool SingletonPredictionContext::operator == (const PredictionContext &o) const { - if (this == &o) { +size_t SingletonPredictionContext::hashCodeImpl() const { + size_t hash = misc::MurmurHash::initialize(); + hash = misc::MurmurHash::update(hash, static_cast(getContextType())); + hash = misc::MurmurHash::update(hash, parent); + hash = misc::MurmurHash::update(hash, returnState); + return misc::MurmurHash::finish(hash, 3); +} + +bool SingletonPredictionContext::equals(const PredictionContext &other) const { + if (this == std::addressof(other)) { return true; } - - const SingletonPredictionContext *other = dynamic_cast(&o); - if (other == nullptr) { + if (getContextType() != other.getContextType()) { return false; } - - if (this->hashCode() != other->hashCode()) { - return false; // can't be same if hash is different - } - - if (returnState != other->returnState) - return false; - - if (!parent && !other->parent) - return true; - if (!parent || !other->parent) - return false; - - return *parent == *other->parent; + const auto &singleton = downCast(other); + return returnState == singleton.returnState && + cachedHashCodeEqual(cachedHashCode(), singleton.cachedHashCode()) && + (parent == singleton.parent || (parent != nullptr && singleton.parent != nullptr && *parent == *singleton.parent)); } std::string SingletonPredictionContext::toString() const { diff --git a/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h b/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h old mode 100755 new mode 100644 index f1e993bbae..d1c9b063d8 --- a/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h +++ b/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h @@ -5,31 +5,41 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "atn/PredictionContext.h" namespace antlr4 { namespace atn { - class ANTLR4CPP_PUBLIC SingletonPredictionContext : public PredictionContext { + class ANTLR4CPP_PUBLIC 
SingletonPredictionContext final : public PredictionContext { public: + static bool is(const PredictionContext &predictionContext) { return predictionContext.getContextType() == PredictionContextType::SINGLETON; } + + static bool is(const PredictionContext *predictionContext) { return predictionContext != nullptr && is(*predictionContext); } + + static Ref create(Ref parent, size_t returnState); + // Usually a parent is linked via a weak ptr. Not so here as we have kinda reverse reference chain. // There are no child contexts stored here and often the parent context is left dangling when it's // owning ATNState is released. In order to avoid having this context released as well (leaving all other contexts // which got this one as parent with a null reference) we use a shared_ptr here instead, to keep those left alone // parent contexts alive. - const Ref parent; + const Ref parent; const size_t returnState; - SingletonPredictionContext(Ref const& parent, size_t returnState); - virtual ~SingletonPredictionContext(); + SingletonPredictionContext(Ref parent, size_t returnState); - static Ref create(Ref const& parent, size_t returnState); + bool isEmpty() const override; + size_t size() const override; + const Ref& getParent(size_t index) const override; + size_t getReturnState(size_t index) const override; + bool equals(const PredictionContext &other) const override; + std::string toString() const override; - virtual size_t size() const override; - virtual Ref getParent(size_t index) const override; - virtual size_t getReturnState(size_t index) const override; - virtual bool operator == (const PredictionContext &o) const override; - virtual std::string toString() const override; + protected: + size_t hashCodeImpl() const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp b/runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp deleted file mode 100755 index e62c0de11e..0000000000 --- 
a/runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -#include "atn/StarBlockStartState.h" - -using namespace antlr4::atn; - -size_t StarBlockStartState::getStateType() { - return STAR_BLOCK_START; -} diff --git a/runtime/Cpp/runtime/src/atn/StarBlockStartState.h b/runtime/Cpp/runtime/src/atn/StarBlockStartState.h old mode 100755 new mode 100644 index 8fae316089..afa8a8e3b5 --- a/runtime/Cpp/runtime/src/atn/StarBlockStartState.h +++ b/runtime/Cpp/runtime/src/atn/StarBlockStartState.h @@ -5,6 +5,9 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" +#include "atn/ATNState.h" #include "atn/BlockStartState.h" namespace antlr4 { @@ -12,9 +15,12 @@ namespace atn { /// The block that begins a closure loop. class ANTLR4CPP_PUBLIC StarBlockStartState final : public BlockStartState { - public: - virtual size_t getStateType() override; + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::STAR_BLOCK_START; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + + StarBlockStartState() : BlockStartState(ATNStateType::STAR_BLOCK_START) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp b/runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp deleted file mode 100755 index 766a858f2e..0000000000 --- a/runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp +++ /dev/null @@ -1,15 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "atn/StarLoopEntryState.h" - -using namespace antlr4::atn; - -StarLoopEntryState::StarLoopEntryState() : DecisionState(), isPrecedenceDecision(false) { -} - -size_t StarLoopEntryState::getStateType() { - return STAR_LOOP_ENTRY; -} diff --git a/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h b/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h old mode 100755 new mode 100644 index a062c58f79..1e83bc6120 --- a/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h +++ b/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h @@ -5,6 +5,9 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" +#include "atn/ATNState.h" #include "atn/DecisionState.h" namespace antlr4 { @@ -12,7 +15,9 @@ namespace atn { class ANTLR4CPP_PUBLIC StarLoopEntryState final : public DecisionState { public: - StarLoopEntryState(); + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::STAR_LOOP_ENTRY; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } /** * Indicates whether this state can benefit from a precedence DFA during SLL @@ -28,7 +33,7 @@ namespace atn { StarLoopbackState *loopBackState = nullptr; - virtual size_t getStateType() override; + StarLoopEntryState() : DecisionState(ATNStateType::STAR_LOOP_ENTRY) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp b/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp old mode 100755 new mode 100644 index f5105896b1..e2542e96b2 --- a/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp +++ b/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp @@ -4,16 +4,17 @@ */ #include "atn/StarLoopEntryState.h" +#include "atn/ATNStateType.h" #include "atn/Transition.h" +#include "support/Casts.h" #include "atn/StarLoopbackState.h" using namespace antlr4::atn; -StarLoopEntryState *StarLoopbackState::getLoopEntryState() { - return dynamic_cast(transitions[0]->target); -} - -size_t StarLoopbackState::getStateType() { - return 
STAR_LOOP_BACK; +StarLoopEntryState *StarLoopbackState::getLoopEntryState() const { + if (transitions[0]->target != nullptr && transitions[0]->target->getStateType() == ATNStateType::STAR_LOOP_ENTRY) { + return antlrcpp::downCast(transitions[0]->target); + } + return nullptr; } diff --git a/runtime/Cpp/runtime/src/atn/StarLoopbackState.h b/runtime/Cpp/runtime/src/atn/StarLoopbackState.h old mode 100755 new mode 100644 index f5db3efd85..6a86575441 --- a/runtime/Cpp/runtime/src/atn/StarLoopbackState.h +++ b/runtime/Cpp/runtime/src/atn/StarLoopbackState.h @@ -5,6 +5,8 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" #include "atn/ATNState.h" namespace antlr4 { @@ -12,9 +14,13 @@ namespace atn { class ANTLR4CPP_PUBLIC StarLoopbackState final : public ATNState { public: - StarLoopEntryState *getLoopEntryState(); + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::STAR_LOOP_BACK; } - virtual size_t getStateType() override; + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + + StarLoopbackState() : ATNState(ATNStateType::STAR_LOOP_BACK) {} + + StarLoopEntryState *getLoopEntryState() const; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/TokensStartState.cpp b/runtime/Cpp/runtime/src/atn/TokensStartState.cpp deleted file mode 100755 index a764278e7b..0000000000 --- a/runtime/Cpp/runtime/src/atn/TokensStartState.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "atn/TokensStartState.h" - -using namespace antlr4::atn; - -size_t TokensStartState::getStateType() { - return TOKEN_START; -} diff --git a/runtime/Cpp/runtime/src/atn/TokensStartState.h b/runtime/Cpp/runtime/src/atn/TokensStartState.h old mode 100755 new mode 100644 index e534d04ee9..ea53462469 --- a/runtime/Cpp/runtime/src/atn/TokensStartState.h +++ b/runtime/Cpp/runtime/src/atn/TokensStartState.h @@ -5,6 +5,9 @@ #pragma once +#include "antlr4-common.h" +#include "atn/ATNStateType.h" +#include "atn/ATNState.h" #include "atn/DecisionState.h" namespace antlr4 { @@ -12,9 +15,12 @@ namespace atn { /// The Tokens rule start state linking to each lexer rule start state. class ANTLR4CPP_PUBLIC TokensStartState final : public DecisionState { - public: - virtual size_t getStateType() override; + static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::TOKEN_START; } + + static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); } + + TokensStartState() : DecisionState(ATNStateType::TOKEN_START) {} }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/Transition.cpp b/runtime/Cpp/runtime/src/atn/Transition.cpp old mode 100755 new mode 100644 index 15922a324e..9646e1fb4b --- a/runtime/Cpp/runtime/src/atn/Transition.cpp +++ b/runtime/Cpp/runtime/src/atn/Transition.cpp @@ -3,21 +3,20 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "Exceptions.h" +#include "misc/IntervalSet.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "support/Arrays.h" #include "atn/Transition.h" using namespace antlr4; using namespace antlr4::atn; - using namespace antlrcpp; -const std::vector Transition::serializationNames = { - "INVALID", "EPSILON", "RANGE", "RULE", "PREDICATE", "ATOM", "ACTION", "SET", "NOT_SET", "WILDCARD", "PRECEDENCE" -}; - -Transition::Transition(ATNState *target) { +Transition::Transition(TransitionType transitionType, ATNState *target) : _transitionType(transitionType) { if (target == nullptr) { throw NullPointerException("target cannot be null."); } @@ -25,9 +24,6 @@ Transition::Transition(ATNState *target) { this->target = target; } -Transition::~Transition() { -} - bool Transition::isEpsilon() const { return false; } diff --git a/runtime/Cpp/runtime/src/atn/Transition.h b/runtime/Cpp/runtime/src/atn/Transition.h old mode 100755 new mode 100644 index ffed2f58f0..6c80656149 --- a/runtime/Cpp/runtime/src/atn/Transition.h +++ b/runtime/Cpp/runtime/src/atn/Transition.h @@ -5,7 +5,13 @@ #pragma once +#include +#include +#include #include "misc/IntervalSet.h" +#include "atn/ATNState.h" +#include "antlr4-common.h" +#include "atn/TransitionType.h" namespace antlr4 { namespace atn { @@ -25,33 +31,13 @@ namespace atn { /// class ANTLR4CPP_PUBLIC Transition { public: - // constants for serialization - enum SerializationType { - EPSILON = 1, - RANGE = 2, - RULE = 3, - PREDICATE = 4, // e.g., {isType(input.LT(1))}? - ATOM = 5, - ACTION = 6, - SET = 7, // ~(A|B) or ~atom, wildcard, which convert to next 2 - NOT_SET = 8, - WILDCARD = 9, - PRECEDENCE = 10, - }; - - static const std::vector serializationNames; - /// The target of this transition. // ml: this is a reference into the ATN. 
ATNState *target; - virtual ~Transition(); + virtual ~Transition() = default; - protected: - Transition(ATNState *target); - - public: - virtual SerializationType getSerializationType() const = 0; + TransitionType getTransitionType() const { return _transitionType; } /** * Determines if the transition is an "epsilon" transition. @@ -70,7 +56,15 @@ namespace atn { Transition(Transition const&) = delete; Transition& operator=(Transition const&) = delete; + + protected: + Transition(TransitionType transitionType, ATNState *target); + + private: + const TransitionType _transitionType; }; + using ConstTransitionPtr = std::unique_ptr; + } // namespace atn } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/TransitionType.cpp b/runtime/Cpp/runtime/src/atn/TransitionType.cpp new file mode 100644 index 0000000000..4f1ab6eda6 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/TransitionType.cpp @@ -0,0 +1,28 @@ +#include +#include "atn/TransitionType.h" + +std::string antlr4::atn::transitionTypeName(TransitionType transitionType) { + switch (transitionType) { + case TransitionType::EPSILON: + return "EPSILON"; + case TransitionType::RANGE: + return "RANGE"; + case TransitionType::RULE: + return "RULE"; + case TransitionType::PREDICATE: + return "PREDICATE"; + case TransitionType::ATOM: + return "ATOM"; + case TransitionType::ACTION: + return "ACTION"; + case TransitionType::SET: + return "SET"; + case TransitionType::NOT_SET: + return "NOT_SET"; + case TransitionType::WILDCARD: + return "WILDCARD"; + case TransitionType::PRECEDENCE: + return "PRECEDENCE"; + } + return "UNKNOWN"; +} diff --git a/runtime/Cpp/runtime/src/atn/TransitionType.h b/runtime/Cpp/runtime/src/atn/TransitionType.h new file mode 100644 index 0000000000..d5d5f3bd97 --- /dev/null +++ b/runtime/Cpp/runtime/src/atn/TransitionType.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +#pragma once + +#include +#include + +#include "antlr4-common.h" + +namespace antlr4 { +namespace atn { + + // Constants for transition serialization. + enum class TransitionType : size_t { + EPSILON = 1, + RANGE = 2, + RULE = 3, + PREDICATE = 4, // e.g., {isType(input.LT(1))}? + ATOM = 5, + ACTION = 6, + SET = 7, // ~(A|B) or ~atom, wildcard, which convert to next 2 + NOT_SET = 8, + WILDCARD = 9, + PRECEDENCE = 10, + }; + + ANTLR4CPP_PUBLIC std::string transitionTypeName(TransitionType transitionType); + +} // namespace atn +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp b/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp old mode 100755 new mode 100644 index dc47413167..a2bcc0d1ba --- a/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp +++ b/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp @@ -3,17 +3,16 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "atn/ATNState.h" +#include "atn/TransitionType.h" #include "atn/WildcardTransition.h" using namespace antlr4::atn; -WildcardTransition::WildcardTransition(ATNState *target) : Transition(target) { -} - -Transition::SerializationType WildcardTransition::getSerializationType() const { - return WILDCARD; +WildcardTransition::WildcardTransition(ATNState *target) : Transition(TransitionType::WILDCARD, target) { } bool WildcardTransition::matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const { diff --git a/runtime/Cpp/runtime/src/atn/WildcardTransition.h b/runtime/Cpp/runtime/src/atn/WildcardTransition.h old mode 100755 new mode 100644 index c47c717759..40f0828c84 --- a/runtime/Cpp/runtime/src/atn/WildcardTransition.h +++ b/runtime/Cpp/runtime/src/atn/WildcardTransition.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include "antlr4-common.h" +#include "atn/TransitionType.h" +#include "atn/ATNState.h" #include "atn/Transition.h" namespace antlr4 { @@ -12,13 +17,15 @@ namespace atn { class ANTLR4CPP_PUBLIC WildcardTransition final : public Transition { public: - WildcardTransition(ATNState *target); + static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::WILDCARD; } - virtual SerializationType getSerializationType() const override; + static bool is(const Transition *transition) { return transition != nullptr && is(*transition); } - virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + explicit WildcardTransition(ATNState *target); - virtual std::string toString() const override; + bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override; + + std::string toString() const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/dfa/DFA.cpp b/runtime/Cpp/runtime/src/dfa/DFA.cpp old mode 100755 new mode 100644 index 2236354918..bbe89de55a --- a/runtime/Cpp/runtime/src/dfa/DFA.cpp 
+++ b/runtime/Cpp/runtime/src/dfa/DFA.cpp @@ -3,11 +3,17 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include #include "dfa/DFASerializer.h" #include "dfa/LexerDFASerializer.h" #include "support/CPPUtils.h" #include "atn/StarLoopEntryState.h" #include "atn/ATNConfigSet.h" +#include "support/Casts.h" #include "dfa/DFA.h" @@ -22,8 +28,8 @@ DFA::DFA(atn::DecisionState *atnStartState, size_t decision) : atnStartState(atnStartState), s0(nullptr), decision(decision) { _precedenceDfa = false; - if (is(atnStartState)) { - if (static_cast(atnStartState)->isPrecedenceDecision) { + if (atn::StarLoopEntryState::is(atnStartState)) { + if (downCast(atnStartState)->isPrecedenceDecision) { _precedenceDfa = true; s0 = new DFAState(std::unique_ptr(new atn::ATNConfigSet())); s0->isAcceptState = false; @@ -32,13 +38,12 @@ DFA::DFA(atn::DecisionState *atnStartState, size_t decision) } } -DFA::DFA(DFA &&other) : atnStartState(other.atnStartState), decision(other.decision) { +DFA::DFA(DFA &&other) : atnStartState(other.atnStartState), s0(other.s0), decision(other.decision) { // Source states are implicitly cleared by the move. 
states = std::move(other.states); other.atnStartState = nullptr; other.decision = 0; - s0 = other.s0; other.s0 = nullptr; _precedenceDfa = other._precedenceDfa; other._precedenceDfa = false; @@ -46,14 +51,15 @@ DFA::DFA(DFA &&other) : atnStartState(other.atnStartState), decision(other.decis DFA::~DFA() { bool s0InList = (s0 == nullptr); - for (auto state : states) { + for (auto *state : states) { if (state == s0) s0InList = true; delete state; } - if (!s0InList) + if (!s0InList) { delete s0; + } } bool DFA::isPrecedenceDfa() const { @@ -70,7 +76,7 @@ DFAState* DFA::getPrecedenceStartState(int precedence) const { return iterator->second; } -void DFA::setPrecedenceStartState(int precedence, DFAState *startState, SingleWriteMultipleReadLock &lock) { +void DFA::setPrecedenceStartState(int precedence, DFAState *startState) { if (!isPrecedenceDfa()) { throw IllegalStateException("Only precedence DFAs may contain a precedence start state."); } @@ -79,16 +85,12 @@ void DFA::setPrecedenceStartState(int precedence, DFAState *startState, SingleWr return; } - { - lock.writeLock(); - s0->edges[precedence] = startState; - lock.writeUnlock(); - } + s0->edges[precedence] = startState; } std::vector DFA::getStates() const { std::vector result; - for (auto state : states) + for (auto *state : states) result.push_back(state); std::sort(result.begin(), result.end(), [](DFAState *o1, DFAState *o2) -> bool { @@ -98,15 +100,6 @@ std::vector DFA::getStates() const { return result; } -std::string DFA::toString(const std::vector &tokenNames) { - if (s0 == nullptr) { - return ""; - } - DFASerializer serializer(this, tokenNames); - - return serializer.toString(); -} - std::string DFA::toString(const Vocabulary &vocabulary) const { if (s0 == nullptr) { return ""; @@ -116,7 +109,7 @@ std::string DFA::toString(const Vocabulary &vocabulary) const { return serializer.toString(); } -std::string DFA::toLexerString() { +std::string DFA::toLexerString() const { if (s0 == nullptr) { return ""; } diff 
--git a/runtime/Cpp/runtime/src/dfa/DFA.h b/runtime/Cpp/runtime/src/dfa/DFA.h old mode 100755 new mode 100644 index 99daf0a157..d6d5e7d788 --- a/runtime/Cpp/runtime/src/dfa/DFA.h +++ b/runtime/Cpp/runtime/src/dfa/DFA.h @@ -5,31 +5,45 @@ #pragma once +#include +#include +#include +#include +#include "antlr4-common.h" #include "dfa/DFAState.h" -namespace antlrcpp { - class SingleWriteMultipleReadLock; -} - namespace antlr4 { namespace dfa { - class ANTLR4CPP_PUBLIC DFA { + class ANTLR4CPP_PUBLIC DFA final { + private: + struct DFAStateHasher final { + size_t operator()(const DFAState *dfaState) const { + return dfaState->hashCode(); + } + }; + + struct DFAStateComparer final { + bool operator()(const DFAState *lhs, const DFAState *rhs) const { + return lhs == rhs || *lhs == *rhs; + } + }; + public: /// A set of all DFA states. Use a map so we can get old state back. /// Set only allows you to see if it's there. /// From which ATN state did we create this DFA? atn::DecisionState *atnStartState; - std::unordered_set states; // States are owned by this class. + std::unordered_set states; // States are owned by this class. DFAState *s0; size_t decision; - DFA(atn::DecisionState *atnStartState); + explicit DFA(atn::DecisionState *atnStartState); DFA(atn::DecisionState *atnStartState, size_t decision); DFA(const DFA &other) = delete; DFA(DFA &&other); - virtual ~DFA(); + ~DFA(); /** * Gets whether this DFA is a precedence DFA. Precedence DFAs use a special @@ -66,18 +80,14 @@ namespace dfa { * @throws IllegalStateException if this is not a precedence DFA. * @see #isPrecedenceDfa() */ - void setPrecedenceStartState(int precedence, DFAState *startState, antlrcpp::SingleWriteMultipleReadLock &lock); + void setPrecedenceStartState(int precedence, DFAState *startState); /// Return a list of all states in this DFA, ordered by state number. - virtual std::vector getStates() const; + std::vector getStates() const; - /** - * @deprecated Use {@link #toString(Vocabulary)} instead. 
- */ - virtual std::string toString(const std::vector& tokenNames); std::string toString(const Vocabulary &vocabulary) const; - virtual std::string toLexerString(); + std::string toLexerString() const; private: /** diff --git a/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp b/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp old mode 100755 new mode 100644 index 34c87a5602..e0de7c8b82 --- a/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp +++ b/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp @@ -3,6 +3,10 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "dfa/DFA.h" #include "Vocabulary.h" @@ -10,16 +14,9 @@ using namespace antlr4::dfa; -DFASerializer::DFASerializer(const DFA *dfa, const std::vector& tokenNames) - : DFASerializer(dfa, Vocabulary::fromTokenNames(tokenNames)) { -} - DFASerializer::DFASerializer(const DFA *dfa, const Vocabulary &vocabulary) : _dfa(dfa), _vocabulary(vocabulary) { } -DFASerializer::~DFASerializer() { -} - std::string DFASerializer::toString() const { if (_dfa->s0 == nullptr) { return ""; @@ -27,7 +24,7 @@ std::string DFASerializer::toString() const { std::stringstream ss; std::vector states = _dfa->getStates(); - for (auto s : states) { + for (auto *s : states) { for (size_t i = 0; i < s->edges.size(); i++) { DFAState *t = s->edges[i]; if (t != nullptr && t->stateNumber != INT32_MAX) { @@ -55,7 +52,7 @@ std::string DFASerializer::getStateString(DFAState *s) const { if (!s->predicates.empty()) { std::string buf; for (size_t i = 0; i < s->predicates.size(); i++) { - buf.append(s->predicates[i]->toString()); + buf.append(s->predicates[i].toString()); } return baseStateStr + "=>" + buf; } else { diff --git a/runtime/Cpp/runtime/src/dfa/DFASerializer.h b/runtime/Cpp/runtime/src/dfa/DFASerializer.h old mode 100755 new mode 100644 index a1fe5a539c..fd6f7c8d30 --- a/runtime/Cpp/runtime/src/dfa/DFASerializer.h +++ b/runtime/Cpp/runtime/src/dfa/DFASerializer.h @@ -5,6 +5,9 @@ #pragma once 
+#include +#include +#include "antlr4-common.h" #include "Vocabulary.h" namespace antlr4 { @@ -13,15 +16,15 @@ namespace dfa { /// A DFA walker that knows how to dump them to serialized strings. class ANTLR4CPP_PUBLIC DFASerializer { public: - DFASerializer(const DFA *dfa, const std::vector& tnames); DFASerializer(const DFA *dfa, const Vocabulary &vocabulary); - virtual ~DFASerializer(); - virtual std::string toString() const; + virtual ~DFASerializer() = default; + + std::string toString() const; protected: virtual std::string getEdgeLabel(size_t i) const; - virtual std::string getStateString(DFAState *s) const; + std::string getStateString(DFAState *s) const; private: const DFA *_dfa; diff --git a/runtime/Cpp/runtime/src/dfa/DFAState.cpp b/runtime/Cpp/runtime/src/dfa/DFAState.cpp old mode 100755 new mode 100644 index a9118dc953..997b63c3ec --- a/runtime/Cpp/runtime/src/dfa/DFAState.cpp +++ b/runtime/Cpp/runtime/src/dfa/DFAState.cpp @@ -3,6 +3,10 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include #include "atn/ATNConfigSet.h" #include "atn/SemanticContext.h" #include "atn/ATNConfig.h" @@ -13,41 +17,11 @@ using namespace antlr4::dfa; using namespace antlr4::atn; -DFAState::PredPrediction::PredPrediction(const Ref &pred, int alt) : pred(pred) { - InitializeInstanceFields(); - this->alt = alt; -} - -DFAState::PredPrediction::~PredPrediction() { -} - -std::string DFAState::PredPrediction::toString() { +std::string DFAState::PredPrediction::toString() const { return std::string("(") + pred->toString() + ", " + std::to_string(alt) + ")"; } -void DFAState::PredPrediction::InitializeInstanceFields() { - alt = 0; -} - -DFAState::DFAState() { - InitializeInstanceFields(); -} - -DFAState::DFAState(int state) : DFAState() { - stateNumber = state; -} - -DFAState::DFAState(std::unique_ptr configs_) : DFAState() { - configs = std::move(configs_); -} - -DFAState::~DFAState() { - for (auto predicate : predicates) { - delete predicate; - } -} - -std::set DFAState::getAltSet() { +std::set DFAState::getAltSet() const { std::set alts; if (configs != nullptr) { for (size_t i = 0; i < configs->size(); i++) { @@ -58,32 +32,28 @@ std::set DFAState::getAltSet() { } size_t DFAState::hashCode() const { - size_t hash = misc::MurmurHash::initialize(7); - hash = misc::MurmurHash::update(hash, configs->hashCode()); - hash = misc::MurmurHash::finish(hash, 1); - return hash; + return configs != nullptr ? 
configs->hashCode() : 0; } -bool DFAState::operator == (const DFAState &o) const { - // compare set of ATN configurations in this set with other - if (this == &o) { +bool DFAState::equals(const DFAState &other) const { + if (this == std::addressof(other)) { return true; } - - return *configs == *o.configs; + return configs == other.configs || + (configs != nullptr && other.configs != nullptr && *configs == *other.configs); } -std::string DFAState::toString() { +std::string DFAState::toString() const { std::stringstream ss; ss << stateNumber; if (configs) { ss << ":" << configs->toString(); } if (isAcceptState) { - ss << " => "; + ss << "=>"; if (!predicates.empty()) { for (size_t i = 0; i < predicates.size(); i++) { - ss << predicates[i]->toString(); + ss << predicates[i].toString(); } } else { ss << prediction; @@ -91,10 +61,3 @@ std::string DFAState::toString() { } return ss.str(); } - -void DFAState::InitializeInstanceFields() { - stateNumber = -1; - isAcceptState = false; - prediction = 0; - requiresFullContext = false; -} diff --git a/runtime/Cpp/runtime/src/dfa/DFAState.h b/runtime/Cpp/runtime/src/dfa/DFAState.h old mode 100755 new mode 100644 index 2f0ddba26a..9d7c93f152 --- a/runtime/Cpp/runtime/src/dfa/DFAState.h +++ b/runtime/Cpp/runtime/src/dfa/DFAState.h @@ -5,8 +5,16 @@ #pragma once +#include +#include +#include +#include +#include #include "antlr4-common.h" +#include "atn/ATNConfigSet.h" +#include "FlatHashMap.h" + namespace antlr4 { namespace dfa { @@ -35,23 +43,25 @@ namespace dfa { /// but with different ATN contexts (with same or different alts) /// meaning that state was reached via a different set of rule invocations. 
/// - class ANTLR4CPP_PUBLIC DFAState { + class ANTLR4CPP_PUBLIC DFAState final { public: - class PredPrediction { + struct ANTLR4CPP_PUBLIC PredPrediction final { public: - Ref pred; // never null; at least SemanticContext.NONE + Ref pred; // never null; at least SemanticContext.NONE int alt; - PredPrediction(const Ref &pred, int alt); - virtual ~PredPrediction(); + PredPrediction() = delete; - virtual std::string toString(); + PredPrediction(const PredPrediction&) = default; + PredPrediction(PredPrediction&&) = default; - private: - void InitializeInstanceFields(); - }; + PredPrediction(Ref pred, int alt) : pred(std::move(pred)), alt(alt) {} - int stateNumber; + PredPrediction& operator=(const PredPrediction&) = default; + PredPrediction& operator=(PredPrediction&&) = default; + + std::string toString() const; + }; std::unique_ptr configs; @@ -59,24 +69,14 @@ namespace dfa { /// maps to {@code edges[0]}. // ml: this is a sparse list, so we use a map instead of a vector. // Watch out: we no longer have the -1 offset, as it isn't needed anymore. - std::unordered_map edges; - - bool isAcceptState; + FlatHashMap edges; /// if accept state, what ttype do we match or alt do we predict? /// This is set to when {@code !=null} or /// . - size_t prediction; - - Ref lexerActionExecutor; + size_t prediction = 0; - /// - /// Indicates that this state was created during SLL prediction that - /// discovered a conflict between the configurations in the state. Future - /// invocations immediately jumped doing - /// full context prediction if this field is true. - /// - bool requiresFullContext; + Ref lexerActionExecutor; /// /// During SLL parsing, this is a list of predicates associated with the @@ -91,21 +91,34 @@ namespace dfa { ///

    /// This list is computed by . ///

    - std::vector predicates; + std::vector predicates; + + int stateNumber = -1; + + bool isAcceptState = false; + + /// + /// Indicates that this state was created during SLL prediction that + /// discovered a conflict between the configurations in the state. Future + /// invocations immediately jumped doing + /// full context prediction if this field is true. + /// + bool requiresFullContext = false; /// Map a predicate to a predicted alternative. - DFAState(); - DFAState(int state); - DFAState(std::unique_ptr configs); - virtual ~DFAState(); + DFAState() = default; + + explicit DFAState(int stateNumber) : stateNumber(stateNumber) {} + + explicit DFAState(std::unique_ptr configs) : configs(std::move(configs)) {} /// /// Get the set of all alts mentioned by all ATN configurations in this /// DFA state. /// - virtual std::set getAltSet(); + std::set getAltSet() const; - virtual size_t hashCode() const; + size_t hashCode() const; /// Two DFAState instances are equal if their ATN configuration sets /// are the same. This method is used to see if a state already exists. @@ -118,27 +131,29 @@ namespace dfa { /// ParserATNSimulator#addDFAState we need to know if any other state /// exists that has this exact set of ATN configurations. The /// stateNumber is irrelevant. 
- bool operator == (const DFAState &o) const; + bool equals(const DFAState &other) const; - virtual std::string toString(); + std::string toString() const; + }; - struct Hasher - { - size_t operator()(DFAState *k) const { - return k->hashCode(); - } - }; + inline bool operator==(const DFAState &lhs, const DFAState &rhs) { + return lhs.equals(rhs); + } - struct Comparer { - bool operator()(DFAState *lhs, DFAState *rhs) const - { - return *lhs == *rhs; - } - }; + inline bool operator!=(const DFAState &lhs, const DFAState &rhs) { + return !operator==(lhs, rhs); + } + +} // namespace dfa +} // namespace antlr4 + +namespace std { - private: - void InitializeInstanceFields(); + template <> + struct hash<::antlr4::dfa::DFAState> { + size_t operator()(const ::antlr4::dfa::DFAState &dfaState) const { + return dfaState.hashCode(); + } }; -} // namespace atn -} // namespace antlr4 +} // namespace std diff --git a/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp b/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp old mode 100755 new mode 100644 index c3af41c1cc..08feccd3b4 --- a/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp +++ b/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp @@ -3,16 +3,15 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "Vocabulary.h" #include "dfa/LexerDFASerializer.h" using namespace antlr4::dfa; -LexerDFASerializer::LexerDFASerializer(DFA *dfa) : DFASerializer(dfa, Vocabulary::EMPTY_VOCABULARY) { -} - -LexerDFASerializer::~LexerDFASerializer() { +LexerDFASerializer::LexerDFASerializer(const DFA *dfa) : DFASerializer(dfa, Vocabulary()) { } std::string LexerDFASerializer::getEdgeLabel(size_t i) const { diff --git a/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h b/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h old mode 100755 new mode 100644 index d1571071de..917861a9c0 --- a/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h +++ b/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h @@ -5,18 +5,20 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "dfa/DFASerializer.h" namespace antlr4 { namespace dfa { - class ANTLR4CPP_PUBLIC LexerDFASerializer : public DFASerializer { + class ANTLR4CPP_PUBLIC LexerDFASerializer final : public DFASerializer { public: - LexerDFASerializer(DFA *dfa); - virtual ~LexerDFASerializer(); + explicit LexerDFASerializer(const DFA *dfa); protected: - virtual std::string getEdgeLabel(size_t i) const override; + std::string getEdgeLabel(size_t i) const override; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/internal/Synchronization.cpp b/runtime/Cpp/runtime/src/internal/Synchronization.cpp new file mode 100644 index 0000000000..270d30980c --- /dev/null +++ b/runtime/Cpp/runtime/src/internal/Synchronization.cpp @@ -0,0 +1,101 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "antlr4-common.h" +#include "internal/Synchronization.h" + +using namespace antlr4::internal; + +void Mutex::lock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.Lock(); +#else + _impl.lock(); +#endif +} + +bool Mutex::try_lock() { +#if ANTLR4CPP_USING_ABSEIL + return _impl.TryLock(); +#else + return _impl.try_lock(); +#endif +} + +void Mutex::unlock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.Unlock(); +#else + _impl.unlock(); +#endif +} + +void SharedMutex::lock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.WriterLock(); +#else + _impl.lock(); +#endif +} + +bool SharedMutex::try_lock() { +#if ANTLR4CPP_USING_ABSEIL + return _impl.WriterTryLock(); +#else + return _impl.try_lock(); +#endif +} + +void SharedMutex::unlock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.WriterUnlock(); +#else + _impl.unlock(); +#endif +} + +void SharedMutex::lock_shared() { +#if ANTLR4CPP_USING_ABSEIL + _impl.ReaderLock(); +#else + _impl.lock_shared(); +#endif +} + +bool SharedMutex::try_lock_shared() { +#if ANTLR4CPP_USING_ABSEIL + return _impl.ReaderTryLock(); +#else + return _impl.try_lock_shared(); +#endif +} + +void SharedMutex::unlock_shared() { +#if ANTLR4CPP_USING_ABSEIL + _impl.ReaderUnlock(); +#else + _impl.unlock_shared(); +#endif +} diff --git a/runtime/Cpp/runtime/src/internal/Synchronization.h b/runtime/Cpp/runtime/src/internal/Synchronization.h new file mode 100644 index 0000000000..4f969a8ab6 --- /dev/null +++ b/runtime/Cpp/runtime/src/internal/Synchronization.h @@ -0,0 +1,154 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "antlr4-common.h" + +#include +#include +#include + +#if ANTLR4CPP_USING_ABSEIL +#include "absl/base/call_once.h" +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" +#define ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS ABSL_NO_THREAD_SAFETY_ANALYSIS +#else +#define ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS +#endif + +// By default ANTLRv4 uses synchronization primitives provided by the C++ standard library. In most +// deployments this is fine, however in some using custom synchronization primitives may be +// preferred. This header allows that by optionally supporting some alternative implementations and +// allowing for more easier patching of other alternatives. 
+ +namespace antlr4::internal { + + // Must be compatible with C++ standard library Mutex requirement. + class ANTLR4CPP_PUBLIC Mutex final { + public: + Mutex() = default; + + // No copying or moving, we are as strict as possible to support other implementations. + Mutex(const Mutex&) = delete; + Mutex(Mutex&&) = delete; + + // No copying or moving, we are as strict as possible to support other implementations. + Mutex& operator=(const Mutex&) = delete; + Mutex& operator=(Mutex&&) = delete; + + void lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + bool try_lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void unlock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + private: +#if ANTLR4CPP_USING_ABSEIL + absl::Mutex _impl; +#else + std::mutex _impl; +#endif + }; + + template + using UniqueLock = std::unique_lock; + + // Must be compatible with C++ standard library SharedMutex requirement. + class ANTLR4CPP_PUBLIC SharedMutex final { + public: + SharedMutex() = default; + + // No copying or moving, we are as strict as possible to support other implementations. + SharedMutex(const SharedMutex&) = delete; + SharedMutex(SharedMutex&&) = delete; + + // No copying or moving, we are as strict as possible to support other implementations. + SharedMutex& operator=(const SharedMutex&) = delete; + SharedMutex& operator=(SharedMutex&&) = delete; + + void lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + bool try_lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void unlock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void lock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + bool try_lock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void unlock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + private: +#if ANTLR4CPP_USING_ABSEIL + absl::Mutex _impl; +#else + std::shared_mutex _impl; +#endif + }; + + template + using SharedLock = std::shared_lock; + + class OnceFlag; + + template + void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... 
args); + + // Must be compatible with std::once_flag. + class ANTLR4CPP_PUBLIC OnceFlag final { + public: + constexpr OnceFlag() = default; + + // No copying or moving, we are as strict as possible to support other implementations. + OnceFlag(const OnceFlag&) = delete; + OnceFlag(OnceFlag&&) = delete; + + // No copying or moving, we are as strict as possible to support other implementations. + OnceFlag& operator=(const OnceFlag&) = delete; + OnceFlag& operator=(OnceFlag&&) = delete; + + private: + template + friend void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... args); + +#if ANTLR4CPP_USING_ABSEIL + absl::once_flag _impl; +#else + std::once_flag _impl; +#endif + }; + + template + void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... args) { +#if ANTLR4CPP_USING_ABSEIL + absl::call_once(onceFlag._impl, std::forward(callable), std::forward(args)...); +#else + std::call_once(onceFlag._impl, std::forward(callable), std::forward(args)...); +#endif + } + +} // namespace antlr4::internal diff --git a/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp b/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp old mode 100755 new mode 100644 index c77b8bca2b..e2d9712192 --- a/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp +++ b/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp @@ -3,6 +3,10 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include #include "atn/ATN.h" #include "atn/ATNDeserializer.h" #include "Vocabulary.h" @@ -101,7 +105,7 @@ InterpreterData InterpreterDataReader::parseFile(std::string const& fileName) { }; } - std::vector serializedATN; + std::vector serializedATN; std::getline(input, line, '\n'); assert(line == "atn:"); @@ -115,7 +119,7 @@ InterpreterData InterpreterDataReader::parseFile(std::string const& fileName) { number = std::strtoul(&value[1], nullptr, 10); else number = std::strtoul(value.c_str(), nullptr, 10); - serializedATN.push_back(static_cast(number)); + serializedATN.push_back(static_cast(number)); } ATNDeserializer deserializer; diff --git a/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h b/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h old mode 100755 new mode 100644 index 0c32ac62fc..b85138180d --- a/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h +++ b/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h @@ -5,13 +5,18 @@ #pragma once +#include +#include +#include #include "antlr4-common.h" +#include "atn/ATN.h" +#include "Vocabulary.h" namespace antlr4 { namespace misc { struct InterpreterData { - atn::ATN atn; + std::unique_ptr atn; dfa::Vocabulary vocabulary; std::vector ruleNames; std::vector channels; // Only valid for lexer grammars. diff --git a/runtime/Cpp/runtime/src/misc/Interval.cpp b/runtime/Cpp/runtime/src/misc/Interval.cpp old mode 100755 new mode 100644 index bb521eb9df..4e7f09c8b3 --- a/runtime/Cpp/runtime/src/misc/Interval.cpp +++ b/runtime/Cpp/runtime/src/misc/Interval.cpp @@ -3,39 +3,13 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "misc/Interval.h" using namespace antlr4::misc; -size_t antlr4::misc::numericToSymbol(ssize_t v) { - return static_cast(v); -} - -ssize_t antlr4::misc::symbolToNumeric(size_t v) { - return static_cast(v); -} - -Interval const Interval::INVALID; - -Interval::Interval() : Interval(static_cast(-1), -2) { // Need an explicit cast here for VS. -} - -Interval::Interval(size_t a_, size_t b_) : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) { -} - -Interval::Interval(ssize_t a_, ssize_t b_) : a(a_), b(b_) { -} - -size_t Interval::length() const { - if (b < a) { - return 0; - } - return size_t(b - a + 1); -} - -bool Interval::operator == (const Interval &other) const { - return a == other.a && b == other.b; -} +const Interval Interval::INVALID; size_t Interval::hashCode() const { size_t hash = 23; diff --git a/runtime/Cpp/runtime/src/misc/Interval.h b/runtime/Cpp/runtime/src/misc/Interval.h old mode 100755 new mode 100644 index 0198ee5ae1..6c954b42ff --- a/runtime/Cpp/runtime/src/misc/Interval.h +++ b/runtime/Cpp/runtime/src/misc/Interval.h @@ -5,6 +5,8 @@ #pragma once +#include +#include #include "antlr4-common.h" namespace antlr4 { @@ -13,11 +15,11 @@ namespace misc { // Helpers to convert certain unsigned symbols (e.g. Token::EOF) to their original numeric value (e.g. -1) // and vice versa. This is needed mostly for intervals to keep their original order and for toString() // methods to print the original numeric value (e.g. for tests). 
- size_t numericToSymbol(ssize_t v); - ssize_t symbolToNumeric(size_t v); + constexpr size_t numericToSymbol(ssize_t v) { return static_cast(v); } + constexpr ssize_t symbolToNumeric(size_t v) { return static_cast(v); } /// An immutable inclusive interval a..b - class ANTLR4CPP_PUBLIC Interval { + class ANTLR4CPP_PUBLIC Interval final { public: static const Interval INVALID; @@ -25,15 +27,17 @@ namespace misc { ssize_t a; ssize_t b; - Interval(); - explicit Interval(size_t a_, size_t b_); // For unsigned -> signed mappings. - Interval(ssize_t a_, ssize_t b_); + constexpr Interval() : Interval(static_cast(-1), static_cast(-2)) {} + + constexpr explicit Interval(size_t a_, size_t b_) : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) {} + + constexpr Interval(ssize_t a_, ssize_t b_) : a(a_), b(b_) {} /// return number of elements between a and b inclusively. x..x is length 1. /// if b < a, then length is 0. 9..10 has length 2. - size_t length() const; + constexpr size_t length() const { return b >= a ? static_cast(b - a + 1) : 0; } - bool operator == (const Interval &other) const; + constexpr bool operator==(const Interval &other) const { return a == other.a && b == other.b; } size_t hashCode() const; @@ -76,8 +80,6 @@ namespace misc { Interval intersection(const Interval &other) const; std::string toString() const; - - private: }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/misc/IntervalSet.cpp b/runtime/Cpp/runtime/src/misc/IntervalSet.cpp old mode 100755 new mode 100644 index 031b9ba5b2..e2b95c3ded --- a/runtime/Cpp/runtime/src/misc/IntervalSet.cpp +++ b/runtime/Cpp/runtime/src/misc/IntervalSet.cpp @@ -3,7 +3,15 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include #include "misc/MurmurHash.h" +#include "misc/Interval.h" +#include "Token.h" +#include "antlr4-common.h" #include "Lexer.h" #include "Exceptions.h" #include "Vocabulary.h" @@ -13,7 +21,7 @@ using namespace antlr4; using namespace antlr4::misc; -IntervalSet const IntervalSet::COMPLETE_CHAR_SET = +IntervalSet const IntervalSet::COMPLETE_CHAR_SET = IntervalSet::of(Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE); IntervalSet const IntervalSet::EMPTY_SET; @@ -37,7 +45,7 @@ IntervalSet& IntervalSet::operator=(const IntervalSet& other) { } IntervalSet& IntervalSet::operator=(IntervalSet&& other) { - _intervals = move(other._intervals); + _intervals = std::move(other._intervals); return *this; } @@ -112,7 +120,7 @@ void IntervalSet::add(const Interval &addition) { IntervalSet IntervalSet::Or(const std::vector &sets) { IntervalSet result; - for (auto &s : sets) { + for (const auto &s : sets) { result.addAll(s); } return result; @@ -265,18 +273,13 @@ bool IntervalSet::contains(size_t el) const { } bool IntervalSet::contains(ssize_t el) const { - if (_intervals.empty()) + if (_intervals.empty() || el < _intervals.front().a || el > _intervals.back().b) { return false; - - if (el < _intervals[0].a) // list is sorted and el is before first interval; not here - return false; - - for (auto &interval : _intervals) { - if (el >= interval.a && el <= interval.b) { - return true; // found in this interval - } } - return false; + + return std::binary_search(_intervals.begin(), _intervals.end(), Interval(el, el), [](const Interval &lhs, const Interval &rhs) { + return lhs.b < rhs.a; + }); } bool IntervalSet::isEmpty() const { @@ -306,7 +309,7 @@ ssize_t IntervalSet::getMinElement() const { return Token::INVALID_TYPE; } - return _intervals[0].a; + return _intervals.front().a; } std::vector const& IntervalSet::getIntervals() const { @@ -315,7 +318,7 @@ std::vector const& IntervalSet::getIntervals() const { size_t IntervalSet::hashCode() 
const { size_t hash = MurmurHash::initialize(); - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { hash = MurmurHash::update(hash, interval.a); hash = MurmurHash::update(hash, interval.b); } @@ -349,7 +352,7 @@ std::string IntervalSet::toString(bool elemAreChar) const { } bool firstEntry = true; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { if (!firstEntry) ss << ", "; firstEntry = false; @@ -379,10 +382,6 @@ std::string IntervalSet::toString(bool elemAreChar) const { return ss.str(); } -std::string IntervalSet::toString(const std::vector &tokenNames) const { - return toString(dfa::Vocabulary::fromTokenNames(tokenNames)); -} - std::string IntervalSet::toString(const dfa::Vocabulary &vocabulary) const { if (_intervals.empty()) { return "{}"; @@ -395,7 +394,7 @@ std::string IntervalSet::toString(const dfa::Vocabulary &vocabulary) const { } bool firstEntry = true; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { if (!firstEntry) ss << ", "; firstEntry = false; @@ -420,10 +419,6 @@ std::string IntervalSet::toString(const dfa::Vocabulary &vocabulary) const { return ss.str(); } -std::string IntervalSet::elementName(const std::vector &tokenNames, ssize_t a) const { - return elementName(dfa::Vocabulary::fromTokenNames(tokenNames), a); -} - std::string IntervalSet::elementName(const dfa::Vocabulary &vocabulary, ssize_t a) const { if (a == -1) { return ""; @@ -436,7 +431,7 @@ std::string IntervalSet::elementName(const dfa::Vocabulary &vocabulary, ssize_t size_t IntervalSet::size() const { size_t result = 0; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { result += size_t(interval.b - interval.a + 1); } return result; @@ -444,7 +439,7 @@ size_t IntervalSet::size() const { std::vector IntervalSet::toList() const { std::vector result; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { ssize_t a = interval.a; ssize_t 
b = interval.b; for (ssize_t v = a; v <= b; v++) { @@ -456,7 +451,7 @@ std::vector IntervalSet::toList() const { std::set IntervalSet::toSet() const { std::set result; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { ssize_t a = interval.a; ssize_t b = interval.b; for (ssize_t v = a; v <= b; v++) { @@ -468,7 +463,7 @@ std::set IntervalSet::toSet() const { ssize_t IntervalSet::get(size_t i) const { size_t index = 0; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { ssize_t a = interval.a; ssize_t b = interval.b; for (ssize_t v = a; v <= b; v++) { diff --git a/runtime/Cpp/runtime/src/misc/IntervalSet.h b/runtime/Cpp/runtime/src/misc/IntervalSet.h old mode 100755 new mode 100644 index aa2adf66fa..d2049551e0 --- a/runtime/Cpp/runtime/src/misc/IntervalSet.h +++ b/runtime/Cpp/runtime/src/misc/IntervalSet.h @@ -5,7 +5,11 @@ #pragma once +#include +#include +#include #include "misc/Interval.h" +#include "antlr4-common.h" #include "Exceptions.h" namespace antlr4 { @@ -23,7 +27,7 @@ namespace misc { * the range {@link Integer#MIN_VALUE} to {@link Integer#MAX_VALUE} * (inclusive).

    */ - class ANTLR4CPP_PUBLIC IntervalSet { + class ANTLR4CPP_PUBLIC IntervalSet final { public: static IntervalSet const COMPLETE_CHAR_SET; static IntervalSet const EMPTY_SET; @@ -151,17 +155,9 @@ namespace misc { std::string toString() const; std::string toString(bool elemAreChar) const; - /** - * @deprecated Use {@link #toString(Vocabulary)} instead. - */ - std::string toString(const std::vector &tokenNames) const; std::string toString(const dfa::Vocabulary &vocabulary) const; protected: - /** - * @deprecated Use {@link #elementName(Vocabulary, int)} instead. - */ - std::string elementName(const std::vector &tokenNames, ssize_t a) const; std::string elementName(const dfa::Vocabulary &vocabulary, ssize_t a) const; public: diff --git a/runtime/Cpp/runtime/src/misc/MurmurHash.cpp b/runtime/Cpp/runtime/src/misc/MurmurHash.cpp old mode 100755 new mode 100644 index 55e96c9e31..12260636b3 --- a/runtime/Cpp/runtime/src/misc/MurmurHash.cpp +++ b/runtime/Cpp/runtime/src/misc/MurmurHash.cpp @@ -1,8 +1,13 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include + +#include "antlr4-common.h" #include "misc/MurmurHash.h" using namespace antlr4::misc; @@ -17,118 +22,100 @@ using namespace antlr4::misc; #if defined(_MSC_VER) -#define FORCE_INLINE __forceinline - #include #define ROTL32(x,y) _rotl(x,y) #define ROTL64(x,y) _rotl64(x,y) -#define BIG_CONSTANT(x) (x) +#elif ANTLR4CPP_HAVE_BUILTIN(__builtin_rotateleft32) && ANTLR4CPP_HAVE_BUILTIN(__builtin_rotateleft64) + +#define ROTL32(x, y) __builtin_rotateleft32(x, y) +#define ROTL64(x, y) __builtin_rotateleft64(x, y) #else // defined(_MSC_VER) // Other compilers -#define FORCE_INLINE inline __attribute__((always_inline)) +namespace { -inline uint32_t rotl32 (uint32_t x, int8_t r) -{ +constexpr uint32_t ROTL32(uint32_t x, int r) { return (x << r) | (x >> (32 - r)); } - -inline uint64_t rotl64 (uint64_t x, int8_t r) -{ +constexpr uint64_t ROTL64(uint64_t x, int r) { return (x << r) | (x >> (64 - r)); } -#define ROTL32(x,y) rotl32(x,y) -#define ROTL64(x,y) rotl64(x,y) - -#define BIG_CONSTANT(x) (x##LLU) - -#endif // !defined(_MSC_VER) - -size_t MurmurHash::initialize() { - return initialize(DEFAULT_SEED); -} - -size_t MurmurHash::initialize(size_t seed) { - return seed; } -#if defined(_WIN32) || defined(_WIN64) - #if _WIN64 - #define ENVIRONMENT64 - #else - #define ENVIRONMENT32 - #endif -#endif - -#if defined(__GNUC__) - #if defined(__x86_64__) || defined(__ppc64__) - #define ENVIRONMENT64 - #else - #define ENVIRONMENT32 - #endif -#endif +#endif // !defined(_MSC_VER) -#if defined(ENVIRONMENT32) +#if SIZE_MAX == UINT64_MAX size_t MurmurHash::update(size_t hash, size_t value) { - static const size_t c1 = 0xCC9E2D51; - static const size_t c2 = 0x1B873593; - size_t k1 = value; - k1 *= c1; - k1 = ROTL32(k1, 15); - k1 *= c2; + k1 *= UINT64_C(0x87c37b91114253d5); + k1 = ROTL64(k1, 31); + k1 *= UINT64_C(0x4cf5ad432745937f); hash ^= k1; - hash = ROTL32(hash, 13); - hash = hash * 5 + 0xE6546B64; + hash = ROTL64(hash, 27); + hash = hash * 5 + 
UINT64_C(0x52dce729); return hash; } - size_t MurmurHash::finish(size_t hash, size_t entryCount) { - hash ^= entryCount * 4; - hash ^= hash >> 16; - hash *= 0x85EBCA6B; - hash ^= hash >> 13; - hash *= 0xC2B2AE35; - hash ^= hash >> 16; + hash ^= entryCount * 8; + hash ^= hash >> 33; + hash *= UINT64_C(0xff51afd7ed558ccd); + hash ^= hash >> 33; + hash *= UINT64_C(0xc4ceb9fe1a85ec53); + hash ^= hash >> 33; return hash; } -#else +#elif SIZE_MAX == UINT32_MAX size_t MurmurHash::update(size_t hash, size_t value) { - static const size_t c1 = BIG_CONSTANT(0x87c37b91114253d5); - static const size_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); - size_t k1 = value; - k1 *= c1; - k1 = ROTL64(k1, 31); - k1 *= c2; + k1 *= UINT32_C(0xCC9E2D51); + k1 = ROTL32(k1, 15); + k1 *= UINT32_C(0x1B873593); hash ^= k1; - hash = ROTL64(hash, 27); - hash = hash * 5 + 0x52dce729; + hash = ROTL32(hash, 13); + hash = hash * 5 + UINT32_C(0xE6546B64); return hash; } - size_t MurmurHash::finish(size_t hash, size_t entryCount) { - hash ^= entryCount * 8; - hash ^= hash >> 33; - hash *= 0xff51afd7ed558ccd; - hash ^= hash >> 33; - hash *= 0xc4ceb9fe1a85ec53; - hash ^= hash >> 33; + hash ^= entryCount * 4; + hash ^= hash >> 16; + hash *= UINT32_C(0x85EBCA6B); + hash ^= hash >> 13; + hash *= UINT32_C(0xC2B2AE35); + hash ^= hash >> 16; return hash; } +#else +#error "Expected sizeof(size_t) to be 4 or 8." 
#endif + +size_t MurmurHash::update(size_t hash, const void *data, size_t size) { + size_t value; + const uint8_t *bytes = static_cast(data); + while (size >= sizeof(size_t)) { + std::memcpy(&value, bytes, sizeof(size_t)); + hash = update(hash, value); + bytes += sizeof(size_t); + size -= sizeof(size_t); + } + if (size != 0) { + value = 0; + std::memcpy(&value, bytes, size); + hash = update(hash, value); + } + return hash; +} diff --git a/runtime/Cpp/runtime/src/misc/MurmurHash.h b/runtime/Cpp/runtime/src/misc/MurmurHash.h old mode 100755 new mode 100644 index b8b5a55796..c6c33dfe93 --- a/runtime/Cpp/runtime/src/misc/MurmurHash.h +++ b/runtime/Cpp/runtime/src/misc/MurmurHash.h @@ -5,23 +5,27 @@ #pragma once +#include +#include +#include +#include + #include "antlr4-common.h" namespace antlr4 { namespace misc { - class ANTLR4CPP_PUBLIC MurmurHash { - + class ANTLR4CPP_PUBLIC MurmurHash final { private: - static const size_t DEFAULT_SEED = 0; + static constexpr size_t DEFAULT_SEED = 0; /// Initialize the hash using the default seed value. /// Returns the intermediate hash value. public: - static size_t initialize(); + static size_t initialize() { return initialize(DEFAULT_SEED); } /// Initialize the hash using the specified seed. - static size_t initialize(size_t seed); + static size_t initialize(size_t seed) { return seed; } /// Update the intermediate hash value for the next input {@code value}. /// the intermediate hash value @@ -46,6 +50,13 @@ namespace misc { return update(hash, value != nullptr ? value->hashCode() : 0); } + static size_t update(size_t hash, const void *data, size_t size); + + template + static size_t update(size_t hash, const T *data, size_t size) { + return update(hash, static_cast(data), size * sizeof(std::remove_reference_t)); + } + /// /// Apply the final computation steps to the intermediate value {@code hash} /// to form the final result of the MurmurHash 3 hash function. 
@@ -62,14 +73,31 @@ namespace misc { /// the seed for the MurmurHash algorithm /// the hash code of the data template // where T is C array type - static size_t hashCode(const std::vector> &data, size_t seed) { + static size_t hashCode(const std::vector> &data, size_t seed = DEFAULT_SEED) { size_t hash = initialize(seed); - for (auto entry : data) { - hash = update(hash, entry->hashCode()); + for (auto &entry : data) { + hash = update(hash, entry); } - return finish(hash, data.size()); } + + static size_t hashCode(const void *data, size_t size, size_t seed = DEFAULT_SEED) { + size_t hash = initialize(seed); + hash = update(hash, data, size); + return finish(hash, size); + } + + template + static size_t hashCode(const T *data, size_t size, size_t seed = DEFAULT_SEED) { + return hashCode(static_cast(data), size * sizeof(std::remove_reference_t), seed); + } + + private: + MurmurHash() = delete; + + MurmurHash(const MurmurHash&) = delete; + + MurmurHash& operator=(const MurmurHash&) = delete; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/support/Any.cpp b/runtime/Cpp/runtime/src/support/Any.cpp index b324cc15db..a1ed50d456 100644 --- a/runtime/Cpp/runtime/src/support/Any.cpp +++ b/runtime/Cpp/runtime/src/support/Any.cpp @@ -6,8 +6,3 @@ #include "Any.h" using namespace antlrcpp; - -Any::~Any() -{ - delete _ptr; -} diff --git a/runtime/Cpp/runtime/src/support/Any.h b/runtime/Cpp/runtime/src/support/Any.h index 5db59f6e6b..fa5df58946 100644 --- a/runtime/Cpp/runtime/src/support/Any.h +++ b/runtime/Cpp/runtime/src/support/Any.h @@ -9,162 +9,8 @@ #include "antlr4-common.h" -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable: 4521) // 'antlrcpp::Any': multiple copy constructors specified -#endif - namespace antlrcpp { -template - using StorageType = typename std::decay::type; - -struct ANTLR4CPP_PUBLIC Any -{ - bool isNull() const { return _ptr == nullptr; } - bool isNotNull() const { return _ptr != nullptr; } - - Any() : _ptr(nullptr) { - } - - 
Any(Any& that) : _ptr(that.clone()) { - } - - Any(Any&& that) : _ptr(that._ptr) { - that._ptr = nullptr; - } - - Any(const Any& that) : _ptr(that.clone()) { - } - - Any(const Any&& that) : _ptr(that.clone()) { - } - - template - Any(U&& value) : _ptr(new Derived>(std::forward(value))) { - } - - template - bool is() const { - auto derived = getDerived(false); - - return derived != nullptr; - } - - template - StorageType& as() { - auto derived = getDerived(true); - - return derived->value; - } - - template - const StorageType& as() const { - auto derived = getDerived(true); - - return derived->value; - } - - template - operator U() { - return as>(); - } - - template - operator const U() const { - return as>(); - } - - Any& operator = (const Any& a) { - if (_ptr == a._ptr) - return *this; - - auto old_ptr = _ptr; - _ptr = a.clone(); - - if (old_ptr) - delete old_ptr; - - return *this; - } - - Any& operator = (Any&& a) { - if (_ptr == a._ptr) - return *this; - - std::swap(_ptr, a._ptr); - - return *this; - } - - virtual ~Any(); - - virtual bool equals(Any other) const { - return _ptr == other._ptr; - } - -private: - struct Base { - virtual ~Base() {}; - virtual Base* clone() const = 0; - }; - - template - struct Derived : Base - { - template Derived(U&& value_) : value(std::forward(value_)) { - } - - T value; - - Base* clone() const { - return clone<>(); - } - - private: - template::value, int>::type = 0> - Base* clone() const { - return new Derived(value); - } - - template::value, int>::type = 0> - Base* clone() const { - return nullptr; - } - - }; - - Base* clone() const - { - if (_ptr) - return _ptr->clone(); - else - return nullptr; - } - - template - Derived>* getDerived(bool checkCast) const { - typedef StorageType T; - - auto derived = dynamic_cast*>(_ptr); - - if (checkCast && !derived) - throw std::bad_cast(); - - return derived; - } - - Base *_ptr; - -}; - - template<> inline - Any::Any(std::nullptr_t&& ) : _ptr(nullptr) { - } - + using Any = std::any; } // 
namespace antlrcpp - -#ifdef _MSC_VER -#pragma warning(pop) -#endif diff --git a/runtime/Cpp/runtime/src/support/Arrays.cpp b/runtime/Cpp/runtime/src/support/Arrays.cpp index 694e44c8aa..062cae0bc9 100644 --- a/runtime/Cpp/runtime/src/support/Arrays.cpp +++ b/runtime/Cpp/runtime/src/support/Arrays.cpp @@ -3,6 +3,9 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "tree/ParseTree.h" #include "Exceptions.h" @@ -16,7 +19,7 @@ std::string Arrays::listToString(const std::vector &list, const std bool firstEntry = true; ss << '['; - for (auto &entry : list) { + for (const auto &entry : list) { ss << entry; if (firstEntry) { ss << separator; @@ -32,7 +35,7 @@ template <> std::string Arrays::toString(const std::vector &source) { std::string result = "["; bool firstEntry = true; - for (auto value : source) { + for (auto *value : source) { result += value->toStringTree(); if (firstEntry) { result += ", "; diff --git a/runtime/Cpp/runtime/src/support/Arrays.h b/runtime/Cpp/runtime/src/support/Arrays.h index 18e6a8a712..a75f04c39d 100644 --- a/runtime/Cpp/runtime/src/support/Arrays.h +++ b/runtime/Cpp/runtime/src/support/Arrays.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include #include "antlr4-common.h" namespace antlrcpp { @@ -32,8 +36,13 @@ namespace antlrcpp { return false; for (size_t i = 0; i < a.size(); ++i) { + if (!a[i] && !b[i]) + continue; + if (!a[i] || !b[i]) + return false; if (a[i] == b[i]) continue; + if (!(*a[i] == *b[i])) return false; } @@ -61,6 +70,26 @@ namespace antlrcpp { return true; } + template + static bool equals(const std::vector> &a, const std::vector> &b) { + if (a.size() != b.size()) + return false; + + for (size_t i = 0; i < a.size(); ++i) { + if (!a[i] && !b[i]) + continue; + if (!a[i] || !b[i]) + return false; + if (a[i] == b[i]) + continue; + + if (!(*a[i] == *b[i])) + return false; + } + + return true; + } + template static std::string toString(const std::vector 
&source) { std::string result = "["; @@ -89,6 +118,20 @@ namespace antlrcpp { return result + "]"; } + template + static std::string toString(const std::vector> &source) { + std::string result = "["; + bool firstEntry = true; + for (auto &value : source) { + result += value->toString(); + if (firstEntry) { + result += ", "; + firstEntry = false; + } + } + return result + "]"; + } + template static std::string toString(const std::vector &source) { std::string result = "["; diff --git a/runtime/Cpp/runtime/src/support/BitSet.h b/runtime/Cpp/runtime/src/support/BitSet.h index bf849b1874..7fa963ccc8 100644 --- a/runtime/Cpp/runtime/src/support/BitSet.h +++ b/runtime/Cpp/runtime/src/support/BitSet.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include #include "antlr4-common.h" namespace antlrcpp { @@ -54,7 +58,7 @@ namespace antlrcpp { return result; } - std::string toString(){ + std::string toString() const { std::stringstream stream; stream << "{"; bool valueAdded = false; diff --git a/runtime/Cpp/runtime/src/support/CPPUtils.cpp b/runtime/Cpp/runtime/src/support/CPPUtils.cpp old mode 100755 new mode 100644 index 2ca43d34d2..1fa965fa66 --- a/runtime/Cpp/runtime/src/support/CPPUtils.cpp +++ b/runtime/Cpp/runtime/src/support/CPPUtils.cpp @@ -3,14 +3,20 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include +#include #include "support/CPPUtils.h" namespace antlrcpp { - std::string join(std::vector strings, const std::string &separator) { + std::string join(const std::vector &strings, const std::string &separator) { std::string str; bool firstItem = true; - for (std::string s : strings) { + for (const std::string &s : strings) { if (!firstItem) { str.append(separator); } @@ -46,18 +52,15 @@ namespace antlrcpp { case ' ': if (escapeSpaces) { - result += "·"; + result += "\u00B7"; break; } - // else fall through -#ifndef _MSC_VER -#if __has_cpp_attribute(clang::fallthrough) - [[clang::fallthrough]]; -#endif -#endif + result += c; + break; default: result += c; + break; } } @@ -72,8 +75,13 @@ namespace antlrcpp { std::string arrayToString(const std::vector &data) { std::string answer; - for (auto sub: data) { - answer += sub; + size_t toReserve = 0; + for (const auto &sub : data) { + toReserve += sub.size(); + } + answer.reserve(toReserve); + for (const auto &sub: data) { + answer.append(sub); } return answer; } @@ -202,47 +210,4 @@ namespace antlrcpp { return result; } - //----------------- FinallyAction ------------------------------------------------------------------------------------ - - FinalAction finally(std::function f) { - return FinalAction(f); - } - - //----------------- SingleWriteMultipleRead -------------------------------------------------------------------------- - - void SingleWriteMultipleReadLock::readLock() { - std::unique_lock lock(_mutex); - while (_waitingWriters != 0) - _readerGate.wait(lock); - ++_activeReaders; - lock.unlock(); - } - - void SingleWriteMultipleReadLock::readUnlock() { - std::unique_lock lock(_mutex); - --_activeReaders; - lock.unlock(); - _writerGate.notify_one(); - } - - void SingleWriteMultipleReadLock::writeLock() { - std::unique_lock lock(_mutex); - ++_waitingWriters; - while (_activeReaders != 0 || _activeWriters != 0) - _writerGate.wait(lock); - ++_activeWriters; - 
lock.unlock(); - } - - void SingleWriteMultipleReadLock::writeUnlock() { - std::unique_lock lock(_mutex); - --_waitingWriters; - --_activeWriters; - if (_waitingWriters > 0) - _writerGate.notify_one(); - else - _readerGate.notify_all(); - lock.unlock(); - } - } // namespace antlrcpp diff --git a/runtime/Cpp/runtime/src/support/CPPUtils.h b/runtime/Cpp/runtime/src/support/CPPUtils.h index fc83503cf4..b054f89e70 100644 --- a/runtime/Cpp/runtime/src/support/CPPUtils.h +++ b/runtime/Cpp/runtime/src/support/CPPUtils.h @@ -5,22 +5,29 @@ #pragma once +#include +#include +#include +#include +#include +#include #include "antlr4-common.h" namespace antlrcpp { - std::string join(std::vector strings, const std::string &separator); - std::map toMap(const std::vector &keys); - std::string escapeWhitespace(std::string str, bool escapeSpaces); - std::string toHexString(const int t); - std::string arrayToString(const std::vector &data); - std::string replaceString(const std::string &s, const std::string &from, const std::string &to); - std::vector split(const std::string &s, const std::string &sep, int count); - std::string indent(const std::string &s, const std::string &indentation, bool includingFirst = true); + ANTLR4CPP_PUBLIC std::string join(const std::vector &strings, const std::string &separator); + ANTLR4CPP_PUBLIC std::map toMap(const std::vector &keys); + ANTLR4CPP_PUBLIC std::string escapeWhitespace(std::string str, bool escapeSpaces); + ANTLR4CPP_PUBLIC std::string toHexString(const int t); + ANTLR4CPP_PUBLIC std::string arrayToString(const std::vector &data); + ANTLR4CPP_PUBLIC std::string replaceString(const std::string &s, const std::string &from, const std::string &to); + ANTLR4CPP_PUBLIC std::vector split(const std::string &s, const std::string &sep, int count); + ANTLR4CPP_PUBLIC std::string indent(const std::string &s, const std::string &indentation, bool includingFirst = true); // Using RAII + a lambda to implement a "finally" replacement. 
+ template struct FinalAction { - FinalAction(std::function f) : _cleanUp { f } {} + FinalAction(OnEnd f) : _cleanUp { std::move(f) } {} FinalAction(FinalAction &&other) : _cleanUp(std::move(other._cleanUp)), _enabled(other._enabled) { other._enabled = false; // Don't trigger the lambda after ownership has moved. @@ -29,11 +36,14 @@ namespace antlrcpp { void disable() { _enabled = false; } private: - std::function _cleanUp; + OnEnd _cleanUp; bool _enabled {true}; }; - ANTLR4CPP_PUBLIC FinalAction finally(std::function f); + template + FinalAction finally(OnEnd f) { + return FinalAction(std::move(f)); + } // Convenience functions to avoid lengthy dynamic_cast() != nullptr checks in many places. template @@ -56,23 +66,6 @@ namespace antlrcpp { } // Get the error text from an exception pointer or the current exception. - std::string what(std::exception_ptr eptr = std::current_exception()); - - class SingleWriteMultipleReadLock { - public: - void readLock(); - void readUnlock(); - void writeLock(); - void writeUnlock(); - - private: - std::condition_variable _readerGate; - std::condition_variable _writerGate; - - std::mutex _mutex; - size_t _activeReaders = 0; - size_t _waitingWriters = 0; - size_t _activeWriters = 0; - }; + ANTLR4CPP_PUBLIC std::string what(std::exception_ptr eptr = std::current_exception()); } // namespace antlrcpp diff --git a/runtime/Cpp/runtime/src/support/Casts.h b/runtime/Cpp/runtime/src/support/Casts.h new file mode 100644 index 0000000000..2ded955dcd --- /dev/null +++ b/runtime/Cpp/runtime/src/support/Casts.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2012-2021 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +#pragma once + +#include +#include +#include + +namespace antlrcpp { + + template + To downCast(From* from) { + static_assert(std::is_pointer_v, "Target type not a pointer."); + static_assert(std::is_base_of_v>, "Target type not derived from source type."); + #if !defined(__GNUC__) || defined(__GXX_RTTI) + assert(from == nullptr || dynamic_cast(from) != nullptr); + #endif + return static_cast(from); + } + + template + To downCast(From& from) { + static_assert(std::is_lvalue_reference_v, "Target type not a lvalue reference."); + static_assert(std::is_base_of_v>, "Target type not derived from source type."); + #if !defined(__GNUC__) || defined(__GXX_RTTI) + assert(dynamic_cast>>(std::addressof(from)) != nullptr); + #endif + return static_cast(from); + } + +} diff --git a/runtime/Cpp/runtime/src/support/Declarations.h b/runtime/Cpp/runtime/src/support/Declarations.h index a355d9b5bf..8e960676cf 100644 --- a/runtime/Cpp/runtime/src/support/Declarations.h +++ b/runtime/Cpp/runtime/src/support/Declarations.h @@ -69,7 +69,6 @@ namespace antlr4 { class ATNSimulator; class ATNState; enum class ATNType; - class AbstractPredicateTransition; class ActionTransition; class ArrayPredictionContext; class AtomTransition; @@ -78,7 +77,6 @@ namespace antlr4 { class BlockEndState; class BlockStartState; class DecisionState; - class EmptyPredictionContext; class EpsilonTransition; class LL1Analyzer; class LexerAction; diff --git a/runtime/Cpp/runtime/src/support/StringUtils.cpp b/runtime/Cpp/runtime/src/support/StringUtils.cpp index 552f1031a7..e477837d99 100644 --- a/runtime/Cpp/runtime/src/support/StringUtils.cpp +++ b/runtime/Cpp/runtime/src/support/StringUtils.cpp @@ -3,34 +3,38 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "support/StringUtils.h" namespace antlrcpp { -void replaceAll(std::string& str, std::string const& from, std::string const& to) -{ - if (from.empty()) - return; - - size_t start_pos = 0; - while ((start_pos = str.find(from, start_pos)) != std::string::npos) { - str.replace(start_pos, from.length(), to); - start_pos += to.length(); // In case 'to' contains 'from', like replacing 'x' with 'yx'. + std::string escapeWhitespace(std::string_view in) { + std::string out; + escapeWhitespace(out, in); + out.shrink_to_fit(); + return out; } -} - -std::string ws2s(std::wstring const& wstr) { - std::wstring_convert> converter; - std::string narrow = converter.to_bytes(wstr); - return narrow; -} - -std::wstring s2ws(const std::string &str) { - std::wstring_convert> converter; - std::wstring wide = converter.from_bytes(str); - - return wide; -} + std::string& escapeWhitespace(std::string& out, std::string_view in) { + out.reserve(in.size()); // Best case, no escaping. + for (const auto &c : in) { + switch (c) { + case '\t': + out.append("\\t"); + break; + case '\r': + out.append("\\r"); + break; + case '\n': + out.append("\\n"); + break; + default: + out.push_back(c); + break; + } + } + return out; + } } // namespace antrlcpp diff --git a/runtime/Cpp/runtime/src/support/StringUtils.h b/runtime/Cpp/runtime/src/support/StringUtils.h index 49715287e4..38dfecbdd1 100644 --- a/runtime/Cpp/runtime/src/support/StringUtils.h +++ b/runtime/Cpp/runtime/src/support/StringUtils.h @@ -5,50 +5,14 @@ #pragma once +#include +#include #include "antlr4-common.h" namespace antlrcpp { - // For all conversions utf8 <-> utf32. - // VS 2015 and VS 2017 have different bugs in std::codecvt_utf8 (VS 2013 works fine). 
-#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 - typedef std::wstring_convert, __int32> UTF32Converter; -#else - typedef std::wstring_convert, char32_t> UTF32Converter; -#endif - - // The conversion functions fails in VS2017, so we explicitly use a workaround. - template - inline std::string utf32_to_utf8(T const& data) - { - // Don't make the converter static or we have to serialize access to it. - thread_local UTF32Converter converter; + ANTLR4CPP_PUBLIC std::string escapeWhitespace(std::string_view in); - #if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 - auto p = reinterpret_cast(data.data()); - return converter.to_bytes(p, p + data.size()); - #else - return converter.to_bytes(data); - #endif - } + ANTLR4CPP_PUBLIC std::string& escapeWhitespace(std::string& out, std::string_view in); - inline UTF32String utf8_to_utf32(const char* first, const char* last) - { - thread_local UTF32Converter converter; - - #if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 - auto r = converter.from_bytes(first, last); - i32string s = reinterpret_cast(r.data()); - #else - std::u32string s = converter.from_bytes(first, last); - #endif - - return s; - } - - void replaceAll(std::string &str, std::string const& from, std::string const& to); - - // string <-> wstring conversion (UTF-16), e.g. for use with Window's wide APIs. - ANTLR4CPP_PUBLIC std::string ws2s(std::wstring const& wstr); - ANTLR4CPP_PUBLIC std::wstring s2ws(std::string const& str); } diff --git a/runtime/Cpp/runtime/src/support/Unicode.h b/runtime/Cpp/runtime/src/support/Unicode.h new file mode 100644 index 0000000000..f0f84375ad --- /dev/null +++ b/runtime/Cpp/runtime/src/support/Unicode.h @@ -0,0 +1,28 @@ +/* Copyright (c) 2021 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +#pragma once + +#include "antlr4-common.h" + +namespace antlrcpp { + + class ANTLR4CPP_PUBLIC Unicode final { + public: + static constexpr char32_t REPLACEMENT_CHARACTER = 0xfffd; + + static constexpr bool isValid(char32_t codePoint) { + return codePoint < 0xd800 || (codePoint > 0xdfff && codePoint <= 0x10ffff); + } + + private: + Unicode() = delete; + Unicode(const Unicode&) = delete; + Unicode(Unicode&&) = delete; + Unicode& operator=(const Unicode&) = delete; + Unicode& operator=(Unicode&&) = delete; + }; + +} diff --git a/runtime/Cpp/runtime/src/support/Utf8.cpp b/runtime/Cpp/runtime/src/support/Utf8.cpp new file mode 100644 index 0000000000..2412053ccf --- /dev/null +++ b/runtime/Cpp/runtime/src/support/Utf8.cpp @@ -0,0 +1,246 @@ +/* Copyright (c) 2021 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +#include +#include +#include +#include +#include +#include + +#include "support/Utf8.h" +#include "support/Unicode.h" + +// The below implementation is based off of https://github.com/google/cel-cpp/internal/utf8.cc, +// which is itself based off of https://go.googlesource.com/go/+/refs/heads/master/src/unicode/utf8/utf8.go. +// If for some reason you feel the need to copy this implementation, please retain a comment +// referencing the two source files and giving credit, as well as maintaining any and all +// obligations required by the BSD 3-clause license that governs this file. 
+ +namespace antlrcpp { + +namespace { + +#undef SELF + constexpr uint8_t SELF = 0x80; + +#undef LOW + constexpr uint8_t LOW = 0x80; +#undef HIGH + constexpr uint8_t HIGH = 0xbf; + +#undef MASKX + constexpr uint8_t MASKX = 0x3f; +#undef MASK2 + constexpr uint8_t MASK2 = 0x1f; +#undef MASK3 + constexpr uint8_t MASK3 = 0xf; +#undef MASK4 + constexpr uint8_t MASK4 = 0x7; + +#undef TX + constexpr uint8_t TX = 0x80; +#undef T2 + constexpr uint8_t T2 = 0xc0; +#undef T3 + constexpr uint8_t T3 = 0xe0; +#undef T4 + constexpr uint8_t T4 = 0xf0; + +#undef XX + constexpr uint8_t XX = 0xf1; +#undef AS + constexpr uint8_t AS = 0xf0; +#undef S1 + constexpr uint8_t S1 = 0x02; +#undef S2 + constexpr uint8_t S2 = 0x13; +#undef S3 + constexpr uint8_t S3 = 0x03; +#undef S4 + constexpr uint8_t S4 = 0x23; +#undef S5 + constexpr uint8_t S5 = 0x34; +#undef S6 + constexpr uint8_t S6 = 0x04; +#undef S7 + constexpr uint8_t S7 = 0x44; + + // NOLINTBEGIN + // clang-format off +#undef LEADING + constexpr uint8_t LEADING[256] = { + // 1 2 3 4 5 6 7 8 9 A B C D E F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x00-0x0F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x10-0x1F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x20-0x2F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x30-0x3F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x40-0x4F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x50-0x5F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x60-0x6F + AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0x80-0x8F + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0x90-0x9F + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xA0-0xAF + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, 
XX, XX, XX, XX, XX, // 0xB0-0xBF + XX, XX, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, // 0xC0-0xCF + S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, // 0xD0-0xDF + S2, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S4, S3, S3, // 0xE0-0xEF + S5, S6, S6, S6, S7, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xF0-0xFF + }; + // clang-format on + // NOLINTEND + +#undef ACCEPT + constexpr std::pair ACCEPT[16] = { + {LOW, HIGH}, {0xa0, HIGH}, {LOW, 0x9f}, {0x90, HIGH}, + {LOW, 0x8f}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, + {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, + {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, + }; + +} // namespace + + std::pair Utf8::decode(std::string_view input) { + assert(!input.empty()); + const auto b = static_cast(input.front()); + input.remove_prefix(1); + if (b < SELF) { + return {static_cast(b), 1}; + } + const auto leading = LEADING[b]; + if (leading == XX) { + return {Unicode::REPLACEMENT_CHARACTER, 1}; + } + auto size = static_cast(leading & 7) - 1; + if (size > input.size()) { + return {Unicode::REPLACEMENT_CHARACTER, 1}; + } + const auto& accept = ACCEPT[leading >> 4]; + const auto b1 = static_cast(input.front()); + input.remove_prefix(1); + if (b1 < accept.first || b1 > accept.second) { + return {Unicode::REPLACEMENT_CHARACTER, 1}; + } + if (size <= 1) { + return {(static_cast(b & MASK2) << 6) | + static_cast(b1 & MASKX), + 2}; + } + const auto b2 = static_cast(input.front()); + input.remove_prefix(1); + if (b2 < LOW || b2 > HIGH) { + return {Unicode::REPLACEMENT_CHARACTER, 1}; + } + if (size <= 2) { + return {(static_cast(b & MASK3) << 12) | + (static_cast(b1 & MASKX) << 6) | + static_cast(b2 & MASKX), + 3}; + } + const auto b3 = static_cast(input.front()); + input.remove_prefix(1); + if (b3 < LOW || b3 > HIGH) { + return {Unicode::REPLACEMENT_CHARACTER, 1}; + } + return {(static_cast(b & MASK4) << 18) | + (static_cast(b1 & MASKX) << 12) | + (static_cast(b2 & MASKX) << 6) | + static_cast(b3 & 
MASKX), + 4}; + } + + std::optional Utf8::strictDecode(std::string_view input) { + std::u32string output; + char32_t codePoint; + size_t codeUnits; + output.reserve(input.size()); // Worst case is each byte is a single Unicode code point. + for (size_t index = 0; index < input.size(); index += codeUnits) { + std::tie(codePoint, codeUnits) = Utf8::decode(input.substr(index)); + if (codePoint == Unicode::REPLACEMENT_CHARACTER && codeUnits == 1) { + // Condition is only met when an illegal byte sequence is encountered. See Utf8::decode. + return std::nullopt; + } + output.push_back(codePoint); + } + output.shrink_to_fit(); + return output; + } + + std::u32string Utf8::lenientDecode(std::string_view input) { + std::u32string output; + char32_t codePoint; + size_t codeUnits; + output.reserve(input.size()); // Worst case is each byte is a single Unicode code point. + for (size_t index = 0; index < input.size(); index += codeUnits) { + std::tie(codePoint, codeUnits) = Utf8::decode(input.substr(index)); + output.push_back(codePoint); + } + output.shrink_to_fit(); + return output; + } + + std::string& Utf8::encode(std::string* buffer, char32_t codePoint) { + assert(buffer != nullptr); + if (!Unicode::isValid(codePoint)) { + codePoint = Unicode::REPLACEMENT_CHARACTER; + } + if (codePoint <= 0x7f) { + buffer->push_back(static_cast(static_cast(codePoint))); + } else if (codePoint <= 0x7ff) { + buffer->push_back( + static_cast(T2 | static_cast(codePoint >> 6))); + buffer->push_back( + static_cast(TX | (static_cast(codePoint) & MASKX))); + } else if (codePoint <= 0xffff) { + buffer->push_back( + static_cast(T3 | static_cast(codePoint >> 12))); + buffer->push_back(static_cast( + TX | (static_cast(codePoint >> 6) & MASKX))); + buffer->push_back( + static_cast(TX | (static_cast(codePoint) & MASKX))); + } else { + buffer->push_back( + static_cast(T4 | static_cast(codePoint >> 18))); + buffer->push_back(static_cast( + TX | (static_cast(codePoint >> 12) & MASKX))); + 
buffer->push_back(static_cast( + TX | (static_cast(codePoint >> 6) & MASKX))); + buffer->push_back( + static_cast(TX | (static_cast(codePoint) & MASKX))); + } + return *buffer; + } + + std::optional Utf8::strictEncode(std::u32string_view input) { + std::string output; + output.reserve(input.size() * 4); // Worst case is each Unicode code point encodes to 4 bytes. + for (size_t index = 0; index < input.size(); index++) { + char32_t codePoint = input[index]; + if (!Unicode::isValid(codePoint)) { + return std::nullopt; + } + Utf8::encode(&output, codePoint); + } + output.shrink_to_fit(); + return output; + } + + std::string Utf8::lenientEncode(std::u32string_view input) { + std::string output; + output.reserve(input.size() * 4); // Worst case is each Unicode code point encodes to 4 bytes. + for (size_t index = 0; index < input.size(); index++) { + char32_t codePoint = input[index]; + if (!Unicode::isValid(codePoint)) { + codePoint = Unicode::REPLACEMENT_CHARACTER; + } + Utf8::encode(&output, codePoint); + } + output.shrink_to_fit(); + return output; + } + +} diff --git a/runtime/Cpp/runtime/src/support/Utf8.h b/runtime/Cpp/runtime/src/support/Utf8.h new file mode 100644 index 0000000000..c91156b28e --- /dev/null +++ b/runtime/Cpp/runtime/src/support/Utf8.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2021 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "antlr4-common.h" + +namespace antlrcpp { + + class ANTLR4CPP_PUBLIC Utf8 final { + public: + // Decodes the next code point, returning the decoded code point and the number + // of code units (a.k.a. bytes) consumed. In the event that an invalid code unit + // sequence is returned the replacement character, U+FFFD, is returned with a + // code unit count of 1. 
As U+FFFD requires 3 code units when encoded, this can + // be used to differentiate valid input from malformed input. + static std::pair decode(std::string_view input); + + // Decodes the given UTF-8 encoded input into a string of code points. + static std::optional strictDecode(std::string_view input); + + // Decodes the given UTF-8 encoded input into a string of code points. Unlike strictDecode(), + // each byte in an illegal byte sequence is replaced with the Unicode replacement character, + // U+FFFD. + static std::u32string lenientDecode(std::string_view input); + + // Encodes the given code point and appends it to the buffer. If the code point + // is an unpaired surrogate or outside of the valid Unicode range it is replaced + // with the replacement character, U+FFFD. + static std::string& encode(std::string *buffer, char32_t codePoint); + + // Encodes the given Unicode code point string as UTF-8. + static std::optional strictEncode(std::u32string_view input); + + // Encodes the given Unicode code point string as UTF-8. Unlike strictEncode(), + // each invalid Unicode code point is replaced with the Unicode replacement character, U+FFFD. 
+ static std::string lenientEncode(std::u32string_view input); + + private: + Utf8() = delete; + Utf8(const Utf8&) = delete; + Utf8(Utf8&&) = delete; + Utf8& operator=(const Utf8&) = delete; + Utf8& operator=(Utf8&&) = delete; + }; + +} diff --git a/runtime/Cpp/runtime/src/support/guid.cpp b/runtime/Cpp/runtime/src/support/guid.cpp deleted file mode 100755 index b6105d70c6..0000000000 --- a/runtime/Cpp/runtime/src/support/guid.cpp +++ /dev/null @@ -1,303 +0,0 @@ -/* - The MIT License (MIT) - - Copyright (c) 2014 Graeme Hill (http://graemehill.ca) - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
- */ - -#include "guid.h" - -#ifdef GUID_LIBUUID -#include -#endif - -#ifdef GUID_CFUUID -#include -#endif - -#ifdef GUID_WINDOWS -#include -#endif - -#ifdef GUID_ANDROID -#include -#endif - -using namespace std; - -// overload << so that it's easy to convert to a string -ostream &operator<<(ostream &s, const Guid &guid) -{ - return s << hex << setfill('0') - << setw(2) << (int)guid._bytes[0] - << setw(2) << (int)guid._bytes[1] - << setw(2) << (int)guid._bytes[2] - << setw(2) << (int)guid._bytes[3] - << "-" - << setw(2) << (int)guid._bytes[4] - << setw(2) << (int)guid._bytes[5] - << "-" - << setw(2) << (int)guid._bytes[6] - << setw(2) << (int)guid._bytes[7] - << "-" - << setw(2) << (int)guid._bytes[8] - << setw(2) << (int)guid._bytes[9] - << "-" - << setw(2) << (int)guid._bytes[10] - << setw(2) << (int)guid._bytes[11] - << setw(2) << (int)guid._bytes[12] - << setw(2) << (int)guid._bytes[13] - << setw(2) << (int)guid._bytes[14] - << setw(2) << (int)guid._bytes[15]; -} - -// create a guid from vector of bytes -Guid::Guid(const vector &bytes) -{ - _bytes = bytes; -} - -// create a guid from array of bytes -Guid::Guid(const unsigned char *bytes) -{ - _bytes.assign(bytes, bytes + 16); -} - -// create a guid from array of words -Guid::Guid(const uint16_t *bytes, bool reverse) -{ - if (reverse) { - for (size_t i = 8; i > 0; --i) - { - _bytes.push_back(bytes[i - 1] >> 8); - _bytes.push_back(bytes[i - 1] & 0xFF); - } - } else { - for (size_t i = 0; i < 8; ++i) - { - _bytes.push_back(bytes[i] & 0xFF); - _bytes.push_back(bytes[i] >> 8); - } - } -} - -// converts a single hex char to a number (0 - 15) -static unsigned char hexDigitToChar(char ch) -{ - if (ch > 47 && ch < 58) - return (unsigned char)(ch - 48); - - if (ch > 96 && ch < 103) - return (unsigned char)(ch - 87); - - if (ch > 64 && ch < 71) - return (unsigned char)(ch - 55); - - return 0; -} - -// converts the two hexadecimal characters to an unsigned char (a byte) -static unsigned char hexPairToChar(char a, char b) 
-{ - return hexDigitToChar(a) * 16 + hexDigitToChar(b); -} - -// create a guid from string -Guid::Guid(const string &fromString) -{ - _bytes.clear(); - - char charOne = 0, charTwo; - bool lookingForFirstChar = true; - - for (const char &ch : fromString) - { - if (ch == '-') - continue; - - if (lookingForFirstChar) - { - charOne = ch; - lookingForFirstChar = false; - } - else - { - charTwo = ch; - auto byte = hexPairToChar(charOne, charTwo); - _bytes.push_back(byte); - lookingForFirstChar = true; - } - } - -} - -// create empty guid -Guid::Guid() -{ - _bytes = vector(16, 0); -} - -// copy constructor -Guid::Guid(const Guid &other) -{ - _bytes = other._bytes; -} - -// overload assignment operator -Guid &Guid::operator=(const Guid &other) -{ - _bytes = other._bytes; - return *this; -} - -// overload equality operator -bool Guid::operator==(const Guid &other) const -{ - return _bytes == other._bytes; -} - -// overload inequality operator -bool Guid::operator!=(const Guid &other) const -{ - return !((*this) == other); -} - -const std::string Guid::toString() const -{ - std::stringstream os; - os << *this; - return os.str(); -} - -// This is the linux friendly implementation, but it could work on other -// systems that have libuuid available -#ifdef GUID_LIBUUID -Guid GuidGenerator::newGuid() -{ - uuid_t id; - uuid_generate(id); - return id; -} -#endif - -// this is the mac and ios version -#ifdef GUID_CFUUID -Guid GuidGenerator::newGuid() -{ - auto newId = CFUUIDCreate(NULL); - auto bytes = CFUUIDGetUUIDBytes(newId); - CFRelease(newId); - - const unsigned char byteArray[16] = - { - bytes.byte0, - bytes.byte1, - bytes.byte2, - bytes.byte3, - bytes.byte4, - bytes.byte5, - bytes.byte6, - bytes.byte7, - bytes.byte8, - bytes.byte9, - bytes.byte10, - bytes.byte11, - bytes.byte12, - bytes.byte13, - bytes.byte14, - bytes.byte15 - }; - return byteArray; -} -#endif - -// obviously this is the windows version -#ifdef GUID_WINDOWS -Guid GuidGenerator::newGuid() -{ - GUID newId; - 
CoCreateGuid(&newId); - - const unsigned char bytes[16] = - { - (newId.Data1 >> 24) & 0xFF, - (newId.Data1 >> 16) & 0xFF, - (newId.Data1 >> 8) & 0xFF, - (newId.Data1) & 0xff, - - (newId.Data2 >> 8) & 0xFF, - (newId.Data2) & 0xff, - - (newId.Data3 >> 8) & 0xFF, - (newId.Data3) & 0xFF, - - newId.Data4[0], - newId.Data4[1], - newId.Data4[2], - newId.Data4[3], - newId.Data4[4], - newId.Data4[5], - newId.Data4[6], - newId.Data4[7] - }; - - return bytes; -} -#endif - -// android version that uses a call to a java api -#ifdef GUID_ANDROID -GuidGenerator::GuidGenerator(JNIEnv *env) -{ - _env = env; - _uuidClass = env->FindClass("java/util/UUID"); - _newGuidMethod = env->GetStaticMethodID(_uuidClass, "randomUUID", "()Ljava/util/UUID;"); - _mostSignificantBitsMethod = env->GetMethodID(_uuidClass, "getMostSignificantBits", "()J"); - _leastSignificantBitsMethod = env->GetMethodID(_uuidClass, "getLeastSignificantBits", "()J"); -} - -Guid GuidGenerator::newGuid() -{ - jobject javaUuid = _env->CallStaticObjectMethod(_uuidClass, _newGuidMethod); - jlong mostSignificant = _env->CallLongMethod(javaUuid, _mostSignificantBitsMethod); - jlong leastSignificant = _env->CallLongMethod(javaUuid, _leastSignificantBitsMethod); - - unsigned char bytes[16] = - { - (mostSignificant >> 56) & 0xFF, - (mostSignificant >> 48) & 0xFF, - (mostSignificant >> 40) & 0xFF, - (mostSignificant >> 32) & 0xFF, - (mostSignificant >> 24) & 0xFF, - (mostSignificant >> 16) & 0xFF, - (mostSignificant >> 8) & 0xFF, - (mostSignificant) & 0xFF, - (leastSignificant >> 56) & 0xFF, - (leastSignificant >> 48) & 0xFF, - (leastSignificant >> 40) & 0xFF, - (leastSignificant >> 32) & 0xFF, - (leastSignificant >> 24) & 0xFF, - (leastSignificant >> 16) & 0xFF, - (leastSignificant >> 8) & 0xFF, - (leastSignificant) & 0xFF, - }; - return bytes; -} -#endif diff --git a/runtime/Cpp/runtime/src/support/guid.h b/runtime/Cpp/runtime/src/support/guid.h deleted file mode 100755 index b412497790..0000000000 --- 
a/runtime/Cpp/runtime/src/support/guid.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - The MIT License (MIT) - - Copyright (c) 2014 Graeme Hill (http://graemehill.ca) - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - */ -#pragma once - -#include -#include -#include -#include -#include -#include - -#ifdef GUID_ANDROID -#include -#endif - -// Class to represent a GUID/UUID. Each instance acts as a wrapper around a -// 16 byte value that can be passed around by value. It also supports -// conversion to string (via the stream operator <<) and conversion from a -// string via constructor. -class Guid -{ -public: - - // create a guid from vector of bytes - Guid(const std::vector &bytes); - - // create a guid from array of bytes - Guid(const unsigned char *bytes); - - // Create a guid from array of words. 
- Guid(const uint16_t *bytes, bool reverse); - - // create a guid from string - Guid(const std::string &fromString); - - // create empty guid - Guid(); - - // copy constructor - Guid(const Guid &other); - - // overload assignment operator - Guid &operator=(const Guid &other); - - // overload equality and inequality operator - bool operator==(const Guid &other) const; - bool operator!=(const Guid &other) const; - - const std::string toString() const; - std::vector::const_iterator begin() { return _bytes.begin(); } - std::vector::const_iterator end() { return _bytes.end(); } - std::vector::const_reverse_iterator rbegin() { return _bytes.rbegin(); } - std::vector::const_reverse_iterator rend() { return _bytes.rend(); } - - -private: - - // actual data - std::vector _bytes; - - // make the << operator a friend so it can access _bytes - friend std::ostream &operator<<(std::ostream &s, const Guid &guid); -}; - -// Class that can create new guids. The only reason this exists instead of -// just a global "newGuid" function is because some platforms will require -// that there is some attached context. In the case of android, we need to -// know what JNIEnv is being used to call back to Java, but the newGuid() -// function would no longer be cross-platform if we parameterized the android -// version. Instead, construction of the GuidGenerator may be different on -// each platform, but the use of newGuid is uniform. 
-class GuidGenerator -{ -public: -#ifdef GUID_ANDROID - GuidGenerator(JNIEnv *env); -#else - GuidGenerator() { } -#endif - - Guid newGuid(); - -#ifdef GUID_ANDROID -private: - JNIEnv *_env; - jclass _uuidClass; - jmethodID _newGuidMethod; - jmethodID _mostSignificantBitsMethod; - jmethodID _leastSignificantBitsMethod; -#endif -}; diff --git a/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h b/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h old mode 100755 new mode 100644 index d21795b20c..df41bc15ef --- a/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h +++ b/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include "tree/ParseTree.h" +#include "antlr4-common.h" #include "tree/ParseTreeVisitor.h" namespace antlr4 { @@ -14,7 +18,7 @@ namespace tree { public: /// The default implementation calls on the /// specified tree. - virtual antlrcpp::Any visit(ParseTree *tree) override { + std::any visit(ParseTree *tree) override { return tree->accept(this); } @@ -31,16 +35,16 @@ namespace tree { * the tree structure. Visitors that modify the tree should override this * method to behave properly in respect to the specific algorithm in use.

    */ - virtual antlrcpp::Any visitChildren(ParseTree *node) override { - antlrcpp::Any result = defaultResult(); + std::any visitChildren(ParseTree *node) override { + std::any result = defaultResult(); size_t n = node->children.size(); for (size_t i = 0; i < n; i++) { if (!shouldVisitNextChild(node, result)) { break; } - antlrcpp::Any childResult = node->children[i]->accept(this); - result = aggregateResult(result, childResult); + std::any childResult = node->children[i]->accept(this); + result = aggregateResult(std::move(result), std::move(childResult)); } return result; @@ -48,13 +52,13 @@ namespace tree { /// The default implementation returns the result of /// . - virtual antlrcpp::Any visitTerminal(TerminalNode * /*node*/) override { + std::any visitTerminal(TerminalNode * /*node*/) override { return defaultResult(); } /// The default implementation returns the result of /// . - virtual antlrcpp::Any visitErrorNode(ErrorNode * /*node*/) override { + std::any visitErrorNode(ErrorNode * /*node*/) override { return defaultResult(); } @@ -66,11 +70,11 @@ namespace tree { /// The default implementation of /// initializes its aggregate result to this value. ///

    - /// The base implementation returns {@code null}. + /// The base implementation returns {@code std::any()}. ///

    /// The default value returned by visitor methods. - virtual antlrcpp::Any defaultResult() { - return nullptr; // support isNotNull + virtual std::any defaultResult() { + return std::any(); } /// @@ -91,7 +95,7 @@ namespace tree { /// a child node. /// /// The updated aggregate result. - virtual antlrcpp::Any aggregateResult(antlrcpp::Any /*aggregate*/, const antlrcpp::Any &nextResult) { + virtual std::any aggregateResult(std::any /*aggregate*/, std::any nextResult) { return nextResult; } @@ -118,7 +122,7 @@ namespace tree { /// {@code true} to continue visiting children. Otherwise return /// {@code false} to stop visiting children and immediately return the /// current aggregate result from . - virtual bool shouldVisitNextChild(ParseTree * /*node*/, const antlrcpp::Any &/*currentResult*/) { + virtual bool shouldVisitNextChild(ParseTree * /*node*/, const std::any &/*currentResult*/) { return true; } diff --git a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp b/runtime/Cpp/runtime/src/tree/ErrorNode.cpp deleted file mode 100644 index ade2539afd..0000000000 --- a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp +++ /dev/null @@ -1,9 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "tree/ErrorNode.h" - -antlr4::tree::ErrorNode::~ErrorNode() { -} diff --git a/runtime/Cpp/runtime/src/tree/ErrorNode.h b/runtime/Cpp/runtime/src/tree/ErrorNode.h old mode 100755 new mode 100644 index 619f44ddc2..48e019b9b8 --- a/runtime/Cpp/runtime/src/tree/ErrorNode.h +++ b/runtime/Cpp/runtime/src/tree/ErrorNode.h @@ -5,14 +5,21 @@ #pragma once +#include "antlr4-common.h" +#include "tree/ParseTreeType.h" #include "tree/TerminalNode.h" namespace antlr4 { namespace tree { - class ANTLR4CPP_PUBLIC ErrorNode : public virtual TerminalNode { + class ANTLR4CPP_PUBLIC ErrorNode : public TerminalNode { public: - ~ErrorNode() override; + static bool is(const tree::ParseTree &parseTree) { return parseTree.getTreeType() == tree::ParseTreeType::ERROR; } + + static bool is(const tree::ParseTree *parseTree) { return parseTree != nullptr && is(*parseTree); } + + protected: + using TerminalNode::TerminalNode; }; } // namespace tree diff --git a/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp b/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp old mode 100755 new mode 100644 index fde942d354..e1bda994ae --- a/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp +++ b/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp @@ -3,21 +3,54 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -#include "Exceptions.h" +#include +#include +#include "misc/Interval.h" +#include "Token.h" +#include "RuleContext.h" #include "tree/ParseTreeVisitor.h" #include "tree/ErrorNodeImpl.h" using namespace antlr4; -using namespace antlr4::misc; using namespace antlr4::tree; -ErrorNodeImpl::ErrorNodeImpl(Token *token) : TerminalNodeImpl(token) { +Token* ErrorNodeImpl::getSymbol() const { + return symbol; } -ErrorNodeImpl::~ErrorNodeImpl() { +void ErrorNodeImpl::setParent(RuleContext *parent_) { + this->parent = parent_; } -antlrcpp::Any ErrorNodeImpl::accept(ParseTreeVisitor *visitor) { +misc::Interval ErrorNodeImpl::getSourceInterval() { + if (symbol == nullptr) { + return misc::Interval::INVALID; + } + + size_t tokenIndex = symbol->getTokenIndex(); + return misc::Interval(tokenIndex, tokenIndex); +} + +std::any ErrorNodeImpl::accept(ParseTreeVisitor *visitor) { return visitor->visitErrorNode(this); } + +std::string ErrorNodeImpl::getText() { + return symbol->getText(); +} + +std::string ErrorNodeImpl::toStringTree(Parser * /*parser*/, bool /*pretty*/) { + return toString(); +} + +std::string ErrorNodeImpl::toString() { + if (symbol->getType() == Token::EOF) { + return ""; + } + return symbol->getText(); +} + +std::string ErrorNodeImpl::toStringTree(bool /*pretty*/) { + return toString(); +} diff --git a/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h b/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h old mode 100755 new mode 100644 index b64b6f979c..280a3ebe53 --- a/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h +++ b/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h @@ -5,7 +5,11 @@ #pragma once +#include #include "tree/ErrorNode.h" +#include "tree/ParseTreeType.h" +#include "Token.h" +#include "antlr4-common.h" #include "tree/TerminalNodeImpl.h" #include "misc/Interval.h" @@ -21,12 +25,22 @@ namespace tree { /// and deletion as well as during "consume until error recovery set" /// upon no viable alternative exceptions. 
/// - class ANTLR4CPP_PUBLIC ErrorNodeImpl : public virtual TerminalNodeImpl, public virtual ErrorNode { + class ANTLR4CPP_PUBLIC ErrorNodeImpl : public ErrorNode { public: - ErrorNodeImpl(Token *token); - ~ErrorNodeImpl() override; + Token *symbol; - virtual antlrcpp::Any accept(ParseTreeVisitor *visitor) override; + explicit ErrorNodeImpl(Token *symbol) : ErrorNode(ParseTreeType::ERROR), symbol(symbol) {} + + Token* getSymbol() const override; + void setParent(RuleContext *parent) override; + misc::Interval getSourceInterval() override; + + std::any accept(ParseTreeVisitor *visitor) override; + + std::string getText() override; + std::string toStringTree(Parser *parser, bool pretty = false) override; + std::string toString() override; + std::string toStringTree(bool pretty = false) override; }; } // namespace tree diff --git a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp index a4b3efd73d..05e7e76f27 100644 --- a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp +++ b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp @@ -3,7 +3,11 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "support/CPPUtils.h" +#include "support/Casts.h" #include "tree/ParseTreeListener.h" #include "tree/ParseTree.h" @@ -12,29 +16,26 @@ #include "IterativeParseTreeWalker.h" using namespace antlr4::tree; +using namespace antlrcpp; void IterativeParseTreeWalker::walk(ParseTreeListener *listener, ParseTree *t) const { - - std::vector nodeStack; - std::vector indexStack; - + std::vector> stack; ParseTree *currentNode = t; size_t currentIndex = 0; while (currentNode != nullptr) { // pre-order visit - if (antlrcpp::is(currentNode)) { - listener->visitErrorNode(dynamic_cast(currentNode)); - } else if (antlrcpp::is(currentNode)) { - listener->visitTerminal((TerminalNode *)currentNode); + if (ErrorNode::is(*currentNode)) { + listener->visitErrorNode(downCast(currentNode)); + } else if (TerminalNode::is(*currentNode)) { + listener->visitTerminal(downCast(currentNode)); } else { enterRule(listener, currentNode); } // Move down to first child, if it exists. if (!currentNode->children.empty()) { - nodeStack.push_back(currentNode); - indexStack.push_back(currentIndex); + stack.push_back(std::make_pair(currentNode, currentIndex)); currentIndex = 0; currentNode = currentNode->children[0]; continue; @@ -43,29 +44,26 @@ void IterativeParseTreeWalker::walk(ParseTreeListener *listener, ParseTree *t) c // No child nodes, so walk tree. do { // post-order visit - if (!antlrcpp::is(currentNode)) { + if (!TerminalNode::is(*currentNode)) { exitRule(listener, currentNode); } // No parent, so no siblings. - if (nodeStack.empty()) { + if (stack.empty()) { currentNode = nullptr; currentIndex = 0; break; } // Move to next sibling if possible. - if (nodeStack.back()->children.size() > ++currentIndex) { - currentNode = nodeStack.back()->children[currentIndex]; + if (stack.back().first->children.size() > ++currentIndex) { + currentNode = stack.back().first->children[currentIndex]; break; } // No next sibling, so move up. 
- currentNode = nodeStack.back(); - nodeStack.pop_back(); - currentIndex = indexStack.back(); - indexStack.pop_back(); - + std::tie(currentNode, currentIndex) = stack.back(); + stack.pop_back(); } while (currentNode != nullptr); } } diff --git a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h index 8957d87e44..630488397b 100644 --- a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h +++ b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h @@ -46,7 +46,7 @@ namespace tree { */ class ANTLR4CPP_PUBLIC IterativeParseTreeWalker : public ParseTreeWalker { public: - virtual void walk(ParseTreeListener *listener, ParseTree *t) const override; + void walk(ParseTreeListener *listener, ParseTree *t) const override; }; } // namespace tree diff --git a/runtime/Cpp/runtime/src/tree/ParseTree.cpp b/runtime/Cpp/runtime/src/tree/ParseTree.cpp index b975a4064d..2d529cb011 100755 --- a/runtime/Cpp/runtime/src/tree/ParseTree.cpp +++ b/runtime/Cpp/runtime/src/tree/ParseTree.cpp @@ -7,9 +7,6 @@ using namespace antlr4::tree; -ParseTree::ParseTree() : parent(nullptr) { -} - bool ParseTree::operator == (const ParseTree &other) const { return &other == this; } diff --git a/runtime/Cpp/runtime/src/tree/ParseTree.h b/runtime/Cpp/runtime/src/tree/ParseTree.h old mode 100755 new mode 100644 index 088aac3efb..ffb67bd1db --- a/runtime/Cpp/runtime/src/tree/ParseTree.h +++ b/runtime/Cpp/runtime/src/tree/ParseTree.h @@ -5,7 +5,12 @@ #pragma once +#include +#include #include "support/Any.h" +#include "misc/Interval.h" +#include "antlr4-common.h" +#include "tree/ParseTreeType.h" namespace antlr4 { namespace tree { @@ -19,15 +24,15 @@ namespace tree { // ml: This class unites 4 Java classes: RuleNode, ParseTree, SyntaxTree and Tree. 
class ANTLR4CPP_PUBLIC ParseTree { public: - ParseTree(); ParseTree(ParseTree const&) = delete; - virtual ~ParseTree() {} + + virtual ~ParseTree() = default; ParseTree& operator=(ParseTree const&) = delete; /// The parent of this node. If the return value is null, then this /// node is the root of the tree. - ParseTree *parent; + ParseTree *parent = nullptr; /// If we are debugging or building a parse tree for a visitor, /// we need to track all of the tokens and rule invocations associated @@ -50,7 +55,7 @@ namespace tree { /// The needs a double dispatch method. // ml: This has been changed to use Any instead of a template parameter, to avoid the need of a virtual template function. - virtual antlrcpp::Any accept(ParseTreeVisitor *visitor) = 0; + virtual std::any accept(ParseTreeVisitor *visitor) = 0; /// Return the combined text of all leaf nodes. Does not get any /// off-channel tokens (if any) so won't return whitespace and @@ -74,6 +79,14 @@ namespace tree { * EOF is unspecified.

    */ virtual misc::Interval getSourceInterval() = 0; + + ParseTreeType getTreeType() const { return _treeType; } + + protected: + explicit ParseTree(ParseTreeType treeType) : _treeType(treeType) {} + + private: + const ParseTreeType _treeType; }; // A class to help managing ParseTree instances without the need of a shared_ptr. @@ -88,7 +101,7 @@ namespace tree { } void reset() { - for (auto entry : _allocated) + for (auto * entry : _allocated) delete entry; _allocated.clear(); } diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeType.h b/runtime/Cpp/runtime/src/tree/ParseTreeType.h new file mode 100644 index 0000000000..17e0512b00 --- /dev/null +++ b/runtime/Cpp/runtime/src/tree/ParseTreeType.h @@ -0,0 +1,22 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +#pragma once + +#include + +#include "antlr4-common.h" + +namespace antlr4 { +namespace tree { + + enum class ParseTreeType : size_t { + TERMINAL = 1, + ERROR = 2, + RULE = 3, + }; + +} // namespace tree +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h old mode 100755 new mode 100644 index 5a08599246..d690c1f212 --- a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h +++ b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h @@ -5,6 +5,7 @@ #pragma once +#include "antlr4-common.h" #include "support/Any.h" namespace antlr4 { @@ -27,7 +28,7 @@ namespace tree { /// /// The to visit. /// The result of visiting the parse tree. - virtual antlrcpp::Any visit(ParseTree *tree) = 0; + virtual std::any visit(ParseTree *tree) = 0; /// /// Visit the children of a node, and return a user-defined result of the @@ -35,21 +36,21 @@ namespace tree { /// /// The whose children should be visited. /// The result of visiting the children of the node. 
- virtual antlrcpp::Any visitChildren(ParseTree *node) = 0; + virtual std::any visitChildren(ParseTree *node) = 0; /// /// Visit a terminal node, and return a user-defined result of the operation. /// /// The to visit. /// The result of visiting the node. - virtual antlrcpp::Any visitTerminal(TerminalNode *node) = 0; + virtual std::any visitTerminal(TerminalNode *node) = 0; /// /// Visit an error node, and return a user-defined result of the operation. /// /// The to visit. /// The result of visiting the node. - virtual antlrcpp::Any visitErrorNode(ErrorNode *node) = 0; + virtual std::any visitErrorNode(ErrorNode *node) = 0; }; diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp b/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp index 998c9ed55d..1eeabff883 100755 --- a/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp +++ b/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp @@ -7,6 +7,7 @@ #include "ParserRuleContext.h" #include "tree/ParseTreeListener.h" #include "support/CPPUtils.h" +#include "support/Casts.h" #include "tree/IterativeParseTreeWalker.h" #include "tree/ParseTreeWalker.h" @@ -17,15 +18,13 @@ using namespace antlrcpp; static IterativeParseTreeWalker defaultWalker; ParseTreeWalker &ParseTreeWalker::DEFAULT = defaultWalker; -ParseTreeWalker::~ParseTreeWalker() { -} - void ParseTreeWalker::walk(ParseTreeListener *listener, ParseTree *t) const { - if (is(t)) { - listener->visitErrorNode(dynamic_cast(t)); + if (ErrorNode::is(*t)) { + listener->visitErrorNode(downCast(t)); return; - } else if (is(t)) { - listener->visitTerminal(dynamic_cast(t)); + } + if (TerminalNode::is(*t)) { + listener->visitTerminal(downCast(t)); return; } @@ -37,13 +36,13 @@ void ParseTreeWalker::walk(ParseTreeListener *listener, ParseTree *t) const { } void ParseTreeWalker::enterRule(ParseTreeListener *listener, ParseTree *r) const { - ParserRuleContext *ctx = dynamic_cast(r); + auto *ctx = downCast(r); listener->enterEveryRule(ctx); ctx->enterRule(listener); } void 
ParseTreeWalker::exitRule(ParseTreeListener *listener, ParseTree *r) const { - ParserRuleContext *ctx = dynamic_cast(r); + auto *ctx = downCast(r); ctx->exitRule(listener); listener->exitEveryRule(ctx); } diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h b/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h index ca3e241802..375e659785 100755 --- a/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h +++ b/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h @@ -14,16 +14,40 @@ namespace tree { public: static ParseTreeWalker &DEFAULT; - virtual ~ParseTreeWalker(); - + virtual ~ParseTreeWalker() = default; + + /** + * + * Performs a walk on the given parse tree starting at the root and going down recursively + * with depth-first search. On each node, is called before + * recursively walking down into child nodes, then + * is called after the recursive call to wind up. + * + * The listener used by the walker to process grammar rules + * The parse tree to be walked on + */ virtual void walk(ParseTreeListener *listener, ParseTree *t) const; protected: - /// The discovery of a rule node, involves sending two events: the generic - /// and a - /// -specific event. First we trigger the generic and then - /// the rule specific. We do them in reverse order upon finishing the node. 
+ + /** + * + * Enters a grammar rule by first triggering the generic event + * then by triggering the event specific to the given parse tree node + * + * The listener responding to the trigger events + * The grammar rule containing the rule context + */ virtual void enterRule(ParseTreeListener *listener, ParseTree *r) const; + + /** + * + * Exits a grammar rule by first triggering the event specific to the given parse tree node + * then by triggering the generic event + * + * The listener responding to the trigger events + * The grammar rule containing the rule context + */ virtual void exitRule(ParseTreeListener *listener, ParseTree *r) const; }; diff --git a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp b/runtime/Cpp/runtime/src/tree/TerminalNode.cpp deleted file mode 100644 index d630469c70..0000000000 --- a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp +++ /dev/null @@ -1,9 +0,0 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -#include "tree/TerminalNode.h" - -antlr4::tree::TerminalNode::~TerminalNode() { -} diff --git a/runtime/Cpp/runtime/src/tree/TerminalNode.h b/runtime/Cpp/runtime/src/tree/TerminalNode.h old mode 100755 new mode 100644 index 7108f70de0..0cea382d6a --- a/runtime/Cpp/runtime/src/tree/TerminalNode.h +++ b/runtime/Cpp/runtime/src/tree/TerminalNode.h @@ -5,6 +5,9 @@ #pragma once +#include "antlr4-common.h" +#include "tree/ParseTreeType.h" +#include "Token.h" #include "tree/ParseTree.h" namespace antlr4 { @@ -12,9 +15,14 @@ namespace tree { class ANTLR4CPP_PUBLIC TerminalNode : public ParseTree { public: - ~TerminalNode() override; + static bool is(const tree::ParseTree &parseTree) { + const auto treeType = parseTree.getTreeType(); + return treeType == ParseTreeType::TERMINAL || treeType == ParseTreeType::ERROR; + } - virtual Token* getSymbol() = 0; + static bool is(const tree::ParseTree *parseTree) { return parseTree != nullptr && is(*parseTree); } + + virtual Token* getSymbol() const = 0; /** Set the parent for this leaf node. * @@ -26,6 +34,9 @@ namespace tree { * @since 4.7 */ virtual void setParent(RuleContext *parent) = 0; + + protected: + using ParseTree::ParseTree; }; } // namespace tree diff --git a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp old mode 100755 new mode 100644 index 7ab121b732..df3c103d85 --- a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp +++ b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp @@ -3,6 +3,8 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "misc/Interval.h" #include "Token.h" #include "RuleContext.h" @@ -13,10 +15,7 @@ using namespace antlr4; using namespace antlr4::tree; -TerminalNodeImpl::TerminalNodeImpl(Token *symbol_) : symbol(symbol_) { -} - -Token* TerminalNodeImpl::getSymbol() { +Token* TerminalNodeImpl::getSymbol() const { return symbol; } @@ -33,7 +32,7 @@ misc::Interval TerminalNodeImpl::getSourceInterval() { return misc::Interval(tokenIndex, tokenIndex); } -antlrcpp::Any TerminalNodeImpl::accept(ParseTreeVisitor *visitor) { +std::any TerminalNodeImpl::accept(ParseTreeVisitor *visitor) { return visitor->visitTerminal(this); } diff --git a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h old mode 100755 new mode 100644 index 6f65d82047..6e6132c63e --- a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h +++ b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h @@ -5,28 +5,32 @@ #pragma once +#include +#include "antlr4-common.h" +#include "tree/ParseTreeType.h" +#include "misc/Interval.h" +#include "Token.h" #include "tree/TerminalNode.h" namespace antlr4 { namespace tree { - class ANTLR4CPP_PUBLIC TerminalNodeImpl : public virtual TerminalNode { + class ANTLR4CPP_PUBLIC TerminalNodeImpl : public TerminalNode { public: Token *symbol; - TerminalNodeImpl(Token *symbol); + explicit TerminalNodeImpl(Token *symbol) : TerminalNode(ParseTreeType::TERMINAL), symbol(symbol) {} - virtual Token* getSymbol() override; - virtual void setParent(RuleContext *parent) override; - virtual misc::Interval getSourceInterval() override; + Token* getSymbol() const override; + void setParent(RuleContext *parent) override; + misc::Interval getSourceInterval() override; - virtual antlrcpp::Any accept(ParseTreeVisitor *visitor) override; - - virtual std::string getText() override; - virtual std::string toStringTree(Parser *parser, bool pretty = false) override; - virtual std::string toString() override; - virtual std::string toStringTree(bool 
pretty = false) override; + std::any accept(ParseTreeVisitor *visitor) override; + std::string getText() override; + std::string toStringTree(Parser *parser, bool pretty = false) override; + std::string toString() override; + std::string toStringTree(bool pretty = false) override; }; } // namespace tree diff --git a/runtime/Cpp/runtime/src/tree/Trees.cpp b/runtime/Cpp/runtime/src/tree/Trees.cpp old mode 100755 new mode 100644 index 72b9b8b487..86e10d143e --- a/runtime/Cpp/runtime/src/tree/Trees.cpp +++ b/runtime/Cpp/runtime/src/tree/Trees.cpp @@ -3,7 +3,12 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include #include "tree/ErrorNode.h" +#include "antlr4-common.h" #include "Parser.h" #include "ParserRuleContext.h" #include "support/CPPUtils.h" @@ -192,7 +197,7 @@ std::vector Trees::getDescendants(ParseTree *t) { std::size_t n = t->children.size(); for (size_t i = 0 ; i < n ; i++) { auto descentants = getDescendants(t->children[i]); - for (auto entry: descentants) { + for (auto *entry: descentants) { nodes.push_back(entry); } } diff --git a/runtime/Cpp/runtime/src/tree/Trees.h b/runtime/Cpp/runtime/src/tree/Trees.h old mode 100755 new mode 100644 index d9d04624f9..00ed185f8d --- a/runtime/Cpp/runtime/src/tree/Trees.h +++ b/runtime/Cpp/runtime/src/tree/Trees.h @@ -5,7 +5,11 @@ #pragma once +#include +#include +#include #include "tree/TerminalNode.h" +#include "antlr4-common.h" #include "ParserRuleContext.h" #include "Recognizer.h" diff --git a/runtime/Cpp/runtime/src/tree/pattern/Chunk.h b/runtime/Cpp/runtime/src/tree/pattern/Chunk.h old mode 100755 new mode 100644 index 42e7838d47..23421ff687 --- a/runtime/Cpp/runtime/src/tree/pattern/Chunk.h +++ b/runtime/Cpp/runtime/src/tree/pattern/Chunk.h @@ -5,6 +5,7 @@ #pragma once +#include #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp b/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp old mode 
100755 new mode 100644 index ce34b3f227..8add2c7174 --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp @@ -3,6 +3,9 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include #include "Exceptions.h" #include "tree/pattern/ParseTreeMatch.h" diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h b/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h old mode 100755 new mode 100644 index eefde46c83..40df05227d --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp old mode 100755 new mode 100644 index cfa588f310..877f43ff21 --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp @@ -3,6 +3,8 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "tree/ParseTree.h" #include "tree/pattern/ParseTreePatternMatcher.h" #include "tree/pattern/ParseTreeMatch.h" @@ -37,7 +39,7 @@ std::vector ParseTreePattern::findAll(ParseTree *tree, const std xpath::XPath finder(_matcher->getParser(), xpath); std::vector subtrees = finder.evaluate(tree); std::vector matches; - for (auto t : subtrees) { + for (auto *t : subtrees) { ParseTreeMatch aMatch = match(t); if (aMatch.succeeded()) { matches.push_back(aMatch); diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h old mode 100755 new mode 100644 index d5b86ff473..657b2b2d0b --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h @@ -5,6 +5,8 @@ #pragma once +#include +#include #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp old mode 100755 new mode 100644 index 2e58a96259..2c798c5a7b --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp @@ -3,7 +3,15 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include +#include +#include +#include #include "tree/pattern/ParseTreePattern.h" +#include "Token.h" +#include "antlr4-common.h" #include "tree/pattern/ParseTreeMatch.h" #include "tree/TerminalNode.h" #include "CommonTokenStream.h" @@ -21,7 +29,6 @@ #include "ANTLRInputStream.h" #include "support/Arrays.h" #include "Exceptions.h" -#include "support/StringUtils.h" #include "support/CPPUtils.h" #include "tree/pattern/ParseTreePatternMatcher.h" @@ -109,7 +116,7 @@ ParseTreePattern ParseTreePatternMatcher::compile(const std::string &pattern, in throw e; #else } catch (std::exception & /*e*/) { - std::throw_with_nested((const char*)"Cannot invoke start rule"); // Wrap any other exception. We should however probably use one of the ANTLR exceptions here. + std::throw_with_nested(RuntimeException("Cannot invoke start rule")); // Wrap any other exception. #endif } diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h old mode 100755 new mode 100644 index e77c7bc5e2..28e4133f9f --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h @@ -5,6 +5,12 @@ #pragma once +#include +#include +#include +#include +#include "antlr4-common.h" +#include "Token.h" #include "Exceptions.h" namespace antlr4 { @@ -73,7 +79,7 @@ namespace pattern { class CannotInvokeStartRule : public RuntimeException { public: CannotInvokeStartRule(const RuntimeException &e); - ~CannotInvokeStartRule(); + ~CannotInvokeStartRule() override; }; // Fixes https://github.com/antlr/antlr4/issues/413 @@ -82,7 +88,7 @@ namespace pattern { public: StartRuleDoesNotConsumeFullPattern() = default; StartRuleDoesNotConsumeFullPattern(StartRuleDoesNotConsumeFullPattern const&) = default; - ~StartRuleDoesNotConsumeFullPattern(); + ~StartRuleDoesNotConsumeFullPattern() override; StartRuleDoesNotConsumeFullPattern& 
operator=(StartRuleDoesNotConsumeFullPattern const&) = default; }; diff --git a/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp b/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp old mode 100755 new mode 100644 index 4e33f989d9..c4af63c58c --- a/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp @@ -3,8 +3,11 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include #include "Exceptions.h" +#include "antlr4-common.h" #include "tree/pattern/RuleTagToken.h" using namespace antlr4::tree::pattern; diff --git a/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h b/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h old mode 100755 new mode 100644 index 368ae41b84..1ba30e996b --- a/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h +++ b/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "Token.h" namespace antlr4 { @@ -73,7 +76,7 @@ namespace pattern { ///

    /// Rule tag tokens are always placed on the . /// - virtual size_t getChannel() const override; + size_t getChannel() const override; ///

    /// {@inheritDoc} @@ -81,35 +84,35 @@ namespace pattern { /// This method returns the rule tag formatted with {@code <} and {@code >} /// delimiters. /// - virtual std::string getText() const override; + std::string getText() const override; /// Rule tag tokens have types assigned according to the rule bypass /// transitions created during ATN deserialization. - virtual size_t getType() const override; + size_t getType() const override; /// The implementation for always returns 0. - virtual size_t getLine() const override; + size_t getLine() const override; /// The implementation for always returns INVALID_INDEX. - virtual size_t getCharPositionInLine() const override; + size_t getCharPositionInLine() const override; /// The implementation for always returns INVALID_INDEX. - virtual size_t getTokenIndex() const override; + size_t getTokenIndex() const override; /// The implementation for always returns INVALID_INDEX. - virtual size_t getStartIndex() const override; + size_t getStartIndex() const override; /// The implementation for always returns INVALID_INDEX. - virtual size_t getStopIndex() const override; + size_t getStopIndex() const override; /// The implementation for always returns {@code null}. - virtual TokenSource *getTokenSource() const override; + TokenSource *getTokenSource() const override; /// The implementation for always returns {@code null}. - virtual CharStream *getInputStream() const override; + CharStream *getInputStream() const override; /// The implementation for returns a string of the form {@code ruleName:bypassTokenType}. 
- virtual std::string toString() const override; + std::string toString() const override; }; } // namespace pattern diff --git a/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp b/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp old mode 100755 new mode 100644 index 77f2b4c9ca..96371427f5 --- a/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "Exceptions.h" #include "tree/pattern/TagChunk.h" diff --git a/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h b/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h old mode 100755 new mode 100644 index 3d0c9f8d84..e997ed9f9e --- a/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h +++ b/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "Chunk.h" namespace antlr4 { @@ -37,7 +39,7 @@ namespace pattern { /// if {@code tag} is {@code null} or /// empty. TagChunk(const std::string &tag); - virtual ~TagChunk(); + ~TagChunk() override; /// /// Construct a new instance of using the specified label @@ -70,7 +72,7 @@ namespace pattern { /// are returned in the form {@code label:tag}, and unlabeled tags are /// returned as just the tag name. /// - virtual std::string toString() override; + std::string toString() override; private: /// This is the backing field for . diff --git a/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp b/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp old mode 100755 new mode 100644 index f8dcfb0df7..1d9ab71233 --- a/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "Exceptions.h" #include "tree/pattern/TextChunk.h" diff --git a/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h b/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h old mode 100755 new mode 100644 index 1cbc0ddb2e..30f663229d --- a/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h +++ b/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "Chunk.h" namespace antlr4 { @@ -29,7 +31,7 @@ namespace pattern { /// if {@code text} is {@code null}. public: TextChunk(const std::string &text); - virtual ~TextChunk(); + ~TextChunk() override; /// /// Gets the raw text of this chunk. @@ -43,7 +45,7 @@ namespace pattern { /// The implementation for returns the result of /// in single quotes. /// - virtual std::string toString() override; + std::string toString() override; }; } // namespace pattern diff --git a/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp b/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp old mode 100755 new mode 100644 index 7d6cc9a9c7..ac3c63d732 --- a/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. */ +#include #include "tree/pattern/TokenTagToken.h" using namespace antlr4::tree::pattern; diff --git a/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h b/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h old mode 100755 new mode 100644 index 9013fb8c5e..29c6688443 --- a/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h +++ b/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "CommonToken.h" namespace antlr4 { @@ -64,7 +66,7 @@ namespace pattern { /// The implementation for returns the token tag /// formatted with {@code <} and {@code >} delimiters. 
/// - virtual std::string getText() const override; + std::string getText() const override; /// /// {@inheritDoc} @@ -72,7 +74,7 @@ namespace pattern { /// The implementation for returns a string of the form /// {@code tokenName:type}. /// - virtual std::string toString() const override; + std::string toString() const override; }; } // namespace pattern diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp old mode 100755 new mode 100644 index c7cc3b864d..7ebd1b8691 --- a/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp @@ -3,7 +3,15 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include +#include #include "XPathLexer.h" +#include "ANTLRInputStream.h" +#include "Token.h" +#include "antlr4-common.h" #include "XPathLexerErrorListener.h" #include "XPathElement.h" #include "XPathWildcardAnywhereElement.h" @@ -137,7 +145,7 @@ std::vector XPath::evaluate(ParseTree *t) { while (i < elements.size()) { std::vector next; - for (auto node : work) { + for (auto *node : work) { if (!node->children.empty()) { // only try to match next element if it has children // e.g., //func/*/stat might have a token node for which diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPath.h b/runtime/Cpp/runtime/src/tree/xpath/XPath.h old mode 100755 new mode 100644 index e38d482d58..ea8f226aae --- a/runtime/Cpp/runtime/src/tree/xpath/XPath.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPath.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include "Token.h" #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp old mode 100755 new mode 100644 index 64b122df13..f6fa33b0c0 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp @@ -3,6 +3,8 @@ * can be found in the 
LICENSE.txt file in the project root. */ +#include +#include #include "support/CPPUtils.h" #include "XPathElement.h" diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h b/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h old mode 100755 new mode 100644 index f339117d7f..7a712cab49 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h @@ -5,6 +5,8 @@ #pragma once +#include +#include #include "antlr4-common.h" namespace antlr4 { diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp index fb18788938..250d15d30c 100644 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp @@ -1,11 +1,122 @@ + +// Generated from XPathLexer.g4 by ANTLR 4.13.1 + + +#include +#include +#include +#include +#include +#include #include "XPathLexer.h" using namespace antlr4; + +using namespace antlr4; + +namespace { + +struct XPathLexerStaticData final { + XPathLexerStaticData(std::vector ruleNames, + std::vector channelNames, + std::vector modeNames, + std::vector literalNames, + std::vector symbolicNames) + : ruleNames(std::move(ruleNames)), channelNames(std::move(channelNames)), + modeNames(std::move(modeNames)), literalNames(std::move(literalNames)), + symbolicNames(std::move(symbolicNames)), + vocabulary(this->literalNames, this->symbolicNames) {} + + XPathLexerStaticData(const XPathLexerStaticData&) = delete; + XPathLexerStaticData(XPathLexerStaticData&&) = delete; + XPathLexerStaticData& operator=(const XPathLexerStaticData&) = delete; + XPathLexerStaticData& operator=(XPathLexerStaticData&&) = delete; + + std::vector decisionToDFA; + antlr4::atn::PredictionContextCache sharedContextCache; + const std::vector ruleNames; + const std::vector channelNames; + const std::vector modeNames; + const std::vector literalNames; + const std::vector symbolicNames; + const antlr4::dfa::Vocabulary vocabulary; + 
antlr4::atn::SerializedATNView serializedATN; + std::unique_ptr atn; +}; + +::antlr4::internal::OnceFlag xpathlexerLexerOnceFlag; +#if ANTLR4_USE_THREAD_LOCAL_CACHE +static thread_local +#endif +std::unique_ptr xpathlexerLexerStaticData = nullptr; + +void xpathlexerLexerInitialize() { +#if ANTLR4_USE_THREAD_LOCAL_CACHE + if (xpathlexerLexerStaticData != nullptr) { + return; + } +#else + assert(xpathlexerLexerStaticData == nullptr); +#endif + auto staticData = std::make_unique( + std::vector{ + "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar", "NameStartChar", + "STRING" + }, + std::vector{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN" + }, + std::vector{ + "DEFAULT_MODE" + }, + std::vector{ + "", "", "", "'//'", "'/'", "'*'", "'!'" + }, + std::vector{ + "", "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG", + "ID", "STRING" + } + ); + static const int32_t serializedATNSegment[] = { + 4,0,8,50,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6,7,6, + 2,7,7,7,1,0,1,0,1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,5,4,29,8,4,10,4,12, + 4,32,9,4,1,4,1,4,1,5,1,5,3,5,38,8,5,1,6,1,6,1,7,1,7,5,7,44,8,7,10,7,12, + 7,47,9,7,1,7,1,7,1,45,0,8,1,3,3,4,5,5,7,6,9,7,11,0,13,0,15,8,1,0,2,5, + 0,48,57,95,95,183,183,768,879,8255,8256,13,0,65,90,97,122,192,214,216, + 246,248,767,880,893,895,8191,8204,8205,8304,8591,11264,12271,12289,55295, + 63744,64975,65008,65535,50,0,1,1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1, + 0,0,0,0,9,1,0,0,0,0,15,1,0,0,0,1,17,1,0,0,0,3,20,1,0,0,0,5,22,1,0,0,0, + 7,24,1,0,0,0,9,26,1,0,0,0,11,37,1,0,0,0,13,39,1,0,0,0,15,41,1,0,0,0,17, + 18,5,47,0,0,18,19,5,47,0,0,19,2,1,0,0,0,20,21,5,47,0,0,21,4,1,0,0,0,22, + 23,5,42,0,0,23,6,1,0,0,0,24,25,5,33,0,0,25,8,1,0,0,0,26,30,3,13,6,0,27, + 29,3,11,5,0,28,27,1,0,0,0,29,32,1,0,0,0,30,28,1,0,0,0,30,31,1,0,0,0,31, + 33,1,0,0,0,32,30,1,0,0,0,33,34,6,4,0,0,34,10,1,0,0,0,35,38,3,13,6,0,36, + 38,7,0,0,0,37,35,1,0,0,0,37,36,1,0,0,0,38,12,1,0,0,0,39,40,7,1,0,0,40, + 
14,1,0,0,0,41,45,5,39,0,0,42,44,9,0,0,0,43,42,1,0,0,0,44,47,1,0,0,0,45, + 46,1,0,0,0,45,43,1,0,0,0,46,48,1,0,0,0,47,45,1,0,0,0,48,49,5,39,0,0,49, + 16,1,0,0,0,4,0,30,37,45,1,1,4,0 + }; + staticData->serializedATN = antlr4::atn::SerializedATNView(serializedATNSegment, sizeof(serializedATNSegment) / sizeof(serializedATNSegment[0])); + + antlr4::atn::ATNDeserializer deserializer; + staticData->atn = deserializer.deserialize(staticData->serializedATN); + + const size_t count = staticData->atn->getNumberOfDecisions(); + staticData->decisionToDFA.reserve(count); + for (size_t i = 0; i < count; i++) { + staticData->decisionToDFA.emplace_back(staticData->atn->getDecisionState(i), i); + } + xpathlexerLexerStaticData = std::move(staticData); +} + +} + XPathLexer::XPathLexer(CharStream *input) : Lexer(input) { - _interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache); + XPathLexer::initialize(); + _interpreter = new atn::LexerATNSimulator(this, *xpathlexerLexerStaticData->atn, xpathlexerLexerStaticData->decisionToDFA, xpathlexerLexerStaticData->sharedContextCache); } XPathLexer::~XPathLexer() { @@ -17,44 +128,40 @@ std::string XPathLexer::getGrammarFileName() const { } const std::vector& XPathLexer::getRuleNames() const { - return _ruleNames; + return xpathlexerLexerStaticData->ruleNames; } const std::vector& XPathLexer::getChannelNames() const { - return _channelNames; + return xpathlexerLexerStaticData->channelNames; } const std::vector& XPathLexer::getModeNames() const { - return _modeNames; -} - -const std::vector& XPathLexer::getTokenNames() const { - return _tokenNames; + return xpathlexerLexerStaticData->modeNames; } -dfa::Vocabulary& XPathLexer::getVocabulary() const { - return _vocabulary; +const dfa::Vocabulary& XPathLexer::getVocabulary() const { + return xpathlexerLexerStaticData->vocabulary; } -const std::vector XPathLexer::getSerializedATN() const { - return _serializedATN; +antlr4::atn::SerializedATNView 
XPathLexer::getSerializedATN() const { + return xpathlexerLexerStaticData->serializedATN; } const atn::ATN& XPathLexer::getATN() const { - return _atn; + return *xpathlexerLexerStaticData->atn; } void XPathLexer::action(RuleContext *context, size_t ruleIndex, size_t actionIndex) { switch (ruleIndex) { - case 4: IDAction(dynamic_cast(context), actionIndex); break; + case 4: IDAction(antlrcpp::downCast(context), actionIndex); break; default: break; } } -void XPathLexer::IDAction(antlr4::RuleContext * /*context*/, size_t actionIndex) { +void XPathLexer::IDAction(antlr4::RuleContext *context, size_t actionIndex) { switch (actionIndex) { case 0: if (isupper(getText()[0])) @@ -70,104 +177,10 @@ void XPathLexer::IDAction(antlr4::RuleContext * /*context*/, size_t actionIndex) -// Static vars and initialization. -std::vector XPathLexer::_decisionToDFA; -atn::PredictionContextCache XPathLexer::_sharedContextCache; - -// We own the ATN which in turn owns the ATN states. -atn::ATN XPathLexer::_atn; -std::vector XPathLexer::_serializedATN; - -std::vector XPathLexer::_ruleNames = { - "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar", "NameStartChar", - "STRING" -}; - -std::vector XPathLexer::_channelNames = { - "DEFAULT_TOKEN_CHANNEL", "HIDDEN" -}; - -std::vector XPathLexer::_modeNames = { - "DEFAULT_MODE" -}; - -std::vector XPathLexer::_literalNames = { - "", "", "", "'//'", "'/'", "'*'", "'!'" -}; - -std::vector XPathLexer::_symbolicNames = { - "", "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", - "STRING" -}; - -dfa::Vocabulary XPathLexer::_vocabulary(_literalNames, _symbolicNames); - -std::vector XPathLexer::_tokenNames; - -XPathLexer::Initializer::Initializer() { - // This code could be in a static initializer lambda, but VS doesn't allow access to private class members from there. 
- for (size_t i = 0; i < _symbolicNames.size(); ++i) { - std::string name = _vocabulary.getLiteralName(i); - if (name.empty()) { - name = _vocabulary.getSymbolicName(i); - } - - if (name.empty()) { - _tokenNames.push_back(""); - } else { - _tokenNames.push_back(name); - } - } - - _serializedATN = { - 0x3, 0x430, 0xd6d1, 0x8206, 0xad2d, 0x4417, 0xaef1, 0x8d80, 0xaadd, - 0x2, 0xa, 0x34, 0x8, 0x1, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3, 0x9, 0x3, 0x4, - 0x4, 0x9, 0x4, 0x4, 0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7, 0x9, - 0x7, 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9, 0x9, 0x3, 0x2, 0x3, 0x2, 0x3, - 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 0x5, 0x3, 0x5, 0x3, - 0x6, 0x3, 0x6, 0x7, 0x6, 0x1f, 0xa, 0x6, 0xc, 0x6, 0xe, 0x6, 0x22, 0xb, - 0x6, 0x3, 0x6, 0x3, 0x6, 0x3, 0x7, 0x3, 0x7, 0x5, 0x7, 0x28, 0xa, 0x7, - 0x3, 0x8, 0x3, 0x8, 0x3, 0x9, 0x3, 0x9, 0x7, 0x9, 0x2e, 0xa, 0x9, 0xc, - 0x9, 0xe, 0x9, 0x31, 0xb, 0x9, 0x3, 0x9, 0x3, 0x9, 0x3, 0x2f, 0x2, 0xa, - 0x3, 0x5, 0x5, 0x6, 0x7, 0x7, 0x9, 0x8, 0xb, 0x9, 0xd, 0x2, 0xf, 0x2, - 0x11, 0xa, 0x3, 0x2, 0x4, 0x7, 0x2, 0x32, 0x3b, 0x61, 0x61, 0xb9, 0xb9, - 0x302, 0x371, 0x2041, 0x2042, 0xf, 0x2, 0x43, 0x5c, 0x63, 0x7c, 0xc2, - 0xd8, 0xda, 0xf8, 0xfa, 0x301, 0x372, 0x37f, 0x381, 0x2001, 0x200e, - 0x200f, 0x2072, 0x2191, 0x2c02, 0x2ff1, 0x3003, 0xd801, 0xf902, 0xfdd1, - 0xfdf2, 0x1, 0x34, 0x2, 0x3, 0x3, 0x2, 0x2, 0x2, 0x2, 0x5, 0x3, 0x2, - 0x2, 0x2, 0x2, 0x7, 0x3, 0x2, 0x2, 0x2, 0x2, 0x9, 0x3, 0x2, 0x2, 0x2, - 0x2, 0xb, 0x3, 0x2, 0x2, 0x2, 0x2, 0x11, 0x3, 0x2, 0x2, 0x2, 0x3, 0x13, - 0x3, 0x2, 0x2, 0x2, 0x5, 0x16, 0x3, 0x2, 0x2, 0x2, 0x7, 0x18, 0x3, 0x2, - 0x2, 0x2, 0x9, 0x1a, 0x3, 0x2, 0x2, 0x2, 0xb, 0x1c, 0x3, 0x2, 0x2, 0x2, - 0xd, 0x27, 0x3, 0x2, 0x2, 0x2, 0xf, 0x29, 0x3, 0x2, 0x2, 0x2, 0x11, - 0x2b, 0x3, 0x2, 0x2, 0x2, 0x13, 0x14, 0x7, 0x31, 0x2, 0x2, 0x14, 0x15, - 0x7, 0x31, 0x2, 0x2, 0x15, 0x4, 0x3, 0x2, 0x2, 0x2, 0x16, 0x17, 0x7, - 0x31, 0x2, 0x2, 0x17, 0x6, 0x3, 0x2, 0x2, 0x2, 0x18, 0x19, 0x7, 0x2c, - 0x2, 0x2, 0x19, 0x8, 0x3, 0x2, 
0x2, 0x2, 0x1a, 0x1b, 0x7, 0x23, 0x2, - 0x2, 0x1b, 0xa, 0x3, 0x2, 0x2, 0x2, 0x1c, 0x20, 0x5, 0xf, 0x8, 0x2, - 0x1d, 0x1f, 0x5, 0xd, 0x7, 0x2, 0x1e, 0x1d, 0x3, 0x2, 0x2, 0x2, 0x1f, - 0x22, 0x3, 0x2, 0x2, 0x2, 0x20, 0x1e, 0x3, 0x2, 0x2, 0x2, 0x20, 0x21, - 0x3, 0x2, 0x2, 0x2, 0x21, 0x23, 0x3, 0x2, 0x2, 0x2, 0x22, 0x20, 0x3, - 0x2, 0x2, 0x2, 0x23, 0x24, 0x8, 0x6, 0x2, 0x2, 0x24, 0xc, 0x3, 0x2, - 0x2, 0x2, 0x25, 0x28, 0x5, 0xf, 0x8, 0x2, 0x26, 0x28, 0x9, 0x2, 0x2, - 0x2, 0x27, 0x25, 0x3, 0x2, 0x2, 0x2, 0x27, 0x26, 0x3, 0x2, 0x2, 0x2, - 0x28, 0xe, 0x3, 0x2, 0x2, 0x2, 0x29, 0x2a, 0x9, 0x3, 0x2, 0x2, 0x2a, - 0x10, 0x3, 0x2, 0x2, 0x2, 0x2b, 0x2f, 0x7, 0x29, 0x2, 0x2, 0x2c, 0x2e, - 0xb, 0x2, 0x2, 0x2, 0x2d, 0x2c, 0x3, 0x2, 0x2, 0x2, 0x2e, 0x31, 0x3, - 0x2, 0x2, 0x2, 0x2f, 0x30, 0x3, 0x2, 0x2, 0x2, 0x2f, 0x2d, 0x3, 0x2, - 0x2, 0x2, 0x30, 0x32, 0x3, 0x2, 0x2, 0x2, 0x31, 0x2f, 0x3, 0x2, 0x2, - 0x2, 0x32, 0x33, 0x7, 0x29, 0x2, 0x2, 0x33, 0x12, 0x3, 0x2, 0x2, 0x2, - 0x6, 0x2, 0x20, 0x27, 0x2f, 0x3, 0x3, 0x6, 0x2, - }; - - atn::ATNDeserializer deserializer; - _atn = deserializer.deserialize(_serializedATN); - - size_t count = _atn.getNumberOfDecisions(); - _decisionToDFA.reserve(count); - for (size_t i = 0; i < count; i++) { - _decisionToDFA.emplace_back(_atn.getDecisionState(i), i); - } +void XPathLexer::initialize() { +#if ANTLR4_USE_THREAD_LOCAL_CACHE + xpathlexerLexerInitialize(); +#else + ::antlr4::internal::call_once(xpathlexerLexerOnceFlag, xpathlexerLexerInitialize); +#endif } - -XPathLexer::Initializer XPathLexer::_init; diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h b/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h index ca471c9102..7dba6ad0ba 100644 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h @@ -1,6 +1,12 @@ + +// Generated from XPathLexer.g4 by ANTLR 4.13.1 + #pragma once +#include +#include +#include #include "antlr4-runtime.h" @@ -13,44 +19,38 @@ class XPathLexer : public antlr4::Lexer { 
ID = 7, STRING = 8 }; - XPathLexer(antlr4::CharStream *input); - ~XPathLexer(); + explicit XPathLexer(antlr4::CharStream *input); - virtual std::string getGrammarFileName() const override; - virtual const std::vector& getRuleNames() const override; + ~XPathLexer() override; - virtual const std::vector& getChannelNames() const override; - virtual const std::vector& getModeNames() const override; - virtual const std::vector& getTokenNames() const override; // deprecated, use vocabulary instead - virtual antlr4::dfa::Vocabulary& getVocabulary() const override; - virtual const std::vector getSerializedATN() const override; - virtual const antlr4::atn::ATN& getATN() const override; + std::string getGrammarFileName() const override; - virtual void action(antlr4::RuleContext *context, size_t ruleIndex, size_t actionIndex) override; -private: - static std::vector _decisionToDFA; - static antlr4::atn::PredictionContextCache _sharedContextCache; - static std::vector _ruleNames; - static std::vector _tokenNames; - static std::vector _channelNames; - static std::vector _modeNames; + const std::vector& getRuleNames() const override; + + const std::vector& getChannelNames() const override; + + const std::vector& getModeNames() const override; + + const antlr4::dfa::Vocabulary& getVocabulary() const override; - static std::vector _literalNames; - static std::vector _symbolicNames; - static antlr4::dfa::Vocabulary _vocabulary; - static antlr4::atn::ATN _atn; - static std::vector _serializedATN; + antlr4::atn::SerializedATNView getSerializedATN() const override; + const antlr4::atn::ATN& getATN() const override; + + void action(antlr4::RuleContext *context, size_t ruleIndex, size_t actionIndex) override; + + // By default the static state used to implement the lexer is lazily initialized during the first + // call to the constructor. You can call this function if you wish to initialize the static state + // ahead of time. 
+ static void initialize(); + +private: // Individual action functions triggered by action() above. void IDAction(antlr4::RuleContext *context, size_t actionIndex); // Individual semantic predicate functions triggered by sempred() above. - struct Initializer { - Initializer(); - }; - static Initializer _init; }; diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp old mode 100755 new mode 100644 index 2804c8ee3d..cdfd9c4e86 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp @@ -3,6 +3,10 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include +#include +#include "Token.h" #include "XPathLexerErrorListener.h" using namespace antlr4; diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h b/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h old mode 100755 new mode 100644 index c0c3eaaca7..3388a9505e --- a/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h @@ -5,6 +5,11 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" +#include "Token.h" #include "BaseErrorListener.h" namespace antlr4 { @@ -13,7 +18,7 @@ namespace xpath { class ANTLR4CPP_PUBLIC XPathLexerErrorListener : public BaseErrorListener { public: - virtual void syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line, + void syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line, size_t charPositionInLine, const std::string &msg, std::exception_ptr e) override; }; diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp old mode 100755 new mode 100644 index 9ca910df2e..ca4dbf4f76 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp +++ 
b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp @@ -3,6 +3,8 @@ * can be found in the LICENSE.txt file in the project root. */ +#include +#include #include "tree/ParseTree.h" #include "tree/Trees.h" diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h old mode 100755 new mode 100644 index 2ceb75ceed..0366a40ae7 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "XPathElement.h" namespace antlr4 { @@ -16,7 +19,7 @@ namespace xpath { public: XPathRuleAnywhereElement(const std::string &ruleName, int ruleIndex); - virtual std::vector evaluate(ParseTree *t) override; + std::vector evaluate(ParseTree *t) override; protected: int _ruleIndex = 0; diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp old mode 100755 new mode 100644 index e446e8cbb7..a8bbe7e8a1 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp @@ -3,6 +3,9 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "tree/ParseTree.h" #include "tree/Trees.h" @@ -18,7 +21,7 @@ XPathRuleElement::XPathRuleElement(const std::string &ruleName, size_t ruleIndex std::vector XPathRuleElement::evaluate(ParseTree *t) { // return all children of t that match nodeName std::vector nodes; - for (auto c : t->children) { + for (auto *c : t->children) { if (antlrcpp::is(c)) { ParserRuleContext *ctx = dynamic_cast(c); if ((ctx->getRuleIndex() == _ruleIndex && !_invert) || (ctx->getRuleIndex() != _ruleIndex && _invert)) { diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h old mode 100755 new mode 100644 index b57276f033..ad5e0e2186 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" #include "XPathElement.h" namespace antlr4 { @@ -15,7 +19,7 @@ namespace xpath { public: XPathRuleElement(const std::string &ruleName, size_t ruleIndex); - virtual std::vector evaluate(ParseTree *t) override; + std::vector evaluate(ParseTree *t) override; protected: size_t _ruleIndex = 0; diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp old mode 100755 new mode 100644 index c557c9d675..891bcc69fa --- a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp @@ -3,6 +3,8 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include #include "tree/ParseTree.h" #include "tree/Trees.h" diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h old mode 100755 new mode 100644 index 2045d91b32..b06e6fc580 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h @@ -5,6 +5,9 @@ #pragma once +#include +#include +#include "antlr4-common.h" #include "XPathElement.h" namespace antlr4 { @@ -17,7 +20,7 @@ namespace xpath { public: XPathTokenAnywhereElement(const std::string &tokenName, int tokenType); - virtual std::vector evaluate(ParseTree *t) override; + std::vector evaluate(ParseTree *t) override; }; } // namespace xpath diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp old mode 100755 new mode 100644 index 7d53dd5de4..2466472975 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp @@ -3,6 +3,9 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include +#include +#include #include "tree/ParseTree.h" #include "tree/Trees.h" #include "support/CPPUtils.h" @@ -21,7 +24,7 @@ XPathTokenElement::XPathTokenElement(const std::string &tokenName, size_t tokenT std::vector XPathTokenElement::evaluate(ParseTree *t) { // return all children of t that match nodeName std::vector nodes; - for (auto c : t->children) { + for (auto *c : t->children) { if (antlrcpp::is(c)) { TerminalNode *tnode = dynamic_cast(c); if ((tnode->getSymbol()->getType() == _tokenType && !_invert) || (tnode->getSymbol()->getType() != _tokenType && _invert)) { diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h old mode 100755 new mode 100644 index 7221530ce6..1cc61ecdfa --- a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h @@ -5,6 +5,10 @@ #pragma once +#include +#include +#include +#include "antlr4-common.h" #include "XPathElement.h" namespace antlr4 { @@ -15,7 +19,7 @@ namespace xpath { public: XPathTokenElement(const std::string &tokenName, size_t tokenType); - virtual std::vector evaluate(ParseTree *t) override; + std::vector evaluate(ParseTree *t) override; protected: size_t _tokenType = 0; diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp old mode 100755 new mode 100644 index 4ff424f056..0888dcc016 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "XPath.h" #include "tree/ParseTree.h" #include "tree/Trees.h" diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h old mode 100755 new mode 100644 index dc5d1e5a29..5873f13376 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "XPathElement.h" namespace antlr4 { @@ -15,7 +17,7 @@ namespace xpath { public: XPathWildcardAnywhereElement(); - virtual std::vector evaluate(ParseTree *t) override; + std::vector evaluate(ParseTree *t) override; }; } // namespace xpath diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp old mode 100755 new mode 100644 index aabda5a9be..d607e6a6c4 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp @@ -3,6 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ +#include #include "XPath.h" #include "tree/ParseTree.h" #include "tree/Trees.h" diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h old mode 100755 new mode 100644 index accb461de2..9e7a19506b --- a/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h @@ -5,6 +5,8 @@ #pragma once +#include +#include "antlr4-common.h" #include "XPathElement.h" namespace antlr4 { @@ -15,7 +17,7 @@ namespace xpath { public: XPathWildcardElement(); - virtual std::vector evaluate(ParseTree *t) override; + std::vector evaluate(ParseTree *t) override; }; } // namespace xpath diff --git a/runtime/Cpp/runtime/tests/Utf8Test.cpp b/runtime/Cpp/runtime/tests/Utf8Test.cpp new file mode 100644 index 0000000000..e7e415a53f --- /dev/null +++ b/runtime/Cpp/runtime/tests/Utf8Test.cpp @@ -0,0 +1,110 @@ +#include +#include + +#include "gtest/gtest.h" +#include "support/Utf8.h" + +namespace antlrcpp { +namespace { + + struct Utf8EncodeTestCase final { + char32_t code_point; + std::string_view code_units; + }; + + using Utf8EncodeTest = testing::TestWithParam; + + TEST_P(Utf8EncodeTest, Compliance) { + const Utf8EncodeTestCase& test_case = GetParam(); + std::string result; + EXPECT_EQ(Utf8::encode(&result, test_case.code_point), test_case.code_units); + } + + INSTANTIATE_TEST_SUITE_P(Utf8EncodeTest, Utf8EncodeTest, + testing::ValuesIn({ + {0x0000, std::string_view("\x00", 1)}, + {0x0001, "\x01"}, + {0x007e, "\x7e"}, + {0x007f, "\x7f"}, + {0x0080, "\xc2\x80"}, + {0x0081, "\xc2\x81"}, + {0x00bf, "\xc2\xbf"}, + {0x00c0, "\xc3\x80"}, + {0x00c1, "\xc3\x81"}, + {0x00c8, "\xc3\x88"}, + {0x00d0, "\xc3\x90"}, + {0x00e0, "\xc3\xa0"}, + {0x00f0, "\xc3\xb0"}, + {0x00f8, "\xc3\xb8"}, + {0x00ff, "\xc3\xbf"}, + {0x0100, "\xc4\x80"}, + {0x07ff, "\xdf\xbf"}, + {0x0400, "\xd0\x80"}, + {0x0800, "\xe0\xa0\x80"}, + {0x0801, "\xe0\xa0\x81"}, + {0x1000, "\xe1\x80\x80"}, + 
{0xd000, "\xed\x80\x80"}, + {0xd7ff, "\xed\x9f\xbf"}, + {0xe000, "\xee\x80\x80"}, + {0xfffe, "\xef\xbf\xbe"}, + {0xffff, "\xef\xbf\xbf"}, + {0x10000, "\xf0\x90\x80\x80"}, + {0x10001, "\xf0\x90\x80\x81"}, + {0x40000, "\xf1\x80\x80\x80"}, + {0x10fffe, "\xf4\x8f\xbf\xbe"}, + {0x10ffff, "\xf4\x8f\xbf\xbf"}, + {0xFFFD, "\xef\xbf\xbd"}, + })); + + struct Utf8DecodeTestCase final { + char32_t code_point; + std::string_view code_units; + }; + + using Utf8DecodeTest = testing::TestWithParam; + + TEST_P(Utf8DecodeTest, Compliance) { + const Utf8DecodeTestCase& test_case = GetParam(); + auto [code_point, code_units] = Utf8::decode(test_case.code_units); + EXPECT_EQ(code_units, test_case.code_units.size()); + EXPECT_EQ(code_point, test_case.code_point); + } + + INSTANTIATE_TEST_SUITE_P(Utf8DecodeTest, Utf8DecodeTest, + testing::ValuesIn({ + {0x0000, std::string_view("\x00", 1)}, + {0x0001, "\x01"}, + {0x007e, "\x7e"}, + {0x007f, "\x7f"}, + {0x0080, "\xc2\x80"}, + {0x0081, "\xc2\x81"}, + {0x00bf, "\xc2\xbf"}, + {0x00c0, "\xc3\x80"}, + {0x00c1, "\xc3\x81"}, + {0x00c8, "\xc3\x88"}, + {0x00d0, "\xc3\x90"}, + {0x00e0, "\xc3\xa0"}, + {0x00f0, "\xc3\xb0"}, + {0x00f8, "\xc3\xb8"}, + {0x00ff, "\xc3\xbf"}, + {0x0100, "\xc4\x80"}, + {0x07ff, "\xdf\xbf"}, + {0x0400, "\xd0\x80"}, + {0x0800, "\xe0\xa0\x80"}, + {0x0801, "\xe0\xa0\x81"}, + {0x1000, "\xe1\x80\x80"}, + {0xd000, "\xed\x80\x80"}, + {0xd7ff, "\xed\x9f\xbf"}, + {0xe000, "\xee\x80\x80"}, + {0xfffe, "\xef\xbf\xbe"}, + {0xffff, "\xef\xbf\xbf"}, + {0x10000, "\xf0\x90\x80\x80"}, + {0x10001, "\xf0\x90\x80\x81"}, + {0x40000, "\xf1\x80\x80\x80"}, + {0x10fffe, "\xf4\x8f\xbf\xbe"}, + {0x10ffff, "\xf4\x8f\xbf\xbf"}, + {0xFFFD, "\xef\xbf\xbd"}, + })); + +} +} diff --git a/runtime/Dart/.gitignore b/runtime/Dart/.gitignore new file mode 100644 index 0000000000..a45b12ec79 --- /dev/null +++ b/runtime/Dart/.gitignore @@ -0,0 +1,23 @@ +!lib + +# See https://www.dartlang.org/guides/libraries/private-files + +# Files and directories created by pub 
+.dart_tool/ +.packages +build/ +# If you're building an application, you may want to check-in your pubspec.lock +pubspec.lock + +# Directory created by dartdoc +# If you don't generate documentation locally you can remove this line. +doc/api/ + +# Avoid committing generated Javascript files: +*.dart.js +*.info.json # Produced by the --dump-info flag. +*.js # When generated by dart2js. Don't specify *.js if your + # project includes source files written in JavaScript. +*.js_ +*.js.deps +*.js.map \ No newline at end of file diff --git a/runtime/Dart/LICENSE b/runtime/Dart/LICENSE new file mode 100644 index 0000000000..2042d1bda6 --- /dev/null +++ b/runtime/Dart/LICENSE @@ -0,0 +1,52 @@ +[The "BSD 3-clause license"] +Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +===== + +MIT License for codepointat.js from https://git.io/codepointat +MIT License for fromcodepoint.js from https://git.io/vDW1m + +Copyright Mathias Bynens + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/runtime/Dart/README.md b/runtime/Dart/README.md new file mode 100644 index 0000000000..3b2b4a78bd --- /dev/null +++ b/runtime/Dart/README.md @@ -0,0 +1,11 @@ +# Dart target for ANTLR 4 + +Dart runtime libraries for ANTLR 4 + +This runtime is available through [pub](https://pub.dev). 
The package name is 'antlr4'. + +See www.antlr.org for more information on ANTLR. + +See https://github.com/antlr/antlr4/blob/master/doc/dart-target.md for more information on using ANTLR in Dart. + + diff --git a/runtime/Dart/analysis_options.yaml b/runtime/Dart/analysis_options.yaml new file mode 100644 index 0000000000..108d1058ac --- /dev/null +++ b/runtime/Dart/analysis_options.yaml @@ -0,0 +1 @@ +include: package:pedantic/analysis_options.yaml diff --git a/runtime/Dart/benchmark/src/util/bit_set.dart b/runtime/Dart/benchmark/src/util/bit_set.dart new file mode 100644 index 0000000000..88921f67ba --- /dev/null +++ b/runtime/Dart/benchmark/src/util/bit_set.dart @@ -0,0 +1,39 @@ +import 'package:antlr4/src/util/bit_set.dart'; +import 'package:benchmark_harness/benchmark_harness.dart'; + +class TemplateBenchmark extends BenchmarkBase { + BitSet bs = BitSet(); + + TemplateBenchmark() : super('BitSet common operations'); + + static void main() { + TemplateBenchmark().report(); + } + + // The benchmark code. + @override + void run() { + bs.set(32); + bs.set(59); + bs.set(256); + bs.get(128); + bs.get(256); + bs.nextset(0); + bs.nextset(60); + bs.cardinality; + bs.hashCode; + } + + // Not measured setup code executed prior to the benchmark runs. + @override + void setup() { } + + // Not measured teardown code executed after the benchmark runs. + @override + void teardown() { } +} + +void main() { + // Run TemplateBenchmark + TemplateBenchmark.main(); +} diff --git a/runtime/Dart/lib/antlr4.dart b/runtime/Dart/lib/antlr4.dart new file mode 100644 index 0000000000..60ae882be4 --- /dev/null +++ b/runtime/Dart/lib/antlr4.dart @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +library antlr4; + +export 'src/atn/atn.dart'; +export 'src/dfa/dfa.dart'; +export 'src/error/error.dart'; +export 'src/misc/misc.dart'; +export 'src/tree/tree.dart'; + +export 'src/input_stream.dart'; +export 'src/interval_set.dart'; +export 'src/lexer.dart'; +export 'src/parser.dart'; +export 'src/parser_rule_context.dart'; +export 'src/prediction_context.dart'; +export 'src/recognizer.dart'; +export 'src/rule_context.dart'; +export 'src/runtime_meta_data.dart'; +export 'src/token.dart'; +export 'src/token_factory.dart'; +export 'src/token_source.dart'; +export 'src/token_stream.dart'; +export 'src/vocabulary.dart'; + +import 'src/util/platform_stub.dart' + if (dart.library.io) 'src/util/platform_io.dart'; + +/// Hack to workaround not being able to access stdout in tests. +void TEST_platformStdoutWrite(Object? object) => stdoutWrite(object); diff --git a/runtime/Dart/lib/src/atn/atn.dart b/runtime/Dart/lib/src/atn/atn.dart new file mode 100644 index 0000000000..3e7af84763 --- /dev/null +++ b/runtime/Dart/lib/src/atn/atn.dart @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +export 'src/atn.dart'; +export 'src/atn_config.dart'; +export 'src/atn_config_set.dart'; +export 'src/atn_deserializer.dart'; +export 'src/atn_simulator.dart'; +export 'src/atn_state.dart'; +export 'src/info.dart'; +export 'src/lexer_action_executor.dart'; +export 'src/lexer_atn_simulator.dart'; +export 'src/parser_atn_simulator.dart'; +export 'src/profiling_atn_simulator.dart'; +export 'src/semantic_context.dart'; +export 'src/transition.dart'; diff --git a/runtime/Dart/lib/src/atn/src/atn.dart b/runtime/Dart/lib/src/atn/src/atn.dart new file mode 100644 index 0000000000..b3a9682fcb --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn.dart @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../interval_set.dart'; +import '../../ll1_analyzer.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import 'atn_state.dart'; +import 'atn_type.dart'; +import 'lexer_action.dart'; +import 'transition.dart'; + +class ATN { + static final INVALID_ALT_NUMBER = 0; + + List states = []; + + /// Each subrule/rule is a decision point and we must track them so we + /// can go back later and build DFA predictors for them. This includes + /// all the rules, subrules, optional blocks, ()+, ()* etc... + List decisionToState = []; + + /// Maps from rule index to starting state number. + List ruleToStartState = []; + + /// Maps from rule index to stop state number. + late List ruleToStopState; + + Map modeNameToStartState = {}; + + /// The type of the ATN. + final ATNType grammarType; + + /// The maximum value for any symbol recognized by a transition in the ATN. + final int maxTokenType; + + /// For lexer ATNs, this maps the rule index to the resulting token type. + /// For parser ATNs, this maps the rule index to the generated bypass token + /// type if the + /// {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions} + /// deserialization option was specified; otherwise, this is null. + late List ruleToTokenType; + + /// For lexer ATNs, this is an array of [LexerAction] objects which may + /// be referenced by action transitions in the ATN. + List? lexerActions; + + List modeToStartState = []; + + /// Used for runtime deserialization of ATNs from strings */ + ATN(this.grammarType, this.maxTokenType); + + /// TODO merge doc comment + /// Compute the set of valid tokens that can occur starting in state [s]. + /// If [ctx] is null, the set of tokens will not include what can follow + /// the rule surrounding [s]. 
In other words, the set will be + /// restricted to tokens reachable staying within [s]'s rule. + /// + /// Compute the set of valid tokens that can occur starting in [s] and + /// staying in same rule. {@link Token#EPSILON} is in set if we reach end of + /// rule. + IntervalSet nextTokens(ATNState s, [RuleContext? ctx]) { + if (ctx != null) { + return LL1Analyzer(this).LOOK(s, ctx); + } + if (s.nextTokenWithinRule != null) return s.nextTokenWithinRule!; + s.nextTokenWithinRule = LL1Analyzer(this).LOOK(s, null); + s.nextTokenWithinRule!.setReadonly(true); + return s.nextTokenWithinRule!; + } + + void addState(ATNState? state) { + if (state != null) { + state.atn = this; + state.stateNumber = states.length; + } + + states.add(state); + } + + void removeState(ATNState state) { + states[state.stateNumber] = + null; // just free mem, don't shift states in list + } + + int defineDecisionState(DecisionState s) { + decisionToState.add(s); + s.decision = decisionToState.length - 1; + return s.decision; + } + + DecisionState? getDecisionState(int? decision) { + if (decisionToState.isNotEmpty || decision != null) { + return decisionToState[decision!]; + } + return null; + } + + int get numberOfDecisions { + return decisionToState.length; + } + + /// Computes the set of input symbols which could follow ATN state number + /// [stateNumber] in the specified full [context]. This method + /// considers the complete parser context, but does not evaluate semantic + /// predicates (i.e. all predicates encountered during the calculation are + /// assumed true). If a path in the ATN exists from the starting state to the + /// [RuleStopState] of the outermost context without matching any + /// symbols, {@link Token#EOF} is added to the returned set. + /// + ///

    + /// If [context] is null, it is treated as {@link ParserRuleContext#EMPTY}.

    + /// + /// Note that this does NOT give you the set of all tokens that could + /// appear at a given token position in the input phrase. In other words, + /// it does not answer: + /// + /// "Given a specific partial input phrase, return the set of all tokens + /// that can follow the last token in the input phrase." + /// + /// The big difference is that with just the input, the parser could + /// land right in the middle of a lookahead decision. Getting + /// all *possible* tokens given a partial input stream is a separate + /// computation. See https://github.com/antlr/antlr4/issues/1428 + /// + /// For this function, we are specifying an ATN state and call stack to compute + /// what token(s) can come next and specifically: outside of a lookahead decision. + /// That is what you want for error reporting and recovery upon parse error. + /// + /// @param stateNumber the ATN state number + /// @param context the full parse context + /// @return The set of potentially valid input symbols which could follow the + /// specified state in the specified context. + /// @throws IllegalArgumentException if the ATN does not contain a state with + /// number [stateNumber] + IntervalSet getExpectedTokens(int stateNumber, RuleContext? 
context) { + if (stateNumber < 0 || stateNumber >= states.length) { + throw RangeError.index(stateNumber, states, 'stateNumber'); + } + + var ctx = context; + final s = states[stateNumber]!; + var following = nextTokens(s); + if (!following.contains(Token.EPSILON)) { + return following; + } + + final expected = IntervalSet(); + expected.addAll(following); + expected.remove(Token.EPSILON); + while (ctx != null && + ctx.invokingState >= 0 && + following.contains(Token.EPSILON)) { + final invokingState = states[ctx.invokingState]!; + + final rt = invokingState.transition(0) as RuleTransition; + following = nextTokens(rt.followState); + expected.addAll(following); + expected.remove(Token.EPSILON); + ctx = ctx.parent; + } + + if (following.contains(Token.EPSILON)) { + expected.addOne(Token.EOF); + } + + return expected; + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_config.dart b/runtime/Dart/lib/src/atn/src/atn_config.dart new file mode 100644 index 0000000000..82acda6aa1 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_config.dart @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../prediction_context.dart'; +import '../../util/murmur_hash.dart'; +import 'atn_state.dart'; +import 'lexer_action_executor.dart'; +import 'semantic_context.dart'; + +Map checkParams(params, isCfg) { + if (params == null) { + final result = { + 'state': null, + 'alt': null, + 'context': null, + 'semanticContext': null + }; + if (isCfg) { + result['reachesIntoOuterContext'] = 0; + } + return result; + } else { + final props = {}; + props['state'] = params.state; + props['alt'] = (params.alt == null) ? null : params.alt; + props['context'] = params.context; + props['semanticContext'] = params.semanticContext; + if (isCfg) { + props['reachesIntoOuterContext'] = params.reachesIntoOuterContext ?? 
0; + props['precedenceFilterSuppressed'] = + params.precedenceFilterSuppressed ?? false; + } + return props; + } +} + +/// A tuple: (ATN state, predicted alt, syntactic, semantic context). +/// The syntactic context is a graph-structured stack node whose +/// path(s) to the root is the rule invocation(s) +/// chain used to arrive at the state. The semantic context is +/// the tree of semantic predicates encountered before reaching +/// an ATN state. +class ATNConfig { + /// This field stores the bit mask for implementing the + /// {@link #isPrecedenceFilterSuppressed} property as a bit within the + /// existing {@link #reachesIntoOuterContext} field. + static final int SUPPRESS_PRECEDENCE_FILTER = 0x40000000; + + /// The ATN state associated with this configuration */ + ATNState state; + + /// What alt (or lexer rule) is predicted by this configuration */ + int alt; + + /// The stack of invoking states leading to the rule/states associated + /// with this config. We track only those contexts pushed during + /// execution of the ATN simulator. + PredictionContext? context; + + /// We cannot execute predicates dependent upon local context unless + /// we know for sure we are in the correct context. Because there is + /// no way to do this efficiently, we simply cannot evaluate + /// dependent predicates unless we are in the rule that initially + /// invokes the ATN simulator. + /// + ///

    + /// closure() tracks the depth of how far we dip into the outer context: + /// depth > 0. Note that it may not be totally accurate depth since I + /// don't ever decrement. TODO: make it a bool then

    + /// + ///

    + /// For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method + /// is also backed by this field. Since the field is publicly accessible, the + /// highest bit which would not cause the value to become negative is used to + /// store this field. This choice minimizes the risk that code which only + /// compares this value to 0 would be affected by the new purpose of the + /// flag. It also ensures the performance of the existing [ATNConfig] + /// constructors as well as certain operations like + /// {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are + /// completely unaffected by the change.

    + int reachesIntoOuterContext; + + SemanticContext semanticContext; + + ATNConfig( + this.state, + this.alt, + this.context, [ + this.semanticContext = EmptySemanticContext.Instance, + ]) : reachesIntoOuterContext = 0; + + ATNConfig.dup( + ATNConfig c, { + ATNState? state, + int? alt, + PredictionContext? context, + SemanticContext? semanticContext, + }) : state = state ?? c.state, + alt = alt ?? c.alt, + context = context ?? c.context, + semanticContext = semanticContext ?? c.semanticContext, + reachesIntoOuterContext = c.reachesIntoOuterContext; + + /// This method gets the value of the {@link #reachesIntoOuterContext} field + /// as it existed prior to the introduction of the + /// {@link #isPrecedenceFilterSuppressed} method. + int get outerContextDepth { + return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER; + } + + bool isPrecedenceFilterSuppressed() { + return (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0; + } + + void setPrecedenceFilterSuppressed(bool value) { + if (value) { + reachesIntoOuterContext |= 0x40000000; + } else { + reachesIntoOuterContext &= ~SUPPRESS_PRECEDENCE_FILTER; + } + } + + /// An ATN configuration is equal to another if both have + /// the same state, they predict the same alternative, and + /// syntactic/semantic contexts are the same. 
+ @override + bool operator ==(Object other) { + if (other is ATNConfig) { + return state.stateNumber == other.state.stateNumber && + alt == other.alt && + (context == other.context || + (context != null && context == other.context)) && + semanticContext == other.semanticContext && + isPrecedenceFilterSuppressed() == + other.isPrecedenceFilterSuppressed(); + } + return false; + } + + @override + int get hashCode { + var hashCode = MurmurHash.initialize(7); + hashCode = MurmurHash.update(hashCode, state.stateNumber); + hashCode = MurmurHash.update(hashCode, alt); + hashCode = MurmurHash.update(hashCode, context); + hashCode = MurmurHash.update(hashCode, semanticContext); + hashCode = MurmurHash.finish(hashCode, 4); + return hashCode; + } + + @override + String toString([_, bool showAlt = true]) { + final buf = StringBuffer(); + // if ( state.ruleIndex>=0 ) { + // if ( recog!=null ) buf.write(recog.ruleNames[state.ruleIndex]+":"); + // else buf.write(state.ruleIndex+":"); + // } + buf.write('('); + buf.write(state); + if (showAlt) { + buf.write(','); + buf.write(alt); + } + if (context != null) { + buf.write(',['); + buf.write(context.toString()); + buf.write(']'); + } + if (semanticContext != EmptySemanticContext.Instance) { + buf.write(','); + buf.write(semanticContext); + } + if (outerContextDepth > 0) { + buf.write(',up='); + buf.write(outerContextDepth); + } + buf.write(')'); + return buf.toString(); + } +} + +class LexerATNConfig extends ATNConfig { + /// Gets the [LexerActionExecutor] capable of executing the embedded + /// action(s) for the current configuration. + LexerActionExecutor? 
lexerActionExecutor; + + bool passedThroughNonGreedyDecision = false; + + LexerATNConfig( + ATNState state, + int alt, + PredictionContext context, [ + this.lexerActionExecutor, + ]) : super(state, alt, context, EmptySemanticContext.Instance) { + passedThroughNonGreedyDecision = false; + } + + LexerATNConfig.dup( + LexerATNConfig c, + ATNState state, { + this.lexerActionExecutor, + PredictionContext? context, + }) : super.dup(c, state: state, context: context) { + lexerActionExecutor = lexerActionExecutor ?? c.lexerActionExecutor; + passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state); + } + + bool hasPassedThroughNonGreedyDecision() { + return passedThroughNonGreedyDecision; + } + + @override + int get hashCode { + var hashCode = MurmurHash.initialize(7); + hashCode = MurmurHash.update(hashCode, state.stateNumber); + hashCode = MurmurHash.update(hashCode, alt); + hashCode = MurmurHash.update(hashCode, context); + hashCode = MurmurHash.update(hashCode, semanticContext); + hashCode = + MurmurHash.update(hashCode, passedThroughNonGreedyDecision ? 
1 : 0); + hashCode = MurmurHash.update(hashCode, lexerActionExecutor); + hashCode = MurmurHash.finish(hashCode, 6); + return hashCode; + } + + @override + bool operator ==(Object other) { + if (identical(this, other)) { + return true; + } else if (other is LexerATNConfig) { + final lexerOther = other; + if (passedThroughNonGreedyDecision != + lexerOther.passedThroughNonGreedyDecision) { + return false; + } + + if (lexerActionExecutor != lexerOther.lexerActionExecutor) { + return false; + } + + return super == other; + } + return false; + } + + static bool checkNonGreedyDecision(LexerATNConfig source, ATNState target) { + return source.passedThroughNonGreedyDecision || + target is DecisionState && target.nonGreedy; + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_config_set.dart b/runtime/Dart/lib/src/atn/src/atn_config_set.dart new file mode 100644 index 0000000000..0c6de03e0c --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_config_set.dart @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'dart:collection'; +import 'dart:math'; + +import 'package:collection/collection.dart'; + +import '../../misc/misc.dart'; +import '../../prediction_context.dart'; +import '../../util/bit_set.dart'; +import '../../util/utils.dart'; +import 'atn.dart'; +import 'atn_config.dart'; +import 'atn_state.dart'; +import 'semantic_context.dart'; + +final defaultConfigLookup = () => HashSet(equals: (a, b) { + return a.state.stateNumber == b.state.stateNumber && + a.alt == b.alt && + a.semanticContext == b.semanticContext; + }, hashCode: (ATNConfig o) { + var hashCode = 7; + hashCode = 31 * hashCode + o.state.stateNumber; + hashCode = 31 * hashCode + o.alt; + hashCode = 31 * hashCode + o.semanticContext.hashCode; + return hashCode; + }); + +class ATNConfigSet extends Iterable { + /// Indicates that the set of configurations is read-only. Do not + /// allow any code to manipulate the set; DFA states will point at + /// the sets and they must not change. This does not protect the other + /// fields; in particular, conflictingAlts is set after + /// we've made this readonly. + bool _readOnly = false; + + bool get readOnly => _readOnly; + + set readOnly(bool readOnly) { + _readOnly = readOnly; + if (readOnly) { + configLookup = null; // can't mod, no need for lookup cache + } else { + configLookup = defaultConfigLookup(); + } + } + + /// The reason that we need this is because we don't want the hash map to use + /// the standard hash code and equals. We need all configurations with the same + /// {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles + /// the number of objects associated with ATNConfigs. The other solution is to + /// use a hash table that lets us specify the equals/hashcode operation. + /// + /// All configs but hashed by (s, i, _, pi) not including context. Wiped out + /// when we go readonly as this set becomes a DFA state. + Set? 
configLookup = defaultConfigLookup(); + + /// Track the elements as they are added to the set; supports get(i) */ + final List configs = []; + + // TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation + // TODO: can we track conflicts as they are added to save scanning configs later? + int uniqueAlt = 0; + + /// Currently this is only used when we detect SLL conflict; this does + /// not necessarily represent the ambiguous alternatives. In fact, + /// I should also point out that this seems to include predicated alternatives + /// that have predicates that evaluate to false. Computed in computeTargetState(). + BitSet? conflictingAlts; + + // Used in parser and lexer. In lexer, it indicates we hit a pred + // while computing a closure operation. Don't make a DFA state from this. + bool hasSemanticContext = false; + bool dipsIntoOuterContext = false; + + /// Indicates that this configuration set is part of a full context + /// LL prediction. It will be used to determine how to merge $. With SLL + /// it's a wildcard whereas it is not for LL context merge. + bool fullCtx; + + int cachedHashCode = -1; + + ATNConfigSet([this.fullCtx = true]); + + ATNConfigSet.dup(ATNConfigSet old) + : fullCtx = old.fullCtx, + uniqueAlt = old.uniqueAlt, + conflictingAlts = old.conflictingAlts, + hasSemanticContext = old.hasSemanticContext, + dipsIntoOuterContext = old.dipsIntoOuterContext { + addAll(old); + } + + /// Adding a new config means merging contexts with existing configs for + /// {@code (s, i, pi, _)}, where [s] is the + /// {@link ATNConfig#state}, [i] is the {@link ATNConfig#alt}, and + /// [pi] is the {@link ATNConfig#semanticContext}. We use + /// {@code (s,i,pi)} as key. + /// + ///

    + /// This method updates {@link #dipsIntoOuterContext} and + /// {@link #hasSemanticContext} when necessary.

    + bool add( + ATNConfig config, [ + Map, PredictionContext>? + mergeCache, + ]) { + if (readOnly) throw StateError('This set is readonly'); + if (config.semanticContext != EmptySemanticContext.Instance) { + hasSemanticContext = true; + } + if (config.outerContextDepth > 0) { + dipsIntoOuterContext = true; + } + final existing = configLookup!.lookup(config) ?? config; + if (identical(existing, config)) { + // we added this new one + cachedHashCode = -1; + configLookup!.add(config); + configs.add(config); // track order here + return true; + } + // a previous (s,i,pi,_), merge with it and save result + final rootIsWildcard = !fullCtx; + final merged = PredictionContext.merge( + existing.context!, + config.context!, + rootIsWildcard, + mergeCache, + ); + // no need to check for existing.context, config.context in cache + // since only way to create new graphs is "call rule" and here. We + // cache at both places. + existing.reachesIntoOuterContext = + max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext); + + // make sure to preserve the precedence filter suppression during the merge + if (config.isPrecedenceFilterSuppressed()) { + existing.setPrecedenceFilterSuppressed(true); + } + + existing.context = merged; // replace context; no need to alt mapping + return true; + } + + /// Return a List holding list of configs */ + List get elements { + return configs; + } + + Set get states { + final states = {}; + for (var i = 0; i < configs.length; i++) { + states.add(configs[i].state); + } + return states; + } + + /// Gets the complete set of represented alternatives for the configuration + /// set. 
+ /// + /// @return the set of represented alternatives in this configuration set + /// + /// @since 4.3 + BitSet get alts { + final alts = BitSet(); + for (var config in configs) { + alts.set(config.alt); + } + return alts; + } + + List get predicates { + final preds = []; + for (var c in configs) { + if (c.semanticContext != EmptySemanticContext.Instance) { + preds.add(c.semanticContext); + } + } + return preds; + } + + ATNConfig get(int i) { + return configs[i]; + } + + void optimizeConfigs(interpreter) { + if (readOnly) throw StateError('This set is readonly'); + + if (configLookup!.isEmpty) return; + + for (var config in configs) { +// int before = PredictionContext.getAllContextNodes(config.context).length; + config.context = interpreter!.getCachedContext(config.context); +// int after = PredictionContext.getAllContextNodes(config.context).length; +// System.out.println("configs "+before+"->"+after); + } + } + + bool addAll(coll) { + for (ATNConfig c in coll) { + add(c); + } + return false; + } + + @override + bool operator ==(other) { + return identical(this, other) || + (other is ATNConfigSet && + ListEquality().equals(configs, other.configs) && + fullCtx == other.fullCtx && + uniqueAlt == other.uniqueAlt && + conflictingAlts == other.conflictingAlts && + hasSemanticContext == other.hasSemanticContext && + dipsIntoOuterContext == other.dipsIntoOuterContext); + } + + @override + int get hashCode { + if (readOnly) { + if (cachedHashCode == -1) { + cachedHashCode = ListEquality().hash(configs); + } + + return cachedHashCode; + } + + return ListEquality().hash(configs); + } + + @override + int get length { + return configs.length; + } + + @override + bool get isEmpty => configs.isEmpty; + + void updateHashCode(hash) { + if (readOnly) { + if (cachedHashCode == -1) { + cachedHashCode = hashCode; + } + hash.update(cachedHashCode); + } else { + hash.update(hashCode); + } + } + + @override + bool contains(Object? 
o) { + if (configLookup == null) { + throw UnsupportedError( + 'This method is not implemented for readonly sets.'); + } + + return configLookup!.contains(o); + } + + @override + Iterator get iterator => configs.iterator; + + void clear() { + if (readOnly) throw StateError('This set is readonly'); + configs.clear(); + cachedHashCode = -1; + configLookup!.clear(); + } + + @override + String toString() { + final buf = StringBuffer(); + buf.write(arrayToString(elements)); + if (hasSemanticContext) { + buf.write(',hasSemanticContext=$hasSemanticContext'); + } + if (uniqueAlt != ATN.INVALID_ALT_NUMBER) buf.write(',uniqueAlt=$uniqueAlt'); + if (conflictingAlts != null) buf.write(',conflictingAlts=$conflictingAlts'); + if (dipsIntoOuterContext) buf.write(',dipsIntoOuterContext'); + return buf.toString(); + } +} + +class OrderedATNConfigSet extends ATNConfigSet { + @override + final configLookup = {}; +} diff --git a/runtime/Dart/lib/src/atn/src/atn_deserializer.dart b/runtime/Dart/lib/src/atn/src/atn_deserializer.dart new file mode 100644 index 0000000000..67de5abd2c --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_deserializer.dart @@ -0,0 +1,648 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../interval_set.dart'; +import '../../misc/misc.dart'; +import '../../token.dart'; +import 'atn.dart'; +import 'atn_state.dart'; +import 'atn_type.dart'; +import 'lexer_action.dart'; +import 'transition.dart'; + +class ATNDeserializationOptions { + static late final ATNDeserializationOptions defaultOptions = + ATNDeserializationOptions(true); + + bool readOnly; + late bool verifyATN; + late bool generateRuleBypassTransitions; + + ATNDeserializationOptions(this.readOnly, + [ATNDeserializationOptions? 
options]) { + if (options == null) { + verifyATN = true; + generateRuleBypassTransitions = false; + } else { + verifyATN = options.verifyATN; + generateRuleBypassTransitions = options.generateRuleBypassTransitions; + } + } + + bool isReadOnly() { + return readOnly; + } + + void makeReadOnly() { + readOnly = true; + } + + bool isVerifyATN() { + return verifyATN; + } + + void setVerifyATN(bool verifyATN) { + throwIfReadOnly(); + this.verifyATN = verifyATN; + } + + bool isGenerateRuleBypassTransitions() { + return generateRuleBypassTransitions; + } + + void setGenerateRuleBypassTransitions(bool generateRuleBypassTransitions) { + throwIfReadOnly(); + this.generateRuleBypassTransitions = generateRuleBypassTransitions; + } + + void throwIfReadOnly() { + if (isReadOnly()) { + throw StateError('The object is read only.'); + } + } +} + +class ATNDeserializer { + static final SERIALIZED_VERSION = 4; + + late final ATNDeserializationOptions deserializationOptions; + late List data; + int pos = 0; + + ATNDeserializer([ATNDeserializationOptions? options]) { + deserializationOptions = + options ?? 
ATNDeserializationOptions.defaultOptions; + } + + ATN deserialize(List data) { + this.data = data; + this.pos = 0; + checkVersion(); + final atn = readATN(); + readStates(atn); + readRules(atn); + readModes(atn); + final sets = []; + readSets(atn, sets); + readEdges(atn, sets); + readDecisions(atn); + readLexerActions(atn); + markPrecedenceDecisions(atn); + verifyATN(atn); + if (deserializationOptions.generateRuleBypassTransitions && + atn.grammarType == ATNType.PARSER) { + generateRuleBypassTransitions(atn); + // re-verify after modification + verifyATN(atn); + } + return atn; + } + + void checkVersion() { + final version = readInt(); + if (version != SERIALIZED_VERSION) { + throw ('Could not deserialize ATN with version $version (expected $SERIALIZED_VERSION).'); + } + } + + ATN readATN() { + final grammarType = readInt(); + final maxTokenType = readInt(); + return ATN(ATNType.values[grammarType], maxTokenType); + } + + void readStates(ATN atn) { + final loopBackStateNumbers = >[]; + final endStateNumbers = >[]; + final nstates = readInt(); + for (var i = 0; i < nstates; i++) { + final stype = StateType.values[readInt()]; + // ignore bad type of states + if (stype == StateType.INVALID_TYPE) { + atn.addState(null); + continue; + } + + var ruleIndex = readInt(); + + final s = stateFactory(stype, ruleIndex); + if (s is LoopEndState) { + // special case + final loopBackStateNumber = readInt(); + loopBackStateNumbers.add(Pair(s, loopBackStateNumber)); + } else if (s is BlockStartState) { + final endStateNumber = readInt(); + endStateNumbers.add(Pair(s, endStateNumber)); + } + atn.addState(s); + } + + // delay the assignment of loop back and end states until we know all the state instances have been initialized + for (final pair in loopBackStateNumbers) { + pair.a.loopBackState = atn.states[pair.b]; + } + + for (final pair in endStateNumbers) { + pair.a.endState = atn.states[pair.b] as BlockEndState; + } + + final numNonGreedyStates = readInt(); + for (var i = 0; i < 
numNonGreedyStates; i++) { + final stateNumber = readInt(); + (atn.states[stateNumber] as DecisionState).nonGreedy = true; + } + + final numPrecedenceStates = readInt(); + for (var i = 0; i < numPrecedenceStates; i++) { + final stateNumber = readInt(); + (atn.states[stateNumber] as RuleStartState).isLeftRecursiveRule = true; + } + } + + void readRules(ATN atn) { + final nrules = readInt(); + if (atn.grammarType == ATNType.LEXER) { + atn.ruleToTokenType = []; + } + + for (var i = 0; i < nrules; i++) { + final s = readInt(); + final startState = atn.states[s] as RuleStartState; + atn.ruleToStartState.add(startState); + if (atn.grammarType == ATNType.LEXER) { + var tokenType = readInt(); + + atn.ruleToTokenType.add(tokenType); + } + } + + atn.ruleToStopState = List.generate( + nrules, (int index) => RuleStopState(index)); + + for (var state in atn.states) { + if (state is! RuleStopState) { + continue; + } + atn.ruleToStopState[state.ruleIndex] = state; + atn.ruleToStartState[state.ruleIndex].stopState = state; + } + } + + void readModes(ATN atn) { + final nmodes = readInt(); + for (var i = 0; i < nmodes; i++) { + final s = readInt(); + atn.modeToStartState.add(atn.states[s] as TokensStartState); + } + } + + void readSets(ATN atn, List sets) { + final nsets = readInt(); + for (var i = 0; i < nsets; i++) { + final nintervals = readInt(); + final set = IntervalSet(); + sets.add(set); + + final containsEof = readInt() != 0; + if (containsEof) { + set.addOne(-1); + } + + for (var j = 0; j < nintervals; j++) { + int a = readInt(); + int b = readInt(); + set.addRange(a, b); + } + } + } + + void readEdges(ATN atn, sets) { + final nedges = readInt(); + for (var i = 0; i < nedges; i++) { + final src = readInt(); + final trg = readInt(); + final ttype = TransitionType.values[readInt()]; + final arg1 = readInt(); + final arg2 = readInt(); + final arg3 = readInt(); + final trans = edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets); +// System.out.println("EDGE 
"+trans.getClass().getSimpleName()+" "+ +// src+"->"+trg+ +// " "+Transition.serializationNames[ttype]+ +// " "+arg1+","+arg2+","+arg3); + final srcState = atn.states[src]!; + srcState.addTransition(trans); + } + + // edges for rule stop states can be derived, so they aren't serialized + for (var state in atn.states) { + if (state == null) { + continue; + } + for (var i = 0; i < state.numberOfTransitions; i++) { + final t = state.transition(i); + if (t is RuleTransition) { + final ruleTransition = t; + var outermostPrecedenceReturn = -1; + if (atn.ruleToStartState[ruleTransition.target.ruleIndex] + .isLeftRecursiveRule) { + if (ruleTransition.precedence == 0) { + outermostPrecedenceReturn = ruleTransition.target.ruleIndex; + } + } + + final returnTransition = EpsilonTransition( + ruleTransition.followState, outermostPrecedenceReturn); + atn.ruleToStopState[ruleTransition.target.ruleIndex] + .addTransition(returnTransition); + } + } + } + + for (var state in atn.states) { + if (state is BlockStartState) { + // we need to know the end state to set its start state + if (state.endState == null) { + throw StateError(''); + } + + // block end states can only be associated to a single block start state + if (state.endState!.startState != null) { + throw StateError(''); + } + + state.endState!.startState = state; + } + + if (state is PlusLoopbackState) { + final loopbackState = state; + for (var i = 0; i < loopbackState.numberOfTransitions; i++) { + final target = loopbackState.transition(i).target; + if (target is PlusBlockStartState) { + target.loopBackState = loopbackState; + } + } + } else if (state is StarLoopbackState) { + final loopbackState = state; + for (var i = 0; i < loopbackState.numberOfTransitions; i++) { + final target = loopbackState.transition(i).target; + if (target is StarLoopEntryState) { + target.loopBackState = loopbackState; + } + } + } + } + } + + void readDecisions(ATN atn) { + final ndecisions = readInt(); + for (var i = 1; i <= ndecisions; i++) 
{ + final s = readInt(); + final decState = atn.states[s] as DecisionState; + atn.decisionToState.add(decState); + decState.decision = i - 1; + } + } + + void readLexerActions(ATN atn) { + if (atn.grammarType == ATNType.LEXER) { + atn.lexerActions = List.generate(readInt(), (index) { + final actionType = LexerActionType.values[readInt()]; + var data1 = readInt(); + var data2 = readInt(); + final lexerAction = lexerActionFactory(actionType, data1, data2); + + return lexerAction; + }); + } + } + + void generateRuleBypassTransitions(ATN atn) { + final length = atn.ruleToStartState.length; + atn.ruleToTokenType = + List.generate(length, (index) => atn.maxTokenType + index + 1); + + for (var i = 0; i < atn.ruleToStartState.length; i++) { + generateRuleBypassTransition(atn, i); + } + } + + void generateRuleBypassTransition(ATN atn, int idx) { + final bypassStart = BasicBlockStartState(idx); + atn.addState(bypassStart); + + final bypassStop = BlockEndState(idx); + atn.addState(bypassStop); + + bypassStart.endState = bypassStop; + atn.defineDecisionState(bypassStart); + + bypassStop.startState = bypassStart; + + ATNState? endState; + Transition? excludeTransition; + if (atn.ruleToStartState[idx].isLeftRecursiveRule) { + // wrap from the beginning of the rule to the StarLoopEntryState + endState = null; + for (var state in atn.states) { + if (state == null) { + continue; + } + if (state.ruleIndex != idx) { + continue; + } + + if (state is! StarLoopEntryState) { + continue; + } + + final maybeLoopEndState = + state.transition(state.numberOfTransitions - 1).target; + if (maybeLoopEndState is! 
LoopEndState) { + continue; + } + + if (maybeLoopEndState.epsilonOnlyTransitions && + maybeLoopEndState.transition(0).target is RuleStopState) { + endState = state; + break; + } + } + + if (endState == null) { + throw UnsupportedError( + "Couldn't identify final state of the precedence rule prefix section.", + ); + } + + excludeTransition = + (endState as StarLoopEntryState).loopBackState!.transition(0); + } else { + endState = atn.ruleToStopState[idx]; + } + + // all non-excluded transitions that currently target end state need to target blockEnd instead + for (var state in atn.states) { + if (state == null) { + continue; + } + for (var transition in state.transitions) { + if (transition == excludeTransition) { + continue; + } + + if (transition.target == endState) { + transition.target = bypassStop; + } + } + } + + // all transitions leaving the rule start state need to leave blockStart instead + while (atn.ruleToStartState[idx].numberOfTransitions > 0) { + final transition = atn.ruleToStartState[idx] + .removeTransition(atn.ruleToStartState[idx].numberOfTransitions - 1); + bypassStart.addTransition(transition); + } + + // link the new states + atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart)); + bypassStop.addTransition(EpsilonTransition(endState)); + + ATNState matchState = BasicState(idx); + atn.addState(matchState); + + matchState.addTransition(AtomTransition( + bypassStop, + atn.ruleToTokenType[idx], + )); + bypassStart.addTransition(EpsilonTransition(matchState)); + } + + /// Analyze the [StarLoopEntryState] states in the specified ATN to set + /// the {@link StarLoopEntryState#isPrecedenceDecision} field to the + /// correct value. + /// + /// @param atn The ATN. 
+ void markPrecedenceDecisions(ATN atn) { + for (var state in atn.states) { + if (state is StarLoopEntryState) { + /* We analyze the ATN to determine if this ATN decision state is the + * decision for the closure block that determines whether a + * precedence rule should continue or complete. + */ + if (atn.ruleToStartState[state.ruleIndex].isLeftRecursiveRule) { + final maybeLoopEndState = + state.transition(state.numberOfTransitions - 1).target; + if (maybeLoopEndState is LoopEndState) { + if (maybeLoopEndState.epsilonOnlyTransitions && + maybeLoopEndState.transition(0).target is RuleStopState) { + state.isPrecedenceDecision = true; + } + } + } + } + } + } + + void verifyATN(ATN atn) { + // verify assumptions + for (var state in atn.states) { + if (state == null) { + continue; + } + + checkCondition( + state.onlyHasEpsilonTransitions() || state.numberOfTransitions <= 1); + + if (state is PlusBlockStartState) { + checkCondition(state.loopBackState != null); + } + + if (state is StarLoopEntryState) { + final starLoopEntryState = state; + checkCondition(starLoopEntryState.loopBackState != null); + checkCondition(starLoopEntryState.numberOfTransitions == 2); + + if (starLoopEntryState.transition(0).target is StarBlockStartState) { + checkCondition( + starLoopEntryState.transition(1).target is LoopEndState); + checkCondition(!starLoopEntryState.nonGreedy); + } else if (starLoopEntryState.transition(0).target is LoopEndState) { + checkCondition( + starLoopEntryState.transition(1).target is StarBlockStartState); + checkCondition(starLoopEntryState.nonGreedy); + } else { + throw StateError(''); + } + } + + if (state is StarLoopbackState) { + checkCondition(state.numberOfTransitions == 1); + checkCondition(state.transition(0).target is StarLoopEntryState); + } + + if (state is LoopEndState) { + checkCondition(state.loopBackState != null); + } + + if (state is RuleStartState) { + checkCondition(state.stopState != null); + } + + if (state is BlockStartState) { + 
checkCondition(state.endState != null); + } + + if (state is BlockEndState) { + checkCondition(state.startState != null); + } + + if (state is DecisionState) { + final decisionState = state; + checkCondition(decisionState.numberOfTransitions <= 1 || + decisionState.decision >= 0); + } else { + checkCondition( + state.numberOfTransitions <= 1 || state is RuleStopState); + } + } + } + + void checkCondition(bool condition, [String message = '']) { + if (!condition) { + throw StateError(message); + } + } + + int readInt() { + return data[pos++]; + } + + Transition edgeFactory( + ATN atn, + TransitionType type, + int src, + int trg, + int arg1, + int arg2, + int arg3, + List sets, + ) { + final target = atn.states[trg]!; + switch (type) { + case TransitionType.EPSILON: + return EpsilonTransition(target); + case TransitionType.RANGE: + return arg3 != 0 + ? RangeTransition(target, Token.EOF, arg2) + : RangeTransition(target, arg1, arg2); + case TransitionType.RULE: + final rt = RuleTransition( + atn.states[arg1] as RuleStartState, arg2, arg3, target); + return rt; + case TransitionType.PREDICATE: + final pt = PredicateTransition(target, arg1, arg2, arg3 != 0); + return pt; + case TransitionType.PRECEDENCE: + return PrecedencePredicateTransition(target, arg1); + case TransitionType.ATOM: + return arg3 != 0 + ? AtomTransition(target, Token.EOF) + : AtomTransition(target, arg1); + case TransitionType.ACTION: + final a = ActionTransition(target, arg1, arg2, arg3 != 0); + return a; + case TransitionType.SET: + return SetTransition(target, sets[arg1]); + case TransitionType.NOT_SET: + return NotSetTransition(target, sets[arg1]); + case TransitionType.WILDCARD: + return WildcardTransition(target); + case TransitionType.INVALID: + throw ArgumentError.value(type, 'transition type', 'not valid.'); + default: + throw ArgumentError.value(type, 'transition type', 'not valid.'); + } + } + + ATNState? 
stateFactory(StateType type, int ruleIndex) { + ATNState s; + switch (type) { + case StateType.INVALID_TYPE: + return null; + case StateType.BASIC: + s = BasicState(ruleIndex); + break; + case StateType.RULE_START: + s = RuleStartState(ruleIndex); + break; + case StateType.BLOCK_START: + s = BasicBlockStartState(ruleIndex); + break; + case StateType.PLUS_BLOCK_START: + s = PlusBlockStartState(ruleIndex); + break; + case StateType.STAR_BLOCK_START: + s = StarBlockStartState(ruleIndex); + break; + case StateType.TOKEN_START: + s = TokensStartState(ruleIndex); + break; + case StateType.RULE_STOP: + s = RuleStopState(ruleIndex); + break; + case StateType.BLOCK_END: + s = BlockEndState(ruleIndex); + break; + case StateType.STAR_LOOP_BACK: + s = StarLoopbackState(ruleIndex); + break; + case StateType.STAR_LOOP_ENTRY: + s = StarLoopEntryState(ruleIndex); + break; + case StateType.PLUS_LOOP_BACK: + s = PlusLoopbackState(ruleIndex); + break; + case StateType.LOOP_END: + s = LoopEndState(ruleIndex); + break; + default: + throw ArgumentError.value(type, 'state type', 'not valid.'); + } + + return s; + } + + LexerAction lexerActionFactory(LexerActionType type, int data1, int data2) { + switch (type) { + case LexerActionType.CHANNEL: + return LexerChannelAction(data1); + + case LexerActionType.CUSTOM: + return LexerCustomAction(data1, data2); + + case LexerActionType.MODE: + return LexerModeAction(data1); + + case LexerActionType.MORE: + return LexerMoreAction.INSTANCE; + + case LexerActionType.POP_MODE: + return LexerPopModeAction.INSTANCE; + + case LexerActionType.PUSH_MODE: + return LexerPushModeAction(data1); + + case LexerActionType.SKIP: + return LexerSkipAction.INSTANCE; + + case LexerActionType.TYPE: + return LexerTypeAction(data1); + default: + throw ArgumentError.value(type, 'lexer action type', 'not valid.'); + } + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_simulator.dart b/runtime/Dart/lib/src/atn/src/atn_simulator.dart new file mode 100644 index 
0000000000..be598c4973 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_simulator.dart @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../dfa/dfa.dart'; +import '../../prediction_context.dart'; +import 'atn.dart'; +import 'atn_config_set.dart'; + +abstract class ATNSimulator { + /// Must distinguish between missing edge and edge we know leads nowhere */ + + static final DFAState ERROR = DFAState( + stateNumber: 0x7FFFFFFF, + configs: ATNConfigSet(), + ); + + final ATN atn; + + /// The context cache maps all PredictionContext objects that are equals() + /// to a single cached copy. This cache is shared across all contexts + /// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet + /// to use only cached nodes/graphs in addDFAState(). We don't want to + /// fill this during closure() since there are lots of contexts that + /// pop up but are not used ever again. It also greatly slows down closure(). + /// + ///

    This cache makes a huge difference in memory and a little bit in speed. + /// For the Java grammar on java.*, it dropped the memory requirements + /// at the end from 25M to 16M. We don't store any of the full context + /// graphs in the DFA because they are limited to local context only, + /// but apparently there's a lot of repetition there as well. We optimize + /// the config contexts before storing the config set in the DFA states + /// by literally rebuilding them with cached subgraphs only.

    + /// + ///

    I tried a cache for use during closure operations, that was + /// whacked after each adaptivePredict(). It cost a little bit + /// more time I think and doesn't save on the overall footprint + /// so it's not worth the complexity.

    + final PredictionContextCache? sharedContextCache; + + ATNSimulator(this.atn, this.sharedContextCache); + + void reset(); + + /// Clear the DFA cache used by the current instance. Since the DFA cache may + /// be shared by multiple ATN simulators, this method may affect the + /// performance (but not accuracy) of other parsers which are being used + /// concurrently. + /// + /// @throws UnsupportedOperationException if the current instance does not + /// support clearing the DFA. + /// + /// @since 4.3 + void clearDFA() { + throw UnsupportedError( + 'This ATN simulator does not support clearing the DFA.'); + } + + PredictionContext getCachedContext(PredictionContext context) { + if (sharedContextCache == null) return context; + + final visited = {}; + return PredictionContext.getCachedContext( + context, sharedContextCache!, visited); + } +} + +/// Used to cache [PredictionContext] objects. Its used for the shared +/// context cash associated with contexts in DFA states. This cache +/// can be used for both lexers and parsers. +class PredictionContextCache { + final cache = {}; + + /// Add a context to the cache and return it. If the context already exists, + /// return that one instead and do not add a new context to the cache. + /// Protect shared cache from unsafe thread access. + PredictionContext add(PredictionContext ctx) { + if (ctx == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; + final existing = cache[ctx]; + if (existing != null) { +// System.out.println(name+" reuses "+existing); + return existing; + } + cache[ctx] = ctx; + return ctx; + } + + PredictionContext? 
operator [](PredictionContext ctx) { + return cache[ctx]; + } + + int get length { + return cache.length; + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_state.dart b/runtime/Dart/lib/src/atn/src/atn_state.dart new file mode 100644 index 0000000000..372901d343 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_state.dart @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../interval_set.dart'; +import 'atn.dart'; +import 'transition.dart'; + +var INITIAL_NUM_TRANSITIONS = 4; + +enum StateType { + INVALID_TYPE, + BASIC, + RULE_START, + BLOCK_START, + PLUS_BLOCK_START, + STAR_BLOCK_START, + TOKEN_START, + RULE_STOP, + BLOCK_END, + STAR_LOOP_BACK, + STAR_LOOP_ENTRY, + PLUS_LOOP_BACK, + LOOP_END, +} + +/// The following images show the relation of states and +/// {@link ATNState#transitions} for various grammar constructs. +/// +///
      +/// +///
    • Solid edges marked with an ε indicate a required +/// [EpsilonTransition].
    • +/// +///
    • Dashed edges indicate locations where any transition derived from +/// [Transition] might appear.
    • +/// +///
    • Dashed nodes are place holders for either a sequence of linked +/// [BasicState] states or the inclusion of a block representing a nested +/// construct in one of the forms below.
    • +/// +///
    • Nodes showing multiple outgoing alternatives with a {@code ...} support +/// any number of alternatives (one or more). Nodes without the {@code ...} only +/// support the exact number of alternatives shown in the diagram.
    • +/// +///
    +/// +///

    Basic Blocks

    +/// +///

    Rule

    +/// +/// +/// +///

    Block of 1 or more alternatives

    +/// +/// +/// +///

    Greedy Loops

    +/// +///

    Greedy Closure: {@code (...)*}

    +/// +/// +/// +///

    Greedy Positive Closure: {@code (...)+}

    +/// +/// +/// +///

    Greedy Optional: {@code (...)?}

    +/// +/// +/// +///

    Non-Greedy Loops

    +/// +///

    Non-Greedy Closure: {@code (...)*?}

    +/// +/// +/// +///

    Non-Greedy Positive Closure: {@code (...)+?}

    +/// +/// +/// +///

    Non-Greedy Optional: {@code (...)??}

    +/// +/// +abstract class ATNState { + static final int INITIAL_NUM_TRANSITIONS = 4; + + static final int INVALID_STATE_NUMBER = -1; + + /// Which ATN are we in? */ + late ATN atn; + + int stateNumber = INVALID_STATE_NUMBER; + + int ruleIndex; // at runtime, we don't have Rule objects + + bool epsilonOnlyTransitions = false; + + /// Track the transitions emanating from this ATN state. */ + List transitions = []; + + /// Used to cache lookahead during parsing, not used during construction */ + IntervalSet? nextTokenWithinRule; + + ATNState(this.ruleIndex); + + @override + int get hashCode { + return stateNumber; + } + + @override + bool operator ==(Object o) { + // are these states same object? + if (o is ATNState) return stateNumber == o.stateNumber; + return false; + } + + bool isNonGreedyExitState() { + return false; + } + + @override + String toString() { + return stateNumber.toString(); + } + + int get numberOfTransitions { + return transitions.length; + } + + void addTransition(Transition e) { + addTransitionAt(transitions.length, e); + } + + void addTransitionAt(int index, Transition e) { + if (transitions.isEmpty) { + epsilonOnlyTransitions = e.isEpsilon; + } else if (epsilonOnlyTransitions != e.isEpsilon) { + log('ATN state $stateNumber has both epsilon and non-epsilon transitions.\n', + level: Level.SEVERE.value); + epsilonOnlyTransitions = false; + } + + var alreadyPresent = false; + for (var t in transitions) { + if (t.target.stateNumber == e.target.stateNumber) { + if (t.label != null && e.label != null && t.label == e.label) { +// System.err.println("Repeated transition upon "+e.label()+" from "+stateNumber+"->"+t.target.stateNumber); + alreadyPresent = true; + break; + } else if (t.isEpsilon && e.isEpsilon) { +// System.err.println("Repeated epsilon transition from "+stateNumber+"->"+t.target.stateNumber); + alreadyPresent = true; + break; + } + } + } + if (!alreadyPresent) { + transitions.insert(index, e); + } + } + + Transition transition(int i) 
{ + return transitions[i]; + } + + void setTransition(int i, Transition e) { + transitions[i] = e; + } + + Transition removeTransition(int index) { + return transitions.removeAt(index); + } + + StateType get stateType; + + bool onlyHasEpsilonTransitions() => epsilonOnlyTransitions; + + void setRuleIndex(int ruleIndex) { + this.ruleIndex = ruleIndex; + } +} + +class BasicState extends ATNState { + BasicState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.BASIC; +} + +class RuleStartState extends ATNState { + RuleStopState? stopState; + bool isLeftRecursiveRule = false; + + RuleStartState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.RULE_START; +} + +abstract class DecisionState extends ATNState { + int decision = 0; + bool nonGreedy = false; + + DecisionState(int ruleIndex) : super(ruleIndex); +} + +// The start of a regular {@code (...)} block. +abstract class BlockStartState extends DecisionState { + BlockEndState? endState; + + BlockStartState(int ruleIndex) : super(ruleIndex); +} + +class BasicBlockStartState extends BlockStartState { + BasicBlockStartState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.BLOCK_START; +} + +/// Start of {@code (A|B|...)+} loop. Technically a decision state, but +/// we don't use for code generation; somebody might need it, so I'm defining +/// it for completeness. In reality, the [PlusLoopbackState] node is the +/// real decision-making note for {@code A+}. +class PlusBlockStartState extends BlockStartState { + PlusLoopbackState? loopBackState; + + PlusBlockStartState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.PLUS_BLOCK_START; +} + +/// The block that begins a closure loop. 
+class StarBlockStartState extends BlockStartState { + StarBlockStartState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.STAR_BLOCK_START; +} + +/// The Tokens rule start state linking to each lexer rule start state */ +class TokensStartState extends DecisionState { + TokensStartState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.TOKEN_START; +} + +/// The last node in the ATN for a rule, unless that rule is the start symbol. +/// In that case, there is one transition to EOF. Later, we might encode +/// references to all calls to this rule to compute FOLLOW sets for +/// error handling. +class RuleStopState extends ATNState { + RuleStopState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.RULE_STOP; +} + +/// Terminal node of a simple {@code (a|b|c)} block. +class BlockEndState extends ATNState { + BlockStartState? startState; + + BlockEndState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.BLOCK_END; +} + +class StarLoopbackState extends ATNState { + StarLoopbackState(int ruleIndex) : super(ruleIndex); + + StarLoopEntryState get loopEntryState { + return transition(0).target as StarLoopEntryState; + } + + @override + StateType get stateType => StateType.STAR_LOOP_BACK; +} + +class StarLoopEntryState extends DecisionState { + StarLoopbackState? loopBackState; + + /// Indicates whether this state can benefit from a precedence DFA during SLL + /// decision making. + /// + ///

    This is a computed property that is calculated during ATN deserialization + /// and stored for use in [ParserATNSimulator] and + /// [ParserInterpreter].

    + /// + /// @see DFA#isPrecedenceDfa() + bool isPrecedenceDecision = false; + + StarLoopEntryState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.STAR_LOOP_ENTRY; +} + +/// Decision state for {@code A+} and {@code (A|B)+}. It has two transitions: +/// one to the loop back to start of the block and one to exit. +class PlusLoopbackState extends DecisionState { + PlusLoopbackState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.PLUS_LOOP_BACK; +} + +/// Mark the end of a * or + loop. +class LoopEndState extends ATNState { + ATNState? loopBackState; + + LoopEndState(int ruleIndex) : super(ruleIndex); + + @override + StateType get stateType => StateType.LOOP_END; +} diff --git a/runtime/Dart/lib/src/atn/src/atn_type.dart b/runtime/Dart/lib/src/atn/src/atn_type.dart new file mode 100644 index 0000000000..fa1f568448 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_type.dart @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +/// Represents the type of recognizer an ATN applies to. +enum ATNType { + /// A lexer grammar. + LEXER, + + /// A parser grammar. + PARSER +} diff --git a/runtime/Dart/lib/src/atn/src/info.dart b/runtime/Dart/lib/src/atn/src/info.dart new file mode 100644 index 0000000000..63c9f6c1c4 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/info.dart @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import '../../token_stream.dart'; +import '../../util/bit_set.dart'; +import 'atn_config_set.dart'; +import 'profiling_atn_simulator.dart'; +import 'semantic_context.dart'; + +/// This class represents profiling event information for a context sensitivity. +/// Context sensitivities are decisions where a particular input resulted in an +/// SLL conflict, but LL prediction produced a single unique alternative. +/// +///

    +/// In some cases, the unique alternative identified by LL prediction is not +/// equal to the minimum represented alternative in the conflicting SLL +/// configuration set. Grammars and inputs which result in this scenario are +/// unable to use {@link PredictionMode#SLL}, which in turn means they cannot use +/// the two-stage parsing strategy to improve parsing performance for that +/// input.

    +/// +/// @see ParserATNSimulator#reportContextSensitivity +/// @see ANTLRErrorListener#reportContextSensitivity +/// +/// @since 4.3 +class ContextSensitivityInfo extends DecisionEventInfo { + /// Constructs a new instance of the [ContextSensitivityInfo] class + /// with the specified detailed context sensitivity information. + /// + /// @param decision The decision number + /// @param configs The final configuration set containing the unique + /// alternative identified by full-context prediction + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the context sensitivity was + /// identified during full-context prediction + ContextSensitivityInfo(int decision, ATNConfigSet configs, TokenStream input, + int startIndex, int stopIndex) + : super(decision, configs, input, startIndex, stopIndex, true); +} + +/// This is the base class for gathering detailed information about prediction +/// events which occur during parsing. +/// +/// Note that we could record the parser call stack at the time this event +/// occurred but in the presence of left recursive rules, the stack is kind of +/// meaningless. It's better to look at the individual configurations for their +/// individual stacks. Of course that is a [PredictionContext] object +/// not a parse tree node and so it does not have information about the extent +/// (start...stop) of the various subtrees. Examining the stack tops of all +/// configurations provide the return states for the rule invocations. +/// From there you can get the enclosing rule. +/// +/// @since 4.3 +class DecisionEventInfo { + /// The invoked decision number which this event is related to. 
+ /// + /// @see ATN#decisionToState + final int decision; + + /// The configuration set containing additional information relevant to the + /// prediction state when the current event occurred, or null if no + /// additional information is relevant or available. + final ATNConfigSet? configs; + + /// The input token stream which is being parsed. + final TokenStream input; + + /// The token index in the input stream at which the current prediction was + /// originally invoked. + final int startIndex; + + /// The token index in the input stream at which the current event occurred. + final int stopIndex; + + /// [true] if the current event occurred during LL prediction; + /// otherwise, [false] if the input occurred during SLL prediction. + final bool fullCtx; + + DecisionEventInfo( + this.decision, + this.configs, + this.input, + this.startIndex, + this.stopIndex, + this.fullCtx, + ); +} + +/// This class contains profiling gathered for a particular decision. +/// +///

    +/// Parsing performance in ANTLR 4 is heavily influenced by both static factors +/// (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the +/// choice of input and the state of the DFA cache at the time profiling +/// operations are started). For best results, gather and use aggregate +/// statistics from a large sample of inputs representing the inputs expected in +/// production before using the results to make changes in the grammar.

    +/// +/// @since 4.3 +class DecisionInfo { + /// The decision number, which is an index into {@link ATN#decisionToState}. + final int decision; + + /// The total number of times {@link ParserATNSimulator#adaptivePredict} was + /// invoked for this decision. + int invocations = 0; + + /// The total time spent in {@link ParserATNSimulator#adaptivePredict} for + /// this decision, in nanoseconds. + /// + ///

    + /// The value of this field contains the sum of differential results obtained + /// by {@link System#nanoTime()}, and is not adjusted to compensate for JIT + /// and/or garbage collection overhead. For best accuracy, use a modern JVM + /// implementation that provides precise results from + /// {@link System#nanoTime()}, and perform profiling in a separate process + /// which is warmed up by parsing the input prior to profiling. If desired, + /// call {@link ATNSimulator#clearDFA} to reset the DFA cache to its initial + /// state before starting the profiling measurement pass.

    + int timeInPrediction = 0; + + /// The sum of the lookahead required for SLL prediction for this decision. + /// Note that SLL prediction is used before LL prediction for performance + /// reasons even when {@link PredictionMode#LL} or + /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used. + int SLL_TotalLook = 0; + + /// Gets the minimum lookahead required for any single SLL prediction to + /// complete for this decision, by reaching a unique prediction, reaching an + /// SLL conflict state, or encountering a syntax error. + int SLL_MinLook = 0; + + /// Gets the maximum lookahead required for any single SLL prediction to + /// complete for this decision, by reaching a unique prediction, reaching an + /// SLL conflict state, or encountering a syntax error. + int SLL_MaxLook = 0; + + /// Gets the [LookaheadEventInfo] associated with the event where the + /// {@link #SLL_MaxLook} value was set. + LookaheadEventInfo? SLL_MaxLookEvent; + + /// The sum of the lookahead required for LL prediction for this decision. + /// Note that LL prediction is only used when SLL prediction reaches a + /// conflict state. + int LL_TotalLook = 0; + + /// Gets the minimum lookahead required for any single LL prediction to + /// complete for this decision. An LL prediction completes when the algorithm + /// reaches a unique prediction, a conflict state (for + /// {@link PredictionMode#LL}, an ambiguity state (for + /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION}, or a syntax error. + int LL_MinLook = 0; + + /// Gets the maximum lookahead required for any single LL prediction to + /// complete for this decision. An LL prediction completes when the algorithm + /// reaches a unique prediction, a conflict state (for + /// {@link PredictionMode#LL}, an ambiguity state (for + /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION}, or a syntax error. 
+ int LL_MaxLook = 0; + + /// Gets the [LookaheadEventInfo] associated with the event where the + /// {@link #LL_MaxLook} value was set. + LookaheadEventInfo? LL_MaxLookEvent; + + /// A collection of [ContextSensitivityInfo] instances describing the + /// context sensitivities encountered during LL prediction for this decision. + /// + /// @see ContextSensitivityInfo + final List contextSensitivities = []; + + /// A collection of [ErrorInfo] instances describing the parse errors + /// identified during calls to {@link ParserATNSimulator#adaptivePredict} for + /// this decision. + /// + /// @see ErrorInfo + final List errors = []; + + /// A collection of [AmbiguityInfo] instances describing the + /// ambiguities encountered during LL prediction for this decision. + /// + /// @see AmbiguityInfo + final List ambiguities = []; + + /// A collection of [PredicateEvalInfo] instances describing the + /// results of evaluating individual predicates during prediction for this + /// decision. + /// + /// @see PredicateEvalInfo + final List predicateEvals = []; + + /// The total number of ATN transitions required during SLL prediction for + /// this decision. An ATN transition is determined by the number of times the + /// DFA does not contain an edge that is required for prediction, resulting + /// in on-the-fly computation of that edge. + /// + ///

    + /// If DFA caching of SLL transitions is employed by the implementation, ATN + /// computation may cache the computed edge for efficient lookup during + /// future parsing of this decision. Otherwise, the SLL parsing algorithm + /// will use ATN transitions exclusively.

    + /// + /// @see #SLL_ATNTransitions + /// @see ParserATNSimulator#computeTargetState + /// @see LexerATNSimulator#computeTargetState + int SLL_ATNTransitions = 0; + + /// The total number of DFA transitions required during SLL prediction for + /// this decision. + /// + ///

    If the ATN simulator implementation does not use DFA caching for SLL + /// transitions, this value will be 0.

    + /// + /// @see ParserATNSimulator#getExistingTargetState + /// @see LexerATNSimulator#getExistingTargetState + int SLL_DFATransitions = 0; + + /// Gets the total number of times SLL prediction completed in a conflict + /// state, resulting in fallback to LL prediction. + /// + ///

    Note that this value is not related to whether or not + /// {@link PredictionMode#SLL} may be used successfully with a particular + /// grammar. If the ambiguity resolution algorithm applied to the SLL + /// conflicts for this decision produce the same result as LL prediction for + /// this decision, {@link PredictionMode#SLL} would produce the same overall + /// parsing result as {@link PredictionMode#LL}.

    + int LL_Fallback = 0; + + /// The total number of ATN transitions required during LL prediction for + /// this decision. An ATN transition is determined by the number of times the + /// DFA does not contain an edge that is required for prediction, resulting + /// in on-the-fly computation of that edge. + /// + ///

    + /// If DFA caching of LL transitions is employed by the implementation, ATN + /// computation may cache the computed edge for efficient lookup during + /// future parsing of this decision. Otherwise, the LL parsing algorithm will + /// use ATN transitions exclusively.

    + /// + /// @see #LL_DFATransitions + /// @see ParserATNSimulator#computeTargetState + /// @see LexerATNSimulator#computeTargetState + int LL_ATNTransitions = 0; + + /// The total number of DFA transitions required during LL prediction for + /// this decision. + /// + ///

    If the ATN simulator implementation does not use DFA caching for LL + /// transitions, this value will be 0.

    + /// + /// @see ParserATNSimulator#getExistingTargetState + /// @see LexerATNSimulator#getExistingTargetState + int LL_DFATransitions = 0; + + /// Constructs a new instance of the [DecisionInfo] class to contain + /// statistics for a particular decision. + /// + /// @param decision The decision number + DecisionInfo(this.decision); + + @override + String toString() { + return '{' + 'decision=$decision' + ', contextSensitivities=${contextSensitivities.length}' + ', errors=${errors.length}' + ', ambiguities=${ambiguities.length}' + ', SLL_lookahead=$SLL_TotalLook' + ', SLL_ATNTransitions=$SLL_ATNTransitions, SLL_DFATransitions=$SLL_DFATransitions, LL_Fallback=$LL_Fallback, LL_lookahead=$LL_TotalLook, LL_ATNTransitions=$LL_ATNTransitions}'; + } +} + +/// This class represents profiling event information for an ambiguity. +/// Ambiguities are decisions where a particular input resulted in an SLL +/// conflict, followed by LL prediction also reaching a conflict state +/// (indicating a true ambiguity in the grammar). +/// +///

    +/// This event may be reported during SLL prediction in cases where the +/// conflicting SLL configuration set provides sufficient information to +/// determine that the SLL conflict is truly an ambiguity. For example, if none +/// of the ATN configurations in the conflicting SLL configuration set have +/// traversed a global follow transition (i.e. +/// {@link ATNConfig#reachesIntoOuterContext} is 0 for all configurations), then +/// the result of SLL prediction for that input is known to be equivalent to the +/// result of LL prediction for that input.

    +/// +///

    +/// In some cases, the minimum represented alternative in the conflicting LL +/// configuration set is not equal to the minimum represented alternative in the +/// conflicting SLL configuration set. Grammars and inputs which result in this +/// scenario are unable to use {@link PredictionMode#SLL}, which in turn means +/// they cannot use the two-stage parsing strategy to improve parsing performance +/// for that input.

    +/// +/// @see ParserATNSimulator#reportAmbiguity +/// @see ANTLRErrorListener#reportAmbiguity +/// +/// @since 4.3 +class AmbiguityInfo extends DecisionEventInfo { + /// The set of alternative numbers for this decision event that lead to a valid parse. */ + BitSet? ambigAlts; + + /// Constructs a new instance of the [AmbiguityInfo] class with the + /// specified detailed ambiguity information. + /// + /// @param decision The decision number + /// @param configs The final configuration set identifying the ambiguous + /// alternatives for the current input + /// @param ambigAlts The set of alternatives in the decision that lead to a valid parse. + /// The predicted alt is the min(ambigAlts) + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the ambiguity was identified during + /// prediction + /// @param fullCtx [true] if the ambiguity was identified during LL + /// prediction; otherwise, [false] if the ambiguity was identified + /// during SLL prediction + AmbiguityInfo(int decision, ATNConfigSet configs, this.ambigAlts, + TokenStream input, int startIndex, int stopIndex, bool fullCtx) + : super(decision, configs, input, startIndex, stopIndex, fullCtx); +} + +/// This class represents profiling event information for a syntax error +/// identified during prediction. Syntax errors occur when the prediction +/// algorithm is unable to identify an alternative which would lead to a +/// successful parse. +/// +/// @see Parser#notifyErrorListeners(Token, String, RecognitionException) +/// @see ANTLRErrorListener#syntaxError +/// +/// @since 4.3 +class ErrorInfo extends DecisionEventInfo { + /// Constructs a new instance of the [ErrorInfo] class with the + /// specified detailed syntax error information. 
+ /// + /// @param decision The decision number + /// @param configs The final configuration set reached during prediction + /// prior to reaching the {@link ATNSimulator#ERROR} state + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the syntax error was identified + /// @param fullCtx [true] if the syntax error was identified during LL + /// prediction; otherwise, [false] if the syntax error was identified + /// during SLL prediction + ErrorInfo( + int decision, + ATNConfigSet? configs, + TokenStream input, + int startIndex, + int stopIndex, + bool fullCtx, + ) : super(decision, configs, input, startIndex, stopIndex, fullCtx); +} + +/// This class represents profiling event information for tracking the lookahead +/// depth required in order to make a prediction. +/// +/// @since 4.3 +class LookaheadEventInfo extends DecisionEventInfo { + /// The alternative chosen by adaptivePredict(), not necessarily + /// the outermost alt shown for a rule; left-recursive rules have + /// user-level alts that differ from the rewritten rule with a (...) block + /// and a (..)* loop. + int predictedAlt; + + /// Constructs a new instance of the [LookaheadEventInfo] class with + /// the specified detailed lookahead information. + /// + /// @param decision The decision number + /// @param configs The final configuration set containing the necessary + /// information to determine the result of a prediction, or null if + /// the final configuration set is not available + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the prediction was finally made + /// @param fullCtx [true] if the current lookahead is part of an LL + /// prediction; otherwise, [false] if the current lookahead is part of + /// an SLL prediction + LookaheadEventInfo( + int decision, + ATNConfigSet? 
configs, + this.predictedAlt, + TokenStream input, + int startIndex, + int stopIndex, + bool fullCtx, + ) : super(decision, configs, input, startIndex, stopIndex, fullCtx); +} + +/// This class represents profiling event information for semantic predicate +/// evaluations which occur during prediction. +/// +/// @see ParserATNSimulator#evalSemanticContext +/// +/// @since 4.3 +class PredicateEvalInfo extends DecisionEventInfo { + /// The semantic context which was evaluated. + final SemanticContext semctx; + + /// The alternative number for the decision which is guarded by the semantic + /// context {@link #semctx}. Note that other ATN + /// configurations may predict the same alternative which are guarded by + /// other semantic contexts and/or {@link SemanticContext#NONE}. + final int predictedAlt; + + /// The result of evaluating the semantic context {@link #semctx}. + final bool evalResult; + + /// Constructs a new instance of the [PredicateEvalInfo] class with the + /// specified detailed predicate evaluation information. + /// + /// @param decision The decision number + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the predicate evaluation was + /// triggered. Note that the input stream may be reset to other positions for + /// the actual evaluation of individual predicates. + /// @param semctx The semantic context which was evaluated + /// @param evalResult The results of evaluating the semantic context + /// @param predictedAlt The alternative number for the decision which is + /// guarded by the semantic context [semctx]. See {@link #predictedAlt} + /// for more information. 
+ /// @param fullCtx [true] if the semantic context was + /// evaluated during LL prediction; otherwise, [false] if the semantic + /// context was evaluated during SLL prediction + /// + /// @see ParserATNSimulator#evalSemanticContext(SemanticContext, ParserRuleContext, int, boolean) + /// @see SemanticContext#eval(Recognizer, RuleContext) + PredicateEvalInfo( + int decision, + TokenStream input, + int startIndex, + int stopIndex, + this.semctx, + this.evalResult, + this.predictedAlt, + bool fullCtx) + : super(decision, ATNConfigSet(), input, startIndex, stopIndex, fullCtx); +} + +/// This class provides access to specific and aggregate statistics gathered +/// during profiling of a parser. +/// +/// @since 4.3 +class ParseInfo { + final ProfilingATNSimulator atnSimulator; + + ParseInfo(this.atnSimulator); + + /// Gets an array of [DecisionInfo] instances containing the profiling + /// information gathered for each decision in the ATN. + /// + /// @return An array of [DecisionInfo] instances, indexed by decision + /// number. + List get decisionInfo { + return atnSimulator.decisionInfo; + } + + /// Gets the decision numbers for decisions that required one or more + /// full-context predictions during parsing. These are decisions for which + /// {@link DecisionInfo#LL_Fallback} is non-zero. + /// + /// @return A list of decision numbers which required one or more + /// full-context predictions during parsing. + List get llDecisions { + final decisions = atnSimulator.decisionInfo; + final LL = []; + for (var i = 0; i < decisions.length; i++) { + final fallBack = decisions[i].LL_Fallback; + if (fallBack > 0) LL.add(i); + } + return LL; + } + + /// Gets the total time spent during prediction across all decisions made + /// during parsing. This value is the sum of + /// {@link DecisionInfo#timeInPrediction} for all decisions. 
+ int get totalTimeInPrediction { + final decisions = atnSimulator.decisionInfo; + var t = 0; + for (var i = 0; i < decisions.length; i++) { + t += decisions[i].timeInPrediction; + } + return t; + } + + /// Gets the total number of SLL lookahead operations across all decisions + /// made during parsing. This value is the sum of + /// {@link DecisionInfo#SLL_TotalLook} for all decisions. + int get totalSLLLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].SLL_TotalLook; + } + return k; + } + + /// Gets the total number of LL lookahead operations across all decisions + /// made during parsing. This value is the sum of + /// {@link DecisionInfo#LL_TotalLook} for all decisions. + int get totalLLLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].LL_TotalLook; + } + return k; + } + + /// Gets the total number of ATN lookahead operations for SLL prediction + /// across all decisions made during parsing. + int get totalSLLATNLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].SLL_ATNTransitions; + } + return k; + } + + /// Gets the total number of ATN lookahead operations for LL prediction + /// across all decisions made during parsing. + int get totalLLATNLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].LL_ATNTransitions; + } + return k; + } + + /// Gets the total number of ATN lookahead operations for SLL and LL + /// prediction across all decisions made during parsing. + /// + ///

    + /// This value is the sum of {@link #getTotalSLLATNLookaheadOps} and + /// {@link #getTotalLLATNLookaheadOps}.

    + int get totalATNLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].SLL_ATNTransitions; + k += decisions[i].LL_ATNTransitions; + } + return k; + } + + /// Gets the total number of DFA states stored in the DFA cache for all + /// decisions in the ATN. + int get dfaSize { + var n = 0; + final decisionToDFA = atnSimulator.decisionToDFA; + for (var i = 0; i < decisionToDFA.length; i++) { + n += getDFASizeAt(i); + } + return n; + } + + /// Gets the total number of DFA states stored in the DFA cache for a + /// particular decision. + int getDFASizeAt(int decision) { + final decisionToDFA = atnSimulator.decisionToDFA[decision]; + return decisionToDFA.states.length; + } +} diff --git a/runtime/Dart/lib/src/atn/src/lexer_action.dart b/runtime/Dart/lib/src/atn/src/lexer_action.dart new file mode 100644 index 0000000000..9d23270bcc --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/lexer_action.dart @@ -0,0 +1,608 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../lexer.dart'; +import '../../util/murmur_hash.dart'; + +/// Represents the serialization type of a [LexerAction]. +/// +/// @since 4.2 +enum LexerActionType { + /// The type of a [LexerChannelAction] action. + CHANNEL, + + /// The type of a [LexerCustomAction] action. + CUSTOM, + + /// The type of a [LexerModeAction] action. + MODE, + + /// The type of a [LexerMoreAction] action. + MORE, + + /// The type of a [LexerPopModeAction] action. + POP_MODE, + + /// The type of a [LexerPushModeAction] action. + PUSH_MODE, + + /// The type of a [LexerSkipAction] action. + SKIP, + + /// The type of a [LexerTypeAction] action. + TYPE, +} + +/// Represents a single action which can be executed following the successful +/// match of a lexer rule. 
Lexer actions are used for both embedded action syntax +/// and ANTLR 4's new lexer command syntax. +/// +/// @since 4.2 +abstract class LexerAction { + /// Gets the serialization type of the lexer action. + /// + /// @return The serialization type of the lexer action. + LexerActionType get actionType; + + /// Gets whether the lexer action is position-dependent. Position-dependent + /// actions may have different semantics depending on the [CharStream] + /// index at the time the action is executed. + /// + ///

    Many lexer commands, including [type], [skip], and + /// [more], do not check the input index during their execution. + /// Actions like this are position-independent, and may be stored more + /// efficiently as part of the {@link LexerATNConfig#lexerActionExecutor}.

    + /// + /// @return [true] if the lexer action semantics can be affected by the + /// position of the input [CharStream] at the time it is executed; + /// otherwise, [false]. + bool get isPositionDependent; + + /// Execute the lexer action in the context of the specified [Lexer]. + /// + ///

    For position-dependent actions, the input stream must already be + /// positioned correctly prior to calling this method.

    + /// + /// @param lexer The lexer instance. + void execute(Lexer lexer); +} + +/// Implements the [channel] lexer action by calling +/// {@link Lexer#setChannel} with the assigned channel. +/// +/// @since 4.2 +class LexerChannelAction implements LexerAction { + /// Gets the channel to use for the [Token] created by the lexer. + /// + /// @return The channel to use for the [Token] created by the lexer. + final int channel; + + /// Constructs a new [channel] action with the specified channel value. + /// @param channel The channel value to pass to {@link Lexer#setChannel}. + LexerChannelAction(this.channel); + + @override + LexerActionType get actionType => LexerActionType.CHANNEL; + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

    This action is implemented by calling {@link Lexer#setChannel} with the + /// value provided by {@link #getChannel}.

    + @override + void execute(Lexer lexer) { + lexer.channel = channel; + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, channel); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerChannelAction) { + return channel == obj.channel; + } + + return false; + } + + @override + String toString() { + return 'channel($channel)'; + } +} + +/// Executes a custom lexer action by calling {@link Recognizer#action} with the +/// rule and action indexes assigned to the custom action. The implementation of +/// a custom action is added to the generated code for the lexer in an override +/// of {@link Recognizer#action} when the grammar is compiled. +/// +///

    This class may represent embedded actions created with the {...} +/// syntax in ANTLR 4, as well as actions created for lexer commands where the +/// command argument could not be evaluated when the grammar was compiled.

    +/// +/// @since 4.2 +class LexerCustomAction implements LexerAction { + /// Gets the rule index to use for calls to {@link Recognizer#action}. + /// + /// @return The rule index for the custom action. + final int ruleIndex; + + /// Gets the action index to use for calls to {@link Recognizer#action}. + /// + /// @return The action index for the custom action. + final int actionIndex; + + /// Constructs a custom lexer action with the specified rule and action + /// indexes. + /// + /// @param ruleIndex The rule index to use for calls to + /// {@link Recognizer#action}. + /// @param actionIndex The action index to use for calls to + /// {@link Recognizer#action}. + LexerCustomAction(this.ruleIndex, this.actionIndex); + + /// {@inheritDoc} + /// + /// @return This method returns {@link LexerActionType#CUSTOM}. + + @override + LexerActionType get actionType => LexerActionType.CUSTOM; + + /// Gets whether the lexer action is position-dependent. Position-dependent + /// actions may have different semantics depending on the [CharStream] + /// index at the time the action is executed. + /// + ///

    Custom actions are position-dependent since they may represent a + /// user-defined embedded action which makes calls to methods like + /// {@link Lexer#getText}.

    + /// + /// @return This method returns [true]. + + @override + bool get isPositionDependent => true; + + /// {@inheritDoc} + /// + ///

    Custom actions are implemented by calling {@link Lexer#action} with the + /// appropriate rule and action indexes.

    + + @override + void execute(Lexer lexer) { + lexer.action(null, ruleIndex, actionIndex); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, ruleIndex); + hash = MurmurHash.update(hash, actionIndex); + return MurmurHash.finish(hash, 3); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerCustomAction) { + return ruleIndex == obj.ruleIndex && actionIndex == obj.actionIndex; + } + return false; + } +} + +/// Implements the [mode] lexer action by calling {@link Lexer#mode} with +/// the assigned mode. +/// +/// @since 4.2 +class LexerModeAction implements LexerAction { + /// Get the lexer mode this action should transition the lexer to. + /// + /// @return The lexer mode for this [mode] command. + final int mode; + + /// Constructs a new [mode] action with the specified mode value. + /// @param mode The mode value to pass to {@link Lexer#mode}. + LexerModeAction(this.mode); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#MODE}. + + @override + LexerActionType get actionType => LexerActionType.MODE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

    This action is implemented by calling {@link Lexer#mode} with the + /// value provided by {@link #getMode}.

    + + @override + void execute(Lexer lexer) { + lexer.mode(mode); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, mode); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerModeAction) { + return mode == obj.mode; + } + return false; + } + + @override + String toString() { + return 'mode($mode)'; + } +} + +/// Implements the [more] lexer action by calling {@link Lexer#more}. +/// +///

    The [more] command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by {@link #INSTANCE}.

    +/// +/// @since 4.2 +class LexerMoreAction implements LexerAction { + /// Provides a singleton instance of this parameterless lexer action. + static final LexerMoreAction INSTANCE = LexerMoreAction(); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#MORE}. + @override + LexerActionType get actionType => LexerActionType.MORE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

    This action is implemented by calling {@link Lexer#more}.

    + + @override + void execute(Lexer lexer) { + lexer.more(); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + return MurmurHash.finish(hash, 1); + } + + @override + bool operator ==(Object obj) { + return identical(obj, this); + } + + @override + String toString() { + return 'more'; + } +} + +/// Implements the [popMode] lexer action by calling {@link Lexer#popMode}. +/// +///

    The [popMode] command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by {@link #INSTANCE}.

    +/// +/// @since 4.2 +class LexerPopModeAction implements LexerAction { + /// Provides a singleton instance of this parameterless lexer action. + static final LexerPopModeAction INSTANCE = LexerPopModeAction(); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#POP_MODE}. + + @override + LexerActionType get actionType => LexerActionType.POP_MODE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

    This action is implemented by calling {@link Lexer#popMode}.

    + + @override + void execute(Lexer lexer) { + lexer.popMode(); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + return MurmurHash.finish(hash, 1); + } + + @override + bool operator ==(Object obj) { + return identical(obj, this); + } + + @override + String toString() { + return 'popMode'; + } +} + +/// Implements the [pushMode] lexer action by calling +/// {@link Lexer#pushMode} with the assigned mode. +/// +/// @since 4.2 +class LexerPushModeAction implements LexerAction { + /// Get the lexer mode this action should transition the lexer to. + /// + /// @return The lexer mode for this [pushMode] command. + final int mode; + + /// Constructs a new [pushMode] action with the specified mode value. + /// @param mode The mode value to pass to {@link Lexer#pushMode}. + LexerPushModeAction(this.mode); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#PUSH_MODE}. + + @override + LexerActionType get actionType => LexerActionType.PUSH_MODE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

    This action is implemented by calling {@link Lexer#pushMode} with the + /// value provided by {@link #getMode}.

    + + @override + void execute(Lexer lexer) { + lexer.pushMode(mode); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, mode); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerPushModeAction) { + return mode == obj.mode; + } + return false; + } + + @override + String toString() { + return 'pushMode($mode)'; + } +} + +/// Implements the [skip] lexer action by calling {@link Lexer#skip}. +/// +///

    The [skip] command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by {@link #INSTANCE}.

    +/// +/// @since 4.2 +class LexerSkipAction implements LexerAction { + /// Provides a singleton instance of this parameterless lexer action. + static final LexerSkipAction INSTANCE = LexerSkipAction(); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#SKIP}. + + @override + LexerActionType get actionType => LexerActionType.SKIP; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

    This action is implemented by calling {@link Lexer#skip}.

    + @override + void execute(Lexer lexer) { + lexer.skip(); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + return MurmurHash.finish(hash, 1); + } + + @override + bool operator ==(Object obj) { + return identical(obj, this); + } + + @override + String toString() { + return 'skip'; + } +} + +/// Implements the [type] lexer action by calling {@link Lexer#setType} +/// with the assigned type. +/// +/// @since 4.2 +class LexerTypeAction implements LexerAction { + /// Gets the type to assign to a token created by the lexer. + /// @return The type to assign to a token created by the lexer. + final int type; + + /// Constructs a new [type] action with the specified token type value. + /// @param type The type to assign to the token using {@link Lexer#setType}. + LexerTypeAction(this.type); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#TYPE}. + @override + LexerActionType get actionType => LexerActionType.TYPE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

    This action is implemented by calling {@link Lexer#setType} with the + /// value provided by {@link #getType}.

    + + @override + void execute(Lexer lexer) { + lexer.type = type; + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, type); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerTypeAction) { + return type == obj.type; + } + return false; + } + + @override + String toString() { + return 'type($type)'; + } +} + +/// This implementation of [LexerAction] is used for tracking input offsets +/// for position-dependent actions within a [LexerActionExecutor]. +/// +///

    This action is not serialized as part of the ATN, and is only required for +/// position-dependent lexer actions which appear at a location other than the +/// end of a rule. For more information about DFA optimizations employed for +/// lexer actions, see {@link LexerActionExecutor#append} and +/// {@link LexerActionExecutor#fixOffsetBeforeMatch}.

    +/// +/// @since 4.2 +class LexerIndexedCustomAction implements LexerAction { + /// Gets the location in the input [CharStream] at which the lexer + /// action should be executed. The value is interpreted as an offset relative + /// to the token start index. + /// + /// @return The location in the input [CharStream] at which the lexer + /// action should be executed. + final int offset; + + /// Gets the lexer action to execute. + /// + /// @return A [LexerAction] object which executes the lexer action. + final LexerAction action; + + /// Constructs a new indexed custom action by associating a character offset + /// with a [LexerAction]. + /// + ///

    Note: This class is only required for lexer actions for which + /// {@link LexerAction#isPositionDependent} returns [true].

    + /// + /// @param offset The offset into the input [CharStream], relative to + /// the token start index, at which the specified lexer action should be + /// executed. + /// @param action The lexer action to execute at a particular offset in the + /// input [CharStream]. + LexerIndexedCustomAction(this.offset, this.action); + + /// {@inheritDoc} + /// + /// @return This method returns the result of calling {@link #getActionType} + /// on the [LexerAction] returned by {@link #getAction}. + @override + LexerActionType get actionType => action.actionType; + + /// {@inheritDoc} + /// @return This method returns [true]. + + @override + bool get isPositionDependent => true; + + /// {@inheritDoc} + /// + ///

    This method calls {@link #execute} on the result of {@link #getAction} + /// using the provided [lexer].

    + + @override + void execute(Lexer lexer) { +// assume the input stream position was properly set by the calling code + action.execute(lexer); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, offset); + hash = MurmurHash.update(hash, action); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (obj == this) { + return true; + } else if (obj is LexerIndexedCustomAction) { + return offset == obj.offset && action == obj.action; + } + return false; + } +} diff --git a/runtime/Dart/lib/src/atn/src/lexer_action_executor.dart b/runtime/Dart/lib/src/atn/src/lexer_action_executor.dart new file mode 100644 index 0000000000..8fe02a0902 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/lexer_action_executor.dart @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'package:collection/collection.dart'; + +import '../../input_stream.dart'; +import '../../lexer.dart'; +import '../../util/murmur_hash.dart'; +import 'lexer_action.dart'; + +/// Represents an executor for a sequence of lexer actions which traversed during +/// the matching operation of a lexer rule (token). +/// +///

    The executor tracks position information for position-dependent lexer actions +/// efficiently, ensuring that actions appearing only at the end of the rule do +/// not cause bloating of the [DFA] created for the lexer.

    +/// +/// @since 4.2 +class LexerActionExecutor { + /// Gets the lexer actions to be executed by this executor. + /// @return The lexer actions to be executed by this executor. + final List lexerActions; + + /// Caches the result of {@link #hashCode} since the hash code is an element + /// of the performance-critical {@link LexerATNConfig#hashCode} operation. + @override + int get hashCode { + var hash = MurmurHash.initialize(); + for (var lexerAction in lexerActions) { + hash = MurmurHash.update(hash, lexerAction); + } + + return MurmurHash.finish(hash, lexerActions.length); + } + + /// Constructs an executor for a sequence of [LexerAction] actions. + /// @param lexerActions The lexer actions to execute. + LexerActionExecutor(this.lexerActions); + + /// Creates a [LexerActionExecutor] which executes the actions for + /// the input [lexerActionExecutor] followed by a specified + /// [lexerAction]. + /// + /// @param lexerActionExecutor The executor for actions already traversed by + /// the lexer while matching a token within a particular + /// [LexerATNConfig]. If this is null, the method behaves as + /// though it were an empty executor. + /// @param lexerAction The lexer action to execute after the actions + /// specified in [lexerActionExecutor]. + /// + /// @return A [LexerActionExecutor] for executing the combine actions + /// of [lexerActionExecutor] and [lexerAction]. + static LexerActionExecutor append( + LexerActionExecutor? lexerActionExecutor, + LexerAction lexerAction, + ) { + if (lexerActionExecutor == null) { + return LexerActionExecutor([lexerAction]); + } + + final lexerActions = + List.from(lexerActionExecutor.lexerActions); + lexerActions.add(lexerAction); + return LexerActionExecutor(lexerActions); + } + + /// Creates a [LexerActionExecutor] which encodes the current offset + /// for position-dependent lexer actions. + /// + ///

    Normally, when the executor encounters lexer actions where + /// {@link LexerAction#isPositionDependent} returns [true], it calls + /// {@link IntStream#seek} on the input [CharStream] to set the input + /// position to the end of the current token. This behavior provides + /// for efficient DFA representation of lexer actions which appear at the end + /// of a lexer rule, even when the lexer rule matches a variable number of + /// characters.

    + /// + ///

    Prior to traversing a match transition in the ATN, the current offset + /// from the token start index is assigned to all position-dependent lexer + /// actions which have not already been assigned a fixed offset. By storing + /// the offsets relative to the token start index, the DFA representation of + /// lexer actions which appear in the middle of tokens remains efficient due + /// to sharing among tokens of the same length, regardless of their absolute + /// position in the input stream.

    + /// + ///

    If the current executor already has offsets assigned to all + /// position-dependent lexer actions, the method returns [this].

    + /// + /// @param offset The current offset to assign to all position-dependent + /// lexer actions which do not already have offsets assigned. + /// + /// @return A [LexerActionExecutor] which stores input stream offsets + /// for all position-dependent lexer actions. + LexerActionExecutor fixOffsetBeforeMatch(int offset) { + List? updatedLexerActions; + for (var i = 0; i < lexerActions.length; i++) { + if (lexerActions[i].isPositionDependent && + lexerActions[i] is! LexerIndexedCustomAction) { + updatedLexerActions ??= List.from(lexerActions); + + updatedLexerActions[i] = + LexerIndexedCustomAction(offset, lexerActions[i]); + } + } + + if (updatedLexerActions == null) { + return this; + } + + return LexerActionExecutor(updatedLexerActions); + } + + /// Execute the actions encapsulated by this executor within the context of a + /// particular [Lexer]. + /// + ///

    This method calls {@link IntStream#seek} to set the position of the + /// [input] [CharStream] prior to calling + /// {@link LexerAction#execute} on a position-dependent action. Before the + /// method returns, the input position will be restored to the same position + /// it was in when the method was invoked.

    + /// + /// @param lexer The lexer instance. + /// @param input The input stream which is the source for the current token. + /// When this method is called, the current {@link IntStream#index} for + /// [input] should be the start of the following token, i.e. 1 + /// character past the end of the current token. + /// @param startIndex The token start index. This value may be passed to + /// {@link IntStream#seek} to set the [input] position to the beginning + /// of the token. + void execute(Lexer lexer, CharStream input, int startIndex) { + var requiresSeek = false; + final stopIndex = input.index; + try { + for (var lexerAction in lexerActions) { + if (lexerAction is LexerIndexedCustomAction) { + final offset = lexerAction.offset; + input.seek(startIndex + offset); + lexerAction = lexerAction.action; + requiresSeek = (startIndex + offset) != stopIndex; + } else if (lexerAction.isPositionDependent) { + input.seek(stopIndex); + requiresSeek = false; + } + + lexerAction.execute(lexer); + } + } finally { + if (requiresSeek) { + input.seek(stopIndex); + } + } + } + + @override + bool operator ==(Object other) { + if (identical(other, this)) { + return true; + } else if (other is! LexerActionExecutor) { + return false; + } + + return hashCode == other.hashCode && + ListEquality().equals( + lexerActions, + other.lexerActions, + ); + } +} diff --git a/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart new file mode 100644 index 0000000000..3801f727f3 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart @@ -0,0 +1,770 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../dfa/dfa.dart'; +import '../../error/error.dart'; +import '../../input_stream.dart'; +import '../../interval_set.dart'; +import '../../lexer.dart'; +import '../../prediction_context.dart'; +import '../../token.dart'; +import 'atn.dart'; +import 'atn_config.dart'; +import 'atn_config_set.dart'; +import 'atn_simulator.dart'; +import 'atn_state.dart'; +import 'lexer_action_executor.dart'; +import 'transition.dart'; + +/// When we hit an accept state in either the DFA or the ATN, we +/// have to notify the character stream to start buffering characters +/// via {@link IntStream#mark} and record the current state. The current sim state +/// includes the current index into the input, the current line, +/// and current character position in that line. Note that the Lexer is +/// tracking the starting line and characterization of the token. These +/// variables track the "state" of the simulator when it hits an accept state. +/// +///

    We track these variables separately for the DFA and ATN simulation +/// because the DFA simulation often has to fail over to the ATN +/// simulation. If the ATN simulation fails, we need the DFA to fall +/// back to its previously accepted state, if any. If the ATN succeeds, +/// then the ATN does the accept and the DFA simulator that invoked it +/// can simply return the predicted token type.

    +class SimState { + int index = -1; + int line = 0; + int charPos = -1; + + DFAState? dfaState; + + void reset() { + index = -1; + line = 0; + charPos = -1; + dfaState = null; + } +} + +/// "dup" of ParserInterpreter */ +class LexerATNSimulator extends ATNSimulator { + static const bool debug = bool.fromEnvironment( + 'ANTLR_LEXER_DEBUG', + defaultValue: false, + ); + static const bool dfa_debug = bool.fromEnvironment( + 'ANTLR_LEXER_DFA_DEBUG', + defaultValue: false, + ); + + static const int MIN_DFA_EDGE = 0; + static const int MAX_DFA_EDGE = 127; // forces unicode to stay in ATN + + final Lexer recog; + + /// The current token's starting index into the character stream. + /// Shared across DFA to ATN simulation in case the ATN fails and the + /// DFA did not have a previous accept state. In this case, we use the + /// ATN-generated exception object. + int startIndex = -1; + + /// line number 1..n within the input */ + int line = 1; + + /// The index of the character relative to the beginning of the line 0..n-1 */ + int charPositionInLine = 0; + + List decisionToDFA; + int mode = Lexer.DEFAULT_MODE; + + /// Used during DFA/ATN exec to record the most recent accept configuration info */ + + final SimState prevAccept = SimState(); + + LexerATNSimulator( + ATN atn, + this.decisionToDFA, + PredictionContextCache sharedContextCache, { + required this.recog, + }) : super(atn, sharedContextCache); + + void copyState(LexerATNSimulator simulator) { + charPositionInLine = simulator.charPositionInLine; + line = simulator.line; + mode = simulator.mode; + startIndex = simulator.startIndex; + } + + int match(CharStream input, int mode) { + this.mode = mode; + final mark = input.mark(); + try { + startIndex = input.index; + prevAccept.reset(); + final dfa = decisionToDFA[mode]; + if (dfa.s0 == null) { + return matchATN(input); + } else { + return execATN(input, dfa.s0!); + } + } finally { + input.release(mark); + } + } + + @override + void reset() { + prevAccept.reset(); + 
startIndex = -1; + line = 1; + charPositionInLine = 0; + mode = Lexer.DEFAULT_MODE; + } + + @override + void clearDFA() { + for (var d = 0; d < decisionToDFA.length; d++) { + decisionToDFA[d] = DFA(atn.getDecisionState(d)!, d); + } + } + + int matchATN(CharStream input) { + ATNState startState = atn.modeToStartState[mode]; + + if (debug) { + log('matchATN mode $mode start: $startState\n', level: Level.FINE.value); + } + + final old_mode = mode; + + final s0_closure = computeStartState(input, startState); + final suppressEdge = s0_closure.hasSemanticContext; + s0_closure.hasSemanticContext = false; + + final next = addDFAState(s0_closure); + if (!suppressEdge) { + decisionToDFA[mode].s0 = next; + } + + final predict = execATN(input, next); + + if (debug) { + log('DFA after matchATN: ${decisionToDFA[old_mode].toLexerString()}\n', + level: Level.FINE.value); + } + + return predict; + } + + int execATN(CharStream input, DFAState ds0) { + //log("enter exec index "+input.index()+" from "+ds0.configs, level: Level.FINE.value); + if (debug) { + log('start state closure=${ds0.configs}\n', level: Level.FINE.value); + } + + if (ds0.isAcceptState) { + // allow zero-length tokens + captureSimState(prevAccept, input, ds0); + } + + var t = input.LA(1)!; + + var s = ds0; // s is current/from DFA state + + while (true) { + // while more work + if (debug) { + log('execATN loop starting closure: ${s.configs}\n', + level: Level.FINE.value); + } + + // As we move src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=null, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=null, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set; there's no point in re-computing it. 
+ // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. + // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. + var target = getExistingTargetState(s, t); + target ??= computeTargetState(input, s, t); + + if (target == ATNSimulator.ERROR) { + break; + } + + // If this is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if (t != IntStream.EOF) { + consume(input); + } + + if (target.isAcceptState) { + captureSimState(prevAccept, input, target); + if (t == IntStream.EOF) { + break; + } + } + + t = input.LA(1)!; + s = target; // flip; current DFA target becomes new src/from state + } + + return failOrAccept(prevAccept, input, s.configs, t); + } + + /// Get an existing target state for an edge in the DFA. If the target state + /// for the edge has not yet been computed or is otherwise not available, + /// this method returns null. + /// + /// @param s The current DFA state + /// @param t The next input symbol + /// @return The existing target DFA state for the given input symbol + /// [t], or null if the target state for this edge is not + /// already cached + + DFAState? 
getExistingTargetState(DFAState s, int t) { + if (s.edges == null || t < MIN_DFA_EDGE || t > MAX_DFA_EDGE) { + return null; + } + + final target = s.edges![t - MIN_DFA_EDGE]; + if (debug && target != null) { + log('reuse state ${s.stateNumber} edge to ${target.stateNumber}', + level: Level.FINE.value); + } + + return target; + } + + /// Compute a target state for an edge in the DFA, and attempt to add the + /// computed state and corresponding edge to the DFA. + /// + /// @param input The input stream + /// @param s The current DFA state + /// @param t The next input symbol + /// + /// @return The computed target DFA state for the given input symbol + /// [t]. If [t] does not lead to a valid DFA state, this method + /// returns {@link #ERROR}. + + DFAState computeTargetState(CharStream input, DFAState s, int t) { + ATNConfigSet reach = OrderedATNConfigSet(); + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + getReachableConfigSet(input, s.configs, reach, t); + + if (reach.isEmpty) { + // we got nowhere on t from s + if (!reach.hasSemanticContext) { + // we got nowhere on t, don't throw out this knowledge; it'd + // cause a failover from DFA later. 
+ addDFAEdge(s, t, ATNSimulator.ERROR); + } + + // stop when we can't match any more char + return ATNSimulator.ERROR; + } + + // Add an edge from s to target DFA found/created for reach + return addDFAEdgeByConfig(s, t, reach); + } + + int failOrAccept( + SimState prevAccept, + CharStream input, + ATNConfigSet reach, + int t, + ) { + if (prevAccept.dfaState != null) { + final lexerActionExecutor = prevAccept.dfaState!.lexerActionExecutor; + accept( + input, + lexerActionExecutor, + startIndex, + prevAccept.index, + prevAccept.line, + prevAccept.charPos, + ); + return prevAccept.dfaState!.prediction; + } else { + // if no accept and EOF is first char, return EOF + if (t == IntStream.EOF && input.index == startIndex) { + return Token.EOF; + } + + throw LexerNoViableAltException(recog, input, startIndex, reach); + } + } + + /// Given a starting configuration set, figure out all ATN configurations + /// we can reach upon input [t]. Parameter [reach] is a return + /// parameter. + void getReachableConfigSet( + CharStream input, + ATNConfigSet configs, + ATNConfigSet reach, + int t, + ) { + // this is used to skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + var skipAlt = ATN.INVALID_ALT_NUMBER; + for (var c in configs) { + final currentAltReachedAcceptState = c.alt == skipAlt; + if (currentAltReachedAcceptState && + (c as LexerATNConfig).hasPassedThroughNonGreedyDecision()) { + continue; + } + + if (debug) { + log('testing ${getTokenName(t)} at ${c.toString(recog, true)}\n', + level: Level.FINE.value); + } + + final n = c.state.numberOfTransitions; + for (var ti = 0; ti < n; ti++) { + // for each transition + final trans = c.state.transition(ti); + final target = getReachableTarget(trans, t); + if (target != null) { + var lexerActionExecutor = (c as LexerATNConfig).lexerActionExecutor; + if (lexerActionExecutor != null) { + lexerActionExecutor = lexerActionExecutor + 
.fixOffsetBeforeMatch(input.index - startIndex); + } + + final treatEofAsEpsilon = t == IntStream.EOF; + if (closure( + input, + LexerATNConfig.dup(c, target, + lexerActionExecutor: lexerActionExecutor), + reach, + currentAltReachedAcceptState, + true, + treatEofAsEpsilon)) { + // any remaining configs for this alt have a lower priority than + // the one that just reached an accept state. + skipAlt = c.alt; + break; + } + } + } + } + } + + void accept( + CharStream input, + LexerActionExecutor? lexerActionExecutor, + int startIndex, + int index, + int line, + int charPos, + ) { + if (debug) { + log('ACTION $lexerActionExecutor\n', level: Level.FINE.value); + } + + // seek to after last char in token + input.seek(index); + this.line = line; + charPositionInLine = charPos; + + if (lexerActionExecutor != null) { + lexerActionExecutor.execute(recog, input, startIndex); + } + } + + ATNState? getReachableTarget(Transition trans, int t) { + if (trans.matches(t, Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE)) { + return trans.target; + } + + return null; + } + + ATNConfigSet computeStartState(CharStream input, ATNState p) { + PredictionContext initialContext = EmptyPredictionContext.Instance; + ATNConfigSet configs = OrderedATNConfigSet(); + for (var i = 0; i < p.numberOfTransitions; i++) { + final target = p.transition(i).target; + final c = LexerATNConfig(target, i + 1, initialContext); + closure(input, c, configs, false, false, false); + } + return configs; + } + + /// Since the alternatives within any lexer decision are ordered by + /// preference, this method stops pursuing the closure as soon as an accept + /// state is reached. After the first accept state is reached by depth-first + /// search from [config], all other (potentially reachable) states for + /// this rule would have a lower priority. + /// + /// @return [true] if an accept state is reached, otherwise + /// [false]. 
+ bool closure( + CharStream input, + LexerATNConfig config, + ATNConfigSet configs, + bool currentAltReachedAcceptState, + bool speculative, + bool treatEofAsEpsilon) { + if (debug) { + log('closure(' + config.toString(recog, true) + ')', + level: Level.FINE.value); + } + + if (config.state is RuleStopState) { + if (debug) { + log('closure at ${recog.ruleNames[config.state.ruleIndex]} rule stop $config\n', + level: Level.FINE.value); + } + + if (config.context == null || config.context!.hasEmptyPath()) { + if (config.context == null || config.context!.isEmpty) { + configs.add(config); + return true; + } else { + configs.add(LexerATNConfig.dup( + config, + config.state, + context: EmptyPredictionContext.Instance, + )); + currentAltReachedAcceptState = true; + } + } + + if (config.context != null && !config.context!.isEmpty) { + for (var i = 0; i < config.context!.length; i++) { + if (config.context!.getReturnState(i) != + PredictionContext.EMPTY_RETURN_STATE) { + final newContext = + config.context!.getParent(i); // "pop" return state + final returnState = atn.states[config.context!.getReturnState(i)]!; + final c = + LexerATNConfig.dup(config, returnState, context: newContext); + currentAltReachedAcceptState = closure( + input, + c, + configs, + currentAltReachedAcceptState, + speculative, + treatEofAsEpsilon, + ); + } + } + } + + return currentAltReachedAcceptState; + } + + // optimization + if (!config.state.onlyHasEpsilonTransitions()) { + if (!currentAltReachedAcceptState || + !config.hasPassedThroughNonGreedyDecision()) { + configs.add(config); + } + } + + final p = config.state; + for (var i = 0; i < p.numberOfTransitions; i++) { + final t = p.transition(i); + final c = getEpsilonTarget( + input, + config, + t, + configs, + speculative, + treatEofAsEpsilon, + ); + if (c != null) { + currentAltReachedAcceptState = closure(input, c, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon); + } + } + + return currentAltReachedAcceptState; + } + + 
// side-effect: can alter configs.hasSemanticContext + + LexerATNConfig? getEpsilonTarget( + CharStream input, + LexerATNConfig config, + Transition t, + ATNConfigSet configs, + bool speculative, + bool treatEofAsEpsilon, + ) { + LexerATNConfig? c; + switch (t.type) { + case TransitionType.RULE: + final ruleTransition = t as RuleTransition; + PredictionContext newContext = SingletonPredictionContext.create( + config.context!, + ruleTransition.followState.stateNumber, + ); + c = LexerATNConfig.dup(config, t.target, context: newContext); + break; + + case TransitionType.PRECEDENCE: + throw UnsupportedError( + 'Precedence predicates are not supported in lexers.'); + case TransitionType.PREDICATE: + /* Track traversing semantic predicates. If we traverse, + we cannot add a DFA state for this "reach" computation + because the DFA would not test the predicate again in the + future. Rather than creating collections of semantic predicates + like v3 and testing them on prediction, v4 will test them on the + fly all the time using the ATN not the DFA. This is slower but + semantically it's not used that often. One of the key elements to + this predicate mechanism is not adding DFA states that see + predicates immediately afterwards in the ATN. For example, + + a : ID {p1}? | ID {p2}? ; + + should create the start state for rule 'a' (to save start state + competition), but should not create target of ID state. The + collection of ATN states the following ID references includes + states reached by traversing predicates. Since this is when we + test them, we cannot cash the DFA state target of ID. 
+ */ + final pt = t as PredicateTransition; + if (debug) { + log('EVAL rule ${pt.ruleIndex}:${pt.predIndex}', + level: Level.FINE.value); + } + configs.hasSemanticContext = true; + if (evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative)) { + c = LexerATNConfig.dup(config, t.target); + } + break; + case TransitionType.ACTION: + if (config.context == null || config.context!.hasEmptyPath()) { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty is false. In this case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + final lexerActionExecutor = LexerActionExecutor.append( + config.lexerActionExecutor, + atn.lexerActions![(t as ActionTransition).actionIndex], + ); + c = LexerATNConfig.dup(config, t.target, + lexerActionExecutor: lexerActionExecutor); + } else { + // ignore actions in referenced rules + c = LexerATNConfig.dup(config, t.target); + } + break; + + case TransitionType.EPSILON: + c = LexerATNConfig.dup(config, t.target); + break; + + case TransitionType.ATOM: + case TransitionType.RANGE: + case TransitionType.SET: + if (treatEofAsEpsilon) { + if (t.matches( + IntStream.EOF, Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE)) { + c = LexerATNConfig.dup(config, t.target); + break; + } + } + break; + case TransitionType.NOT_SET: + break; + case TransitionType.WILDCARD: + break; + case TransitionType.INVALID: + throw ArgumentError.value(t.type, 'TransitionType'); + } + + return c; + } + + /// Evaluate a predicate specified in the lexer. + /// + ///

    If [speculative] is [true], this method was called before + /// {@link #consume} for the matched character. This method should call + /// {@link #consume} before evaluating the predicate to ensure position + /// sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine}, + /// and {@link Lexer#getCharPositionInLine}, properly reflect the current + /// lexer state. This method should restore [input] and the simulator + /// to the original state before returning (i.e. undo the actions made by the + /// call to {@link #consume}.

    + /// + /// @param input The input stream. + /// @param ruleIndex The rule containing the predicate. + /// @param predIndex The index of the predicate within the rule. + /// @param speculative [true] if the current index in [input] is + /// one character before the predicate's location. + /// + /// @return [true] if the specified predicate evaluates to + /// [true]. + bool evaluatePredicate( + CharStream input, + int ruleIndex, + int predIndex, + bool speculative, + ) { + if (!speculative) { + return recog.sempred(null, ruleIndex, predIndex); + } + + final savedCharPositionInLine = charPositionInLine; + final savedLine = line; + final index = input.index; + final marker = input.mark(); + try { + consume(input); + return recog.sempred(null, ruleIndex, predIndex); + } finally { + charPositionInLine = savedCharPositionInLine; + line = savedLine; + input.seek(index); + input.release(marker); + } + } + + void captureSimState(SimState settings, CharStream input, DFAState dfaState) { + settings.index = input.index; + settings.line = line; + settings.charPos = charPositionInLine; + settings.dfaState = dfaState; + } + + DFAState addDFAEdgeByConfig(DFAState from, int t, ATNConfigSet q) { + /* leading to this call, ATNConfigSet.hasSemanticContext is used as a + * marker indicating dynamic predicate evaluation makes this edge + * dependent on the specific input sequence, so the static edge in the + * DFA should be omitted. The target DFAState is still created since + * execATN has the ability to resynchronize with the DFA state cache + * following the predicate evaluation step. + * + * TJP notes: next time through the DFA, we see a pred again and eval. + * If that gets us to a previously created (but dangling) DFA + * state, we can continue in pure DFA mode from there. 
+ */ + final suppressEdge = q.hasSemanticContext; + q.hasSemanticContext = false; + + final to = addDFAState(q); + + if (suppressEdge) { + return to; + } + + addDFAEdge(from, t, to); + return to; + } + + void addDFAEdge(DFAState p, int t, DFAState q) { + if (t < MIN_DFA_EDGE || t > MAX_DFA_EDGE) { + // Only track edges within the DFA bounds + return; + } + + if (debug) { + log('EDGE $p -> $q upon ${String.fromCharCode(t)}', + level: Level.FINE.value); + } + + p.edges ??= List.filled(MAX_DFA_EDGE - MIN_DFA_EDGE + 1, null); + p.edges![t - MIN_DFA_EDGE] = q; // connect + } + + /// Add a new DFA state if there isn't one with this set of + /// configurations already. This method also detects the first + /// configuration containing an ATN rule stop state. Later, when + /// traversing the DFA, we will know which rule to accept. + DFAState addDFAState(ATNConfigSet configs) { + /* the lexer evaluates predicates on-the-fly; by this point configs + * should not contain any configurations with unevaluated predicates. + */ + assert(!configs.hasSemanticContext); + + final proposed = DFAState(configs: configs); + ATNConfig? firstConfigWithRuleStopState; + for (var c in configs) { + if (c.state is RuleStopState) { + firstConfigWithRuleStopState = c; + break; + } + } + + if (firstConfigWithRuleStopState != null) { + proposed.isAcceptState = true; + proposed.lexerActionExecutor = + (firstConfigWithRuleStopState as LexerATNConfig).lexerActionExecutor; + proposed.prediction = + atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]; + } + + final dfa = decisionToDFA[mode]; + final existing = dfa.states[proposed]; + if (existing != null) return existing; + + final newState = proposed; + + newState.stateNumber = dfa.states.length; + configs.readOnly = true; + newState.configs = configs; + dfa.states[newState] = newState; + return newState; + } + + DFA getDFA(int mode) { + return decisionToDFA[mode]; + } + + /// Get the text matched so far for the current token. 
+ + String getText(CharStream input) { + // index is first lookahead char, don't include. + return input.getText(Interval.of(startIndex, input.index - 1)); + } + + void consume(CharStream input) { + final curChar = input.LA(1); + if (curChar == 10) { + // Is new line + line++; + charPositionInLine = 0; + } else { + charPositionInLine++; + } + input.consume(); + } + + String getTokenName(int t) { + if (t == -1) return 'EOF'; + //if ( atn.g!=null ) return atn.g.getTokenDisplayName(t); + return "'${String.fromCharCode(t)}'"; + } +} diff --git a/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart new file mode 100644 index 0000000000..062ff6fd27 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart @@ -0,0 +1,2790 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:collection'; +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../dfa/dfa.dart'; +import '../../error/error.dart'; +import '../../input_stream.dart'; +import '../../interval_set.dart'; +import '../../misc/misc.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../prediction_context.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import '../../token_stream.dart'; +import '../../util/bit_set.dart'; +import '../../util/murmur_hash.dart'; +import 'atn.dart'; +import 'atn_config.dart'; +import 'atn_config_set.dart'; +import 'atn_simulator.dart'; +import 'atn_state.dart'; +import 'semantic_context.dart'; +import 'transition.dart'; + +/// The embodiment of the adaptive LL(*), ALL(*), parsing strategy. +/// +///

    +/// The basic complexity of the adaptive strategy makes it harder to understand. +/// We begin with ATN simulation to build paths in a DFA. Subsequent prediction +/// requests go through the DFA first. If they reach a state without an edge for +/// the current symbol, the algorithm fails over to the ATN simulation to +/// complete the DFA path for the current input (until it finds a conflict state +/// or uniquely predicting state).

    +/// +///

    +/// All of that is done without using the outer context because we want to create +/// a DFA that is not dependent upon the rule invocation stack when we do a +/// prediction. One DFA works in all contexts. We avoid using context not +/// necessarily because it's slower, although it can be, but because of the DFA +/// caching problem. The closure routine only considers the rule invocation stack +/// created during prediction beginning in the decision rule. For example, if +/// prediction occurs without invoking another rule's ATN, there are no context +/// stacks in the configurations. When lack of context leads to a conflict, we +/// don't know if it's an ambiguity or a weakness in the strong LL(*) parsing +/// strategy (versus full LL(*)).

    +/// +///

    +/// When SLL yields a configuration set with conflict, we rewind the input and +/// retry the ATN simulation, this time using full outer context without adding +/// to the DFA. Configuration context stacks will be the full invocation stacks +/// from the start rule. If we get a conflict using full context, then we can +/// definitively say we have a true ambiguity for that input sequence. If we +/// don't get a conflict, it implies that the decision is sensitive to the outer +/// context. (It is not context-sensitive in the sense of context-sensitive +/// grammars.)

    +/// +///

    +/// The next time we reach this DFA state with an SLL conflict, through DFA +/// simulation, we will again retry the ATN simulation using full context mode. +/// This is slow because we can't save the results and have to "interpret" the +/// ATN each time we get that input.

    +/// +///

    +/// CACHING FULL CONTEXT PREDICTIONS

    +/// +///

    +/// We could cache results from full context to predicted alternative easily and +/// that saves a lot of time but doesn't work in presence of predicates. The set +/// of visible predicates from the ATN start state changes depending on the +/// context, because closure can fall off the end of a rule. I tried to cache +/// tuples (stack context, semantic context, predicted alt) but it was slower +/// than interpreting and much more complicated. Also required a huge amount of +/// memory. The goal is not to create the world's fastest parser anyway. I'd like +/// to keep this algorithm simple. By launching multiple threads, we can improve +/// the speed of parsing across a large number of files.

    +/// +///

    +/// There is no strict ordering between the amount of input used by SLL vs LL, +/// which makes it really hard to build a cache for full context. Let's say that +/// we have input A B C that leads to an SLL conflict with full context X. That +/// implies that using X we might only use A B but we could also use A B C D to +/// resolve conflict. Input A B C D could predict alternative 1 in one position +/// in the input and A B C E could predict alternative 2 in another position in +/// input. The conflicting SLL configurations could still be non-unique in the +/// full context prediction, which would lead us to requiring more input than the +/// original A B C. To make a prediction cache work, we have to track the exact +/// input used during the previous prediction. That amounts to a cache that maps +/// X to a specific DFA for that context.

    +/// +///

    +/// Something should be done for left-recursive expression predictions. They are +/// likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry +/// with full LL thing Sam does.

    +/// +///

    +/// AVOIDING FULL CONTEXT PREDICTION

    +/// +///

    +/// We avoid doing full context retry when the outer context is empty, we did not +/// dip into the outer context by falling off the end of the decision state rule, +/// or when we force SLL mode.

    +/// +///

    +/// As an example of the not dip into outer context case, consider as super +/// constructor calls versus function calls. One grammar might look like +/// this:

    +/// +///
    +/// ctorBody
    +///   : '{' superCall? stat* '}'
    +///   ;
    +/// 
    +/// +///

    +/// Or, you might see something like

    +/// +///
    +/// stat
    +///   : superCall ';'
    +///   | expression ';'
    +///   | ...
    +///   ;
    +/// 
    +/// +///

    +/// In both cases I believe that no closure operations will dip into the outer +/// context. In the first case ctorBody in the worst case will stop at the '}'. +/// In the 2nd case it should stop at the ';'. Both cases should stay within the +/// entry rule and not dip into the outer context.

    +/// +///

    +/// PREDICATES

    +/// +///

    +/// Predicates are always evaluated if present in either SLL or LL both. SLL and +/// LL simulation deals with predicates differently. SLL collects predicates as +/// it performs closure operations like ANTLR v3 did. It delays predicate +/// evaluation until it reaches an accept state. This allows us to cache the SLL +/// ATN simulation whereas, if we had evaluated predicates on-the-fly during +/// closure, the DFA state configuration sets would be different and we couldn't +/// build up a suitable DFA.

    +/// +///

    +/// When building a DFA accept state during ATN simulation, we evaluate any +/// predicates and return the sole semantically valid alternative. If there is +/// more than 1 alternative, we report an ambiguity. If there are 0 alternatives, +/// we throw an exception. Alternatives without predicates act like they have +/// true predicates. The simple way to think about it is to strip away all +/// alternatives with false predicates and choose the minimum alternative that +/// remains.

    +/// +///

    +/// When we start in the DFA and reach an accept state that's predicated, we test +/// those and return the minimum semantically viable alternative. If no +/// alternatives are viable, we throw an exception.

    +/// +///

    +/// During full LL ATN simulation, closure always evaluates predicates and +/// on-the-fly. This is crucial to reducing the configuration set size during +/// closure. It hits a landmine when parsing with the Java grammar, for example, +/// without this on-the-fly evaluation.

    +/// +///

    +/// SHARING DFA

    +/// +///

    +/// All instances of the same parser share the same decision DFAs through a +/// static field. Each instance gets its own ATN simulator but they share the +/// same {@link #decisionToDFA} field. They also share a +/// [PredictionContextCache] object that makes sure that all +/// [PredictionContext] objects are shared among the DFA states. This makes +/// a big size difference.

    +/// +///

    +/// THREAD SAFETY

    +/// +///

    +/// The [ParserATNSimulator] locks on the {@link #decisionToDFA} field when +/// it adds a new DFA object to that array. {@link #addDFAEdge} +/// locks on the DFA for the current decision when setting the +/// {@link DFAState#edges} field. {@link #addDFAState} locks on +/// the DFA for the current decision when looking up a DFA state to see if it +/// already exists. We must make sure that all requests to add DFA states that +/// are equivalent result in the same shared DFA object. This is because lots of +/// threads will be trying to update the DFA at once. The +/// {@link #addDFAState} method also locks inside the DFA lock +/// but this time on the shared context cache when it rebuilds the +/// configurations' [PredictionContext] objects using cached +/// subgraphs/nodes. No other locking occurs, even during DFA simulation. This is +/// safe as long as we can guarantee that all threads referencing +/// {@code s.edge[t]} get the same physical target [DFAState], or +/// null. Once into the DFA, the DFA simulation does not reference the +/// {@link DFA#states} map. It follows the {@link DFAState#edges} field to new +/// targets. The DFA simulator will either find {@link DFAState#edges} to be +/// null, to be non-null and {@code dfa.edges[t]} null, or +/// {@code dfa.edges[t]} to be non-null. The +/// {@link #addDFAEdge} method could be racing to set the field +/// but in either case the DFA simulator works; if null, and requests ATN +/// simulation. It could also race trying to get {@code dfa.edges[t]}, but either +/// way it will work because it's not doing a test and set operation.

    +/// +///

    +/// Starting with SLL then failing to combined SLL/LL (Two-Stage +/// Parsing)

    +/// +///

    +/// Sam pointed out that if SLL does not give a syntax error, then there is no +/// point in doing full LL, which is slower. We only have to try LL if we get a +/// syntax error. For maximum speed, Sam starts the parser set to pure SLL +/// mode with the [BailErrorStrategy]:

    +/// +///
    +/// parser.{@link Parser#interpreter interpreter}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
    +/// parser.{@link Parser#setErrorHandler setErrorHandler}(new [BailErrorStrategy]());
    +/// 
    +/// +///

    +/// If it does not get a syntax error, then we're done. If it does get a syntax +/// error, we need to retry with the combined SLL/LL strategy.

    +/// +///

    +/// The reason this works is as follows. If there are no SLL conflicts, then the +/// grammar is SLL (at least for that input set). If there is an SLL conflict, +/// the full LL analysis must yield a set of viable alternatives which is a +/// subset of the alternatives reported by SLL. If the LL set is a singleton, +/// then the grammar is LL but not SLL. If the LL set is the same size as the SLL +/// set, the decision is SLL. If the LL set has size &gt; 1, then that decision +/// is truly ambiguous on the current input. If the LL set is smaller, then the +/// SLL conflict resolution might choose an alternative that the full LL would +/// rule out as a possibility based upon better context information. If that's +/// the case, then the SLL parse will definitely get an error because the full LL +/// analysis says it's not viable. If SLL conflict resolution chooses an +/// alternative within the LL set, then both SLL and LL would choose the same +/// alternative because they both choose the minimum of multiple conflicting +/// alternatives.

    +/// +///

    +/// Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and +/// a smaller LL set called s. If s is {@code {2, 3}}, then SLL +/// parsing will get an error because SLL will pursue alternative 1. If +/// s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will +/// choose the same alternative because alternative one is the minimum of either +/// set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax +/// error. If s is {@code {1}} then SLL will succeed.

    +/// +///

    +/// Of course, if the input is invalid, then we will get an error for sure in +/// both SLL and LL parsing. Erroneous input will therefore require 2 passes over +/// the input.

    +class ParserATNSimulator extends ATNSimulator { + static const bool debug = bool.fromEnvironment( + 'ANTLR_PARSER_DEBUG', + defaultValue: false, + ); + static const bool trace_atn_sim = bool.fromEnvironment( + 'ANTLR_PARSER_LIST_ATN_DECISIONS_DEBUG', + defaultValue: false, + ); + static const bool dfa_debug = bool.fromEnvironment( + 'ANTLR_PARSER_DFA_DEBUG', + defaultValue: false, + ); + static const bool retry_debug = bool.fromEnvironment( + 'ANTLR_PARSER_RETRY_DEBUG', + defaultValue: false, + ); + + /// Just in case this optimization is bad, add an ENV variable to turn it off */ + static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = + bool.fromEnvironment('TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT'); + + final Parser parser; + + final List decisionToDFA; + + /// SLL, LL, or LL + exact ambig detection? */ + + PredictionMode predictionMode = PredictionMode.LL; + + /// Each prediction operation uses a cache for merge of prediction contexts. + /// Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + /// isn't synchronized but we're ok since two threads shouldn't reuse same + /// parser/atnsim object because it can only handle one input at a time. + /// This maps graphs a and b to merged result c. (a,b)→c. We can avoid + /// the merge if we ever see a and b again. Note that (b,a)→c should + /// also be examined during cache lookup. + Map, PredictionContext>? + mergeCache; + + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + late TokenStream input; + int startIndex = 0; + ParserRuleContext? _outerContext; + DFA? _dfa; + + ParserATNSimulator( + this.parser, + ATN atn, + this.decisionToDFA, + PredictionContextCache? 
sharedContextCache, + ) : super(atn, sharedContextCache); + + @override + void reset() {} + + @override + void clearDFA() { + for (var d = 0; d < decisionToDFA.length; d++) { + decisionToDFA[d] = DFA(atn.getDecisionState(d), d); + } + } + + int adaptivePredict( + TokenStream input_, + int decision, + ParserRuleContext? outerContext, + ) { + if (debug || trace_atn_sim) { + log('adaptivePredict decision $decision' ' exec LA(1)==' + + getLookaheadName(input_) + + ' line ${input_.LT(1)!.line}:${input_.LT(1)!.charPositionInLine}'); + } + + input = input_; + startIndex = input_.index; + _outerContext = outerContext; + final dfa = decisionToDFA[decision]; + _dfa = dfa; + + final m = input_.mark(); + final index = startIndex; + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + try { + DFAState? s0; + if (dfa.isPrecedenceDfa()) { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + s0 = dfa.getPrecedenceStartState(parser.precedence); + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0; + } + + if (s0 == null) { + outerContext ??= ParserRuleContext.EMPTY; + if (debug || trace_atn_sim) { + log('predictATN decision ${dfa.decision}' ' exec LA(1)==' + + getLookaheadName(input_) + + ', outerContext=' + + outerContext.toString(recog: parser)); + } + + final fullCtx = false; + var s0_closure = computeStartState( + dfa.atnStartState!, + ParserRuleContext.EMPTY, + fullCtx, + ); + + if (dfa.isPrecedenceDfa()) { + /* If this is a precedence DFA, we use applyPrecedenceFilter + * to convert the computed start state to a precedence start + * state. We then use DFA.setPrecedenceStartState to set the + * appropriate start state for the precedence level rather + * than simply setting DFA.s0. 
+ */ + // not used for prediction but useful to know start configs anyway + dfa.s0!.configs = s0_closure; + s0_closure = applyPrecedenceFilter(s0_closure); + s0 = addDFAState(dfa, DFAState(configs: s0_closure)); + dfa.setPrecedenceStartState(parser.precedence, s0); + } else { + s0 = addDFAState(dfa, DFAState(configs: s0_closure)); + dfa.s0 = s0; + } + } + + final alt = execATN(dfa, s0, input_, index, outerContext!); + if (debug) { + log('DFA after predictATN: ' + dfa.toString(parser.vocabulary)); + } + return alt; + } finally { + mergeCache = null; // wack cache after each prediction + _dfa = null; + input_.seek(index); + input_.release(m); + } + } + + /// Performs ATN simulation to compute a predicted alternative based + /// upon the remaining input, but also updates the DFA cache to avoid + /// having to traverse the ATN again for the same input sequence. + /// + /// There are some key conditions we're looking for after computing a new + /// set of ATN configs (proposed DFA state): + /// if the set is empty, there is no viable alternative for current symbol + /// does the state uniquely predict an alternative? + /// does the state have a conflict that would prevent us from + /// putting it on the work list? + /// + /// We also have some key operations to do: + /// add an edge from previous DFA state to potentially new DFA state, D, + /// upon current symbol but only if adding to work list, which means in all + /// cases except no viable alternative (and possibly non-greedy decisions?) 
+ /// collecting predicates and adding semantic context to DFA accept states + /// adding rule context to context-sensitive DFA accept states + /// consuming an input symbol + /// reporting a conflict + /// reporting an ambiguity + /// reporting a context sensitivity + /// reporting insufficient predicates + /// + /// cover these cases: + /// dead end + /// single alt + /// single alt + preds + /// conflict + /// conflict + preds + /// + int execATN(DFA dfa, DFAState s0, TokenStream input, int startIndex, + ParserRuleContext outerContext) { + if (debug || trace_atn_sim) { + log('execATN decision ${dfa.decision}' ' exec LA(1)==' + + getLookaheadName(input) + + ' line ${input.LT(1)!.line}' + + ':${input.LT(1)!.charPositionInLine}'); + } + + var previousD = s0; + + if (debug) log('s0 = $s0'); + + var t = input.LA(1)!; + + while (true) { + // while more work + var D = getExistingTargetState(previousD, t); + D ??= computeTargetState(dfa, previousD, t); + D = D as DFAState; + if (D == ATNSimulator.ERROR) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ final e = noViableAlt( + input, + outerContext, + previousD.configs, + startIndex, + ); + input.seek(startIndex); + final alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule( + previousD.configs, outerContext); + if (alt != ATN.INVALID_ALT_NUMBER) { + return alt; + } + throw e; + } + + if (D.requiresFullContext && predictionMode != PredictionMode.SLL) { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + var conflictingAlts = D.configs.conflictingAlts; + if (D.predicates != null) { + if (debug) log('DFA state has preds in DFA sim LL failover'); + final conflictIndex = input.index; + if (conflictIndex != startIndex) { + input.seek(startIndex); + } + + conflictingAlts = + evalSemanticContext(D.predicates!, outerContext, true); + if (conflictingAlts.cardinality == 1) { + if (debug) log('Full LL avoided'); + return conflictingAlts.nextset(0); + } + + if (conflictIndex != startIndex) { + // restore the index so reporting the fallback to full + // context occurs with the index at the correct spot + input.seek(conflictIndex); + } + } + + if (dfa_debug) log('ctx sensitive state $outerContext in $D'); + final fullCtx = true; + final s0_closure = computeStartState( + dfa.atnStartState!, + outerContext, + fullCtx, + ); + reportAttemptingFullContext( + dfa, + conflictingAlts, + D.configs, + startIndex, + input.index, + ); + final alt = execATNWithFullContext( + dfa, + D, + s0_closure, + input, + startIndex, + outerContext, + ); + return alt; + } + + if (D.isAcceptState) { + if (D.predicates == null) { + return D.prediction; + } + + final stopIndex = input.index; + input.seek(startIndex); + final alts = evalSemanticContext(D.predicates!, outerContext, true); + switch (alts.cardinality) { + case 0: + throw noViableAlt(input, outerContext, D.configs, startIndex); + + case 1: + return alts.nextset(0); + + default: + // report ambiguity after predicate evaluation to make sure the correct + // set of ambig alts is reported. 
+ reportAmbiguity( + dfa, + D, + startIndex, + stopIndex, + false, + alts, + D.configs, + ); + return alts.nextset(0); + } + } + + previousD = D; + + if (t != IntStream.EOF) { + input.consume(); + t = input.LA(1)!; + } + } + } + + /// Get an existing target state for an edge in the DFA. If the target state + /// for the edge has not yet been computed or is otherwise not available, + /// this method returns null. + /// + /// @param previousD The current DFA state + /// @param t The next input symbol + /// @return The existing target DFA state for the given input symbol + /// [t], or null if the target state for this edge is not + /// already cached + DFAState? getExistingTargetState(DFAState previousD, int t) { + final edges = previousD.edges; + if (edges == null || t + 1 < 0 || t + 1 >= edges.length) { + return null; + } + + return edges[t + 1]; + } + + /// Compute a target state for an edge in the DFA, and attempt to add the + /// computed state and corresponding edge to the DFA. + /// + /// @param dfa The DFA + /// @param previousD The current DFA state + /// @param t The next input symbol + /// + /// @return The computed target DFA state for the given input symbol + /// [t]. If [t] does not lead to a valid DFA state, this method + /// returns {@link #ERROR}. + DFAState? computeTargetState(DFA dfa, DFAState previousD, int t) { + final reach = computeReachSet(previousD.configs, t, false); + if (reach == null) { + addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR); + return ATNSimulator.ERROR; + } + + // create new target state; we'll add to DFA after it's complete + DFAState? 
D = DFAState(configs: reach); + + final predictedAlt = getUniqueAlt(reach); + + if (debug) { + final altSubSets = + PredictionModeExtension.getConflictingAltSubsets(reach); + log('SLL altSubSets=$altSubSets' + ', configs=$reach' + ', predict=$predictedAlt, allSubsetsConflict=${PredictionModeExtension.allSubsetsConflict(altSubSets)}, conflictingAlts=${getConflictingAlts(reach)}'); + } + + if (predictedAlt != ATN.INVALID_ALT_NUMBER) { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true; + D.configs.uniqueAlt = predictedAlt; + D.prediction = predictedAlt; + } else if (PredictionModeExtension.hasSLLConflictTerminatingPrediction( + predictionMode, reach)) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = getConflictingAlts(reach); + D.requiresFullContext = true; + // in SLL-only mode, we will stop at this state and return the minimum alt + D.isAcceptState = true; + D.prediction = D.configs.conflictingAlts!.nextset(0); + } + + if (D.isAcceptState && D.configs.hasSemanticContext) { + predicateDFAState(D, atn.getDecisionState(dfa.decision)); + if (D.predicates != null) { + D.prediction = ATN.INVALID_ALT_NUMBER; + } + } + + // all adds to dfa are done after we've created full D state + D = addDFAEdge(dfa, previousD, t, D); + return D; + } + + void predicateDFAState(DFAState dfaState, DecisionState? decisionState) { + // Todo: this if was added due to a possible null pointer error + if (decisionState == null) return; + + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. 
+ final nalts = decisionState.numberOfTransitions; + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + final altsToCollectPredsFrom = getConflictingAltsOrUniqueAlt( + dfaState.configs, + ); + final altToPred = getPredsForAmbigAlts( + altsToCollectPredsFrom, + dfaState.configs, + nalts, + ); + if (altToPred != null) { + dfaState.predicates = + getPredicatePredictions(altsToCollectPredsFrom, altToPred); + dfaState.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.prediction = altsToCollectPredsFrom.nextset(0); + } + } + + // comes back with reach.uniqueAlt set to a valid alt + int execATNWithFullContext( + DFA dfa, + DFAState D, // how far we got in SLL DFA before failing over + ATNConfigSet s0, + TokenStream input, + int startIndex, + ParserRuleContext outerContext) { + if (debug || trace_atn_sim) { + log('execATNWithFullContext $s0'); + } + final fullCtx = true; + var foundExactAmbig = false; + ATNConfigSet? reach; + var previous = s0; + input.seek(startIndex); + var t = input.LA(1)!; + int predictedAlt; + while (true) { + // while more work +// log("LL REACH "+getLookaheadName(input)+ +// " from configs.size="+previous.length+ +// " line "+input.LT(1).getLine()+":"+input.LT(1).getCharPositionInLine()); + reach = computeReachSet(previous, t, fullCtx); + if (reach == null) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. 
+ // If conflict in states that dip out, choose min since we + // will get error no matter what. + final e = noViableAlt(input, outerContext, previous, startIndex); + input.seek(startIndex); + final alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule( + previous, + outerContext, + ); + if (alt != ATN.INVALID_ALT_NUMBER) { + return alt; + } + throw e; + } + + final altSubSets = + PredictionModeExtension.getConflictingAltSubsets(reach); + if (debug) { + log('LL altSubSets=$altSubSets' + ', predict=${PredictionModeExtension.getUniqueAlt(altSubSets)}' + ', resolvesToJustOneViableAlt=${PredictionModeExtension.resolvesToJustOneViableAlt(altSubSets)}'); + } + +// log("altSubSets: "+altSubSets); +// log("reach="+reach+", "+reach.conflictingAlts, level: Level.SEVERE.value); + reach.uniqueAlt = getUniqueAlt(reach); + // unique prediction? + if (reach.uniqueAlt != ATN.INVALID_ALT_NUMBER) { + predictedAlt = reach.uniqueAlt; + break; + } + if (predictionMode != PredictionMode.LL_EXACT_AMBIG_DETECTION) { + predictedAlt = + PredictionModeExtension.resolvesToJustOneViableAlt(altSubSets); + if (predictedAlt != ATN.INVALID_ALT_NUMBER) { + break; + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if (PredictionModeExtension.allSubsetsConflict(altSubSets) && + PredictionModeExtension.allSubsetsEqual(altSubSets)) { + foundExactAmbig = true; + predictedAlt = PredictionModeExtension.getSingleViableAlt(altSubSets); + break; + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + + previous = reach; + if (t != IntStream.EOF) { + input.consume(); + t = input.LA(1)!; + } + } + + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. 
+ if (reach.uniqueAlt != ATN.INVALID_ALT_NUMBER) { + reportContextSensitivity( + dfa, predictedAlt, reach, startIndex, input.index); + return predictedAlt; + } + + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + /* + In non-exact ambiguity detection mode, we might actually be able to + detect an exact ambiguity, but I'm not going to spend the cycles + needed to check. We only emit ambiguity warnings in exact ambiguity + mode. + + For example, we might know that we have conflicting configurations. + But, that does not mean that there is no way forward without a + conflict. It's possible to have nonconflicting alt subsets as in: + + LL altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + from + + [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + + In this case, (17,1,[5 $]) indicates there is some next sequence that + would resolve this without conflict to alternative 1. Any other viable + next sequence, however, is associated with a conflict. We stop + looking for input because no amount of further lookahead will alter + the fact that we should predict alternative 1. We just can't say for + sure that there is an ambiguity without looking further. + */ + reportAmbiguity( + dfa, D, startIndex, input.index, foundExactAmbig, reach.alts, reach); + + return predictedAlt; + } + + ATNConfigSet? computeReachSet(ATNConfigSet config, int t, bool fullCtx) { + if (debug) log('in computeReachSet, starting closure: $config'); + + mergeCache ??= {}; + + final intermediate = ATNConfigSet(fullCtx); + + /* Configurations already in a rule stop state indicate reaching the end + * of the decision rule (local context) or end of the start rule (full + * context). Once reached, these configurations are never updated by a + * closure operation, so they are handled separately for the performance + * advantage of having a smaller intermediate set when calling closure. 
+ * + * For full-context reach operations, separate handling is required to + * ensure that the alternative matching the longest overall sequence is + * chosen when multiple such configurations can match the input. + */ + List? skippedStopStates; + + // First figure out where we can reach on input t + for (var c in config) { + if (debug) log('testing ' + getTokenName(t) + ' at ' + c.toString()); + + if (c.state is RuleStopState) { + assert(c.context!.isEmpty); + if (fullCtx || t == IntStream.EOF) { + skippedStopStates ??= []; + + skippedStopStates.add(c); + } + + continue; + } + + final n = c.state.numberOfTransitions; + for (var ti = 0; ti < n; ti++) { + // for each transition + final trans = c.state.transition(ti); + final target = getReachableTarget(trans, t); + if (target != null) { + intermediate.add(ATNConfig.dup(c, state: target), mergeCache); + } + } + } + + // Now figure out where the reach operation can take us... + + ATNConfigSet? reach; + + /* This block optimizes the reach operation for intermediate sets which + * trivially indicate a termination state for the overall + * adaptivePredict operation. + * + * The conditions assume that intermediate + * contains all configurations relevant to the reach set, but this + * condition is not true when one or more configurations have been + * withheld in skippedStopStates, or when the current symbol is EOF. + */ + if (skippedStopStates == null && t != Token.EOF) { + if (intermediate.length == 1) { + // Don't pursue the closure if there is just one state. + // It can only have one alternative; just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate; + } else if (getUniqueAlt(intermediate) != ATN.INVALID_ALT_NUMBER) { + // Also don't pursue the closure if there is unique alternative + // among the configurations. 
+ reach = intermediate; + } + } + + /* If the reach set could not be trivially determined, perform a closure + * operation on the intermediate set to compute its initial value. + */ + if (reach == null) { + reach = ATNConfigSet(fullCtx); + final closureBusy = {}; + final treatEofAsEpsilon = t == Token.EOF; + for (var c in intermediate) { + closure(c, reach, closureBusy, false, fullCtx, treatEofAsEpsilon); + } + } + + if (t == IntStream.EOF) { + /* After consuming EOF no additional input is possible, so we are + * only interested in configurations which reached the end of the + * decision rule (local context) or end of the start rule (full + * context). Update reach to contain only these configurations. This + * handles both explicit EOF transitions in the grammar and implicit + * EOF transitions following the end of the decision or start rule. + * + * When reach==intermediate, no closure operation was performed. In + * this case, removeAllConfigsNotInRuleStopState needs to check for + * reachable rule stop states as well as configurations already in + * a rule stop state. + * + * This is handled before the configurations in skippedStopStates, + * because any configurations potentially added from that list are + * already guaranteed to meet this condition whether or not it's + * required. + */ + reach = removeAllConfigsNotInRuleStopState(reach, reach == intermediate); + } + + /* If skippedStopStates is not null, then it contains at least one + * configuration. For full-context reach operations, these + * configurations reached the end of the start rule, in which case we + * only add them back to reach if no configuration during the current + * closure operation reached such a state. This ensures adaptivePredict + * chooses an alternative matching the longest overall sequence when + * multiple alternatives are viable. 
+ */ + if (skippedStopStates != null && + (!fullCtx || + !PredictionModeExtension.hasConfigInRuleStopState(reach))) { + assert(skippedStopStates.isNotEmpty); + for (var c in skippedStopStates) { + reach.add(c, mergeCache); + } + } + + if (reach.isEmpty) return null; + return reach; + } + + /// Return a configuration set containing only the configurations from + /// [configs] which are in a [RuleStopState]. If all + /// configurations in [configs] are already in a rule stop state, this + /// method simply returns [configs]. + /// + ///

    When [lookToEndOfRule] is true, this method uses + /// {@link ATN#nextTokens} for each configuration in [configs] which is + /// not already in a rule stop state to see if a rule stop state is reachable + /// from the configuration via epsilon-only transitions.

    + /// + /// @param configs the configuration set to update + /// @param lookToEndOfRule when true, this method checks for rule stop states + /// reachable by epsilon-only transitions from each configuration in + /// [configs]. + /// + /// @return [configs] if all configurations in [configs] are in a + /// rule stop state, otherwise return a new configuration set containing only + /// the configurations from [configs] which are in a rule stop state + ATNConfigSet removeAllConfigsNotInRuleStopState( + ATNConfigSet configs, bool lookToEndOfRule) { + if (PredictionModeExtension.allConfigsInRuleStopStates(configs)) { + return configs; + } + + final result = ATNConfigSet(configs.fullCtx); + for (var config in configs) { + if (config.state is RuleStopState) { + result.add(config, mergeCache); + continue; + } + + if (lookToEndOfRule && config.state.onlyHasEpsilonTransitions()) { + final nextTokens = atn.nextTokens(config.state); + if (nextTokens.contains(Token.EPSILON)) { + ATNState endOfRuleState = atn.ruleToStopState[config.state.ruleIndex]; + result.add(ATNConfig.dup(config, state: endOfRuleState), mergeCache); + } + } + } + + return result; + } + + ATNConfigSet computeStartState(ATNState p, RuleContext ctx, bool fullCtx) { + // always at least the implicit call to start rule + final initialContext = PredictionContext.fromRuleContext(atn, ctx); + final configs = ATNConfigSet(fullCtx); + + for (var i = 0; i < p.numberOfTransitions; i++) { + final target = p.transition(i).target; + final c = ATNConfig(target, i + 1, initialContext); + final closureBusy = {}; + closure(c, configs, closureBusy, true, fullCtx, false); + } + + return configs; + } + + /* parrt internal source braindump that doesn't mess up + * external API spec. + context-sensitive in that they can only be properly evaluated + in the context of the proper prec argument. Without pruning, + these predicates are normal predicates evaluated when we reach + conflict state (or unique prediction). 
As we cannot evaluate + these predicates out of context, the resulting conflict leads + to full LL evaluation and nonlinear prediction which shows up + very clearly with fairly large expressions. + + Example grammar: + + e : e '*' e + | e '+' e + | INT + ; + + We convert that to the following: + + e[int prec] + : INT + ( {3>=prec}? '*' e[4] + | {2>=prec}? '+' e[3] + )* + ; + + The (..)* loop has a decision for the inner block as well as + an enter or exit decision, which is what concerns us here. At + the 1st + of input 1+2+3, the loop entry sees both predicates + and the loop exit also sees both predicates by falling off the + edge of e. This is because we have no stack information with + SLL and find the follow of e, which will hit the return states + inside the loop after e[4] and e[3], which brings it back to + the enter or exit decision. In this case, we know that we + cannot evaluate those predicates because we have fallen off + the edge of the stack and will in general not know which prec + parameter is the right one to use in the predicate. + + Because we have special information, that these are precedence + predicates, we can resolve them without failing over to full + LL despite their context sensitive nature. We make an + assumption that prec[-1] <= prec[0], meaning that the current + precedence level is greater than or equal to the precedence + level of recursive invocations above us in the stack. For + example, if predicate {3>=prec}? is true of the current prec, + then one option is to enter the loop to match it now. The + other option is to exit the loop and the left recursive rule + to match the current operator in rule invocation further up + the stack. But, we know that all of those prec are lower or + the same value and so we can decide to enter the loop instead + of matching it later. That means we can strip out the other + configuration for the exit branch. + + So imagine we have (14,1,$,{2>=prec}?) 
and then + (14,2,$-dipsIntoOuterContext,{2>=prec}?). The optimization + allows us to collapse these two configurations. We know that + if {2>=prec}? is true for the current prec parameter, it will + also be true for any prec from an invoking e call, indicated + by dipsIntoOuterContext. As the predicates are both true, we + have the option to evaluate them early in the decision start + state. We do this by stripping both predicates and choosing to + enter the loop as it is consistent with the notion of operator + precedence. It's also how the full LL conflict resolution + would work. + + The solution requires a different DFA start state for each + precedence level. + + The basic filter mechanism is to remove configurations of the + form (p, 2, pi) if (p, 1, pi) exists for the same p and pi. In + other words, for the same ATN state and predicate context, + remove any configuration associated with an exit branch if + there is a configuration associated with the enter branch. + + It's also the case that the filter evaluates precedence + predicates and resolves conflicts according to precedence + levels. For example, for input 1+2+3 at the first +, we see + prediction filtering + + [(11,1,[$],{3>=prec}?), (14,1,[$],{2>=prec}?), (5,2,[$],up=1), + (11,2,[$],up=1), (14,2,[$],up=1)],hasSemanticContext=true,dipsIntoOuterContext + + to + + [(11,1,[$]), (14,1,[$]), (5,2,[$],up=1)],dipsIntoOuterContext + + This filters because {3>=prec}? evals to true and collapses + (11,1,[$],{3>=prec}?) and (11,2,[$],up=1) since early conflict + resolution based upon rules of operator precedence fits with + our usual match first alt upon conflict. + + We noticed a problem where a recursive call resets precedence + to 0. Sam's fix: each config has flag indicating if it has + returned from an expr[0] call. then just don't filter any + config with that flag set. flag is carried along in + closure(). 
so to avoid adding field, set bit just under sign + bit of dipsIntoOuterContext (SUPPRESS_PRECEDENCE_FILTER). + With the change you filter "unless (p, 2, pi) was reached + after leaving the rule stop state of the LR rule containing + state p, corresponding to a rule invocation with precedence + level 0" + */ + + /// This method transforms the start state computed by + /// {@link #computeStartState} to the special start state used by a + /// precedence DFA for a particular precedence value. The transformation + /// process applies the following changes to the start state's configuration + /// set. + /// + ///
      + ///
    1. Evaluate the precedence predicates for each configuration using + /// {@link SemanticContext#evalPrecedence}.
    2. + ///
    3. When {@link ATNConfig#isPrecedenceFilterSuppressed} is [false], + /// remove all configurations which predict an alternative greater than 1, + /// for which another configuration that predicts alternative 1 is in the + /// same ATN state with the same prediction context. This transformation is + /// valid for the following reasons: + ///
        + ///
      • The closure block cannot contain any epsilon transitions which bypass + /// the body of the closure, so all states reachable via alternative 1 are + /// part of the precedence alternatives of the transformed left-recursive + /// rule.
      • + ///
      • The "primary" portion of a left recursive rule cannot contain an + /// epsilon transition, so the only way an alternative other than 1 can exist + /// in a state that is also reachable via alternative 1 is by nesting calls + /// to the left-recursive rule, with the outer calls not being at the + /// preferred precedence level. The + /// {@link ATNConfig#isPrecedenceFilterSuppressed} property marks ATN + /// configurations which do not meet this condition, and therefore are not + /// eligible for elimination during the filtering process.
      • + ///
      + ///
    4. + ///
    + /// + ///

    + /// The prediction context must be considered by this filter to address + /// situations like the following. + ///

    + /// + ///
    +  /// grammar TA;
    +  /// prog: statement* EOF;
    +  /// statement: letterA | statement letterA 'b' ;
    +  /// letterA: 'a';
    +  /// 
    + ///
    + ///

    + /// If the above grammar, the ATN state immediately before the token + /// reference {@code 'a'} in [letterA] is reachable from the left edge + /// of both the primary and closure blocks of the left-recursive rule + /// [statement]. The prediction context associated with each of these + /// configurations distinguishes between them, and prevents the alternative + /// which stepped out to [prog] (and then back in to [statement] + /// from being eliminated by the filter. + ///

    + /// + /// @param configs The configuration set computed by + /// {@link #computeStartState} as the start state for the DFA. + /// @return The transformed configuration set representing the start state + /// for a precedence DFA at a particular precedence level (determined by + /// calling {@link Parser#getPrecedence}). + ATNConfigSet applyPrecedenceFilter(ATNConfigSet configs) { + final statesFromAlt1 = {}; + final configSet = ATNConfigSet(configs.fullCtx); + for (var config in configs) { + // handle alt 1 first + if (config.alt != 1) { + continue; + } + + final updatedContext = config.semanticContext.evalPrecedence( + parser, + _outerContext, + ); + if (updatedContext == null) { + // the configuration was eliminated + continue; + } + assert(config.context != null); + final configContext = config.context!; + + statesFromAlt1[config.state.stateNumber] = configContext; + if (updatedContext != config.semanticContext) { + configSet.add( + ATNConfig.dup(config, semanticContext: updatedContext), mergeCache); + } else { + configSet.add(config, mergeCache); + } + } + + for (var config in configs) { + if (config.alt == 1) { + // already handled + continue; + } + + if (!config.isPrecedenceFilterSuppressed()) { + /* In the future, this elimination step could be updated to also + * filter the prediction context for alternatives predicting alt>1 + * (basically a graph subtraction algorithm). + */ + assert(config.context != null); + final configContext = config.context!; + final context = statesFromAlt1[config.state.stateNumber]; + if (context != null && context == configContext) { + // eliminated + continue; + } + } + + configSet.add(config, mergeCache); + } + + return configSet; + } + + ATNState? getReachableTarget(Transition trans, int ttype) { + if (trans.matches(ttype, 0, atn.maxTokenType)) { + return trans.target; + } + + return null; + } + + List? 
getPredsForAmbigAlts( + BitSet ambigAlts, + ATNConfigSet configs, + int nalts, + ) { + // REACH=[1|1|[]|0:0, 1|2|[]|0:1] + /* altToPred starts as an array of all null contexts. The entry at index i + * corresponds to alternative i. altToPred[i] may have one of three values: + * 1. null: no ATNConfig c is found such that c.alt==i + * 2. SemanticContext.NONE: At least one ATNConfig c exists such that + * c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + * alt i has at least one unpredicated config. + * 3. Non-NONE Semantic Context: There exists at least one, and for all + * ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. + * + * From this, it is clear that NONE||anything==NONE. + */ + final altToPred = List.filled(nalts + 1, null); + + for (var c in configs) { + if (ambigAlts[c.alt]) { + altToPred[c.alt] = + SemanticContext.or(altToPred[c.alt], c.semanticContext); + } + } + + var nPredAlts = 0; + for (var i = 1; i <= nalts; i++) { + if (altToPred[i] == null) { + altToPred[i] = EmptySemanticContext.Instance; + } else if (altToPred[i] != EmptySemanticContext.Instance) { + nPredAlts++; + } + } + +// // Optimize away p||p and p&&p TODO: optimize() was a no-op +// for (int i = 0; i < altToPred.length; i++) { +// altToPred[i] = altToPred[i].optimize(); +// } + + if (debug) log('getPredsForAmbigAlts result $altToPred'); + // nonambig alts are null in altToPred + if (nPredAlts == 0) return null; + return altToPred; + } + + List? getPredicatePredictions( + BitSet? 
ambigAlts, + List altToPred, + ) { + final pairs = []; + var containsPredicate = false; + for (var i = 1; i < altToPred.length; i++) { + final pred = altToPred[i]; + + // unpredicated is indicated by SemanticContext.NONE + assert(pred != null); + + if (ambigAlts != null && ambigAlts[i]) { + pairs.add(PredPrediction(pred!, i)); + } + if (pred != EmptySemanticContext.Instance) containsPredicate = true; + } + + if (!containsPredicate) { + return null; + } + +// log(Arrays.toString(altToPred)+"->"+pairs); + return pairs; + } + + /// This method is used to improve the localization of error messages by + /// choosing an alternative rather than throwing a + /// [NoViableAltException] in particular prediction scenarios where the + /// {@link #ERROR} state was reached during ATN simulation. + /// + ///

    + /// The default implementation of this method uses the following + /// algorithm to identify an ATN configuration which successfully parsed the + /// decision entry rule. Choosing such an alternative ensures that the + /// [ParserRuleContext] returned by the calling rule will be complete + /// and valid, and the syntax error will be reported later at a more + /// localized location.

    + /// + ///
      + ///
    • If a syntactically valid path or paths reach the end of the decision rule and + /// they are semantically valid if predicated, return the min associated alt.
    • + ///
    • Else, if a semantically invalid but syntactically valid path exist + /// or paths exist, return the minimum associated alt. + ///
    • + ///
    • Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.
    • + ///
    + /// + ///

    + /// In some scenarios, the algorithm described above could predict an + /// alternative which will result in a [FailedPredicateException] in + /// the parser. Specifically, this could occur if the only configuration + /// capable of successfully parsing to the end of the decision rule is + /// blocked by a semantic predicate. By choosing this alternative within + /// {@link #adaptivePredict} instead of throwing a + /// [NoViableAltException], the resulting + /// [FailedPredicateException] in the parser will identify the specific + /// predicate which is preventing the parser from successfully parsing the + /// decision rule, which helps developers identify and correct logic errors + /// in semantic predicates. + ///

    + /// + /// @param configs The ATN configurations which were valid immediately before + /// the {@link #ERROR} state was reached + /// @param outerContext The is the \gamma_0 initial parser context from the paper + /// or the parser stack at the instant before prediction commences. + /// + /// @return The value to return from {@link #adaptivePredict}, or + /// {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not + /// identified and {@link #adaptivePredict} should report an error instead. + int getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule( + ATNConfigSet configs, ParserRuleContext outerContext) { + final sets = splitAccordingToSemanticValidity(configs, outerContext); + final semValidConfigs = sets.a; + final semInvalidConfigs = sets.b; + var alt = getAltThatFinishedDecisionEntryRule(semValidConfigs); + if (alt != ATN.INVALID_ALT_NUMBER) { + // semantically/syntactically viable path exists + return alt; + } + // Is there a syntactically valid path with a failed pred? + if (semInvalidConfigs.isNotEmpty) { + alt = getAltThatFinishedDecisionEntryRule(semInvalidConfigs); + if (alt != ATN.INVALID_ALT_NUMBER) { + // syntactically viable path exists + return alt; + } + } + return ATN.INVALID_ALT_NUMBER; + } + + int getAltThatFinishedDecisionEntryRule(ATNConfigSet configs) { + final alts = IntervalSet(); + for (var c in configs) { + assert(c.context != null); + if (c.outerContextDepth > 0 || + (c.state is RuleStopState && c.context!.hasEmptyPath())) { + alts.addOne(c.alt); + } + } + if (alts.length == 0) return ATN.INVALID_ALT_NUMBER; + return alts.minElement; + } + + /// Walk the list of configurations and split them according to + /// those that have preds evaluating to true/false. If no pred, assume + /// true pred and include in succeeded set. Returns Pair of sets. + /// + /// Create a new set so as not to alter the incoming parameter. 
+ /// + /// Assumption: the input stream has been restored to the starting point + /// prediction, which is where predicates need to evaluate. + Pair splitAccordingToSemanticValidity( + ATNConfigSet configs, + ParserRuleContext outerContext, + ) { + final succeeded = ATNConfigSet(configs.fullCtx); + final failed = ATNConfigSet(configs.fullCtx); + for (var c in configs) { + if (c.semanticContext != EmptySemanticContext.Instance) { + final predicateEvaluationResult = evalSemanticContextOne( + c.semanticContext, + outerContext, + c.alt, + configs.fullCtx, + ); + if (predicateEvaluationResult) { + succeeded.add(c); + } else { + failed.add(c); + } + } else { + succeeded.add(c); + } + } + return Pair(succeeded, failed); + } + + /// Look through a list of predicate/alt pairs, returning alts for the + /// pairs that win. A [Instance] predicate indicates an alt containing an + /// unpredicated config which behaves as "always true." If !complete + /// then we stop at the first predicate that evaluates to true. This + /// includes pairs with null predicates. + BitSet evalSemanticContext( + List predPredictions, + ParserRuleContext outerContext, + bool complete, + ) { + final predictions = BitSet(); + for (var pair in predPredictions) { + if (pair.pred == EmptySemanticContext.Instance) { + predictions.set(pair.alt); + if (!complete) { + break; + } + continue; + } + + final fullCtx = false; // in dfa + final predicateEvaluationResult = + evalSemanticContextOne(pair.pred, outerContext, pair.alt, fullCtx); + if (debug || dfa_debug) { + log('eval pred $pair=$predicateEvaluationResult'); + } + + if (predicateEvaluationResult) { + if (debug || dfa_debug) log('PREDICT ' + pair.alt.toString()); + predictions.set(pair.alt); + if (!complete) { + break; + } + } + } + + return predictions; + } + + /// Evaluate a semantic context within a specific parser context. + /// + ///

    + /// This method might not be called for every semantic context evaluated + /// during the prediction process. In particular, we currently do not + /// evaluate the following but it may change in the future:

    + /// + ///
      + ///
    • Precedence predicates (represented by + /// {@link SemanticContext.PrecedencePredicate}) are not currently evaluated + /// through this method.
    • + ///
    • Operator predicates (represented by {@link SemanticContext.AND} and + /// {@link SemanticContext.OR}) are evaluated as a single semantic + /// context, rather than evaluating the operands individually. + /// Implementations which require evaluation results from individual + /// predicates should override this method to explicitly handle evaluation of + /// the operands within operator predicates.
    • + ///
    + /// + /// @param pred The semantic context to evaluate + /// @param parserCallStack The parser context in which to evaluate the + /// semantic context + /// @param alt The alternative which is guarded by [pred] + /// @param fullCtx [true] if the evaluation is occurring during LL + /// prediction; otherwise, [false] if the evaluation is occurring + /// during SLL prediction + /// + /// @since 4.3 + bool evalSemanticContextOne( + SemanticContext pred, + ParserRuleContext? parserCallStack, + int alt, + bool fullCtx, + ) { + return pred.eval(parser, parserCallStack); + } + + /* TODO: If we are doing predicates, there is no point in pursuing + closure operations if we reach a DFA state that uniquely predicts + alternative. We will not be caching that DFA state and it is a + waste to pursue the closure. Might have to advance when we do + ambig detection thought :( + */ + + void closure( + ATNConfig config, + ATNConfigSet configs, + Set closureBusy, + bool collectPredicates, + bool fullCtx, + bool treatEofAsEpsilon) { + final initialDepth = 0; + closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEofAsEpsilon); + assert(!fullCtx || !configs.dipsIntoOuterContext); + } + + void closureCheckingStopState( + ATNConfig config, + ATNConfigSet configs, + Set closureBusy, + bool collectPredicates, + bool fullCtx, + int depth, + bool treatEofAsEpsilon) { + if (debug) log('closure(' + config.toString(parser, true) + ')'); + + assert(config.context != null); + + final configContext = config.context!; + + if (config.state is RuleStopState) { + // We hit rule end. 
If we have context info, use it + // run thru all possible stack tops in ctx + if (!configContext.isEmpty) { + for (var i = 0; i < configContext.length; i++) { + if (configContext.getReturnState(i) == + PredictionContext.EMPTY_RETURN_STATE) { + if (fullCtx) { + configs.add( + ATNConfig.dup( + config, + state: config.state, + context: EmptyPredictionContext.Instance, + ), + mergeCache); + continue; + } else { + // we have no context info, just chase follow links (if greedy) + if (debug) { + log('FALLING off rule ' + getRuleName(config.state.ruleIndex)); + } + closure_( + config, + configs, + closureBusy, + collectPredicates, + fullCtx, + depth, + treatEofAsEpsilon, + ); + } + continue; + } + final returnState = atn.states[configContext.getReturnState(i)]!; + final newContext = configContext.getParent(i); // "pop" return state + final c = ATNConfig( + returnState, + config.alt, + newContext, + config.semanticContext, + ); + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. + // + // This assignment also propagates the + // isPrecedenceFilterSuppressed() value to the new + // configuration. 
+ c.reachesIntoOuterContext = config.reachesIntoOuterContext; +// assert(depth > int.MIN_VALUE); + closureCheckingStopState(c, configs, closureBusy, collectPredicates, + fullCtx, depth - 1, treatEofAsEpsilon); + } + return; + } else if (fullCtx) { + // reached end of start rule + configs.add(config, mergeCache); + return; + } else { + // else if we have no context info, just chase follow links (if greedy) + if (debug) { + log('FALLING off rule ' + getRuleName(config.state.ruleIndex)); + } + } + } + + closure_( + config, + configs, + closureBusy, + collectPredicates, + fullCtx, + depth, + treatEofAsEpsilon, + ); + } + + /// Do the actual work of walking epsilon edges */ + void closure_( + ATNConfig config, + ATNConfigSet configs, + Set closureBusy, + bool collectPredicates, + bool fullCtx, + int depth, + bool treatEofAsEpsilon) { + final p = config.state; + // optimization + if (!p.onlyHasEpsilonTransitions()) { + configs.add(config, mergeCache); + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. +// if ( debug ) log("added config "+configs); + } + + for (var i = 0; i < p.numberOfTransitions; i++) { + if (i == 0 && canDropLoopEntryEdgeInLeftRecursiveRule(config)) continue; + + final t = p.transition(i); + final continueCollecting = (t is! ActionTransition) && collectPredicates; + final c = getEpsilonTarget(config, t, continueCollecting, depth == 0, + fullCtx, treatEofAsEpsilon); + if (c != null) { + var newDepth = depth; + if (config.state is RuleStopState) { + assert(!fullCtx); + // target fell off end of rule; mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if this is > 0. 
+ + if (_dfa != null && _dfa!.isPrecedenceDfa()) { + final outermostPrecedenceReturn = + (t as EpsilonTransition).outermostPrecedenceReturn; + if (outermostPrecedenceReturn == _dfa!.atnStartState!.ruleIndex) { + c.setPrecedenceFilterSuppressed(true); + } + } + + c.reachesIntoOuterContext++; + + if (!closureBusy.add(c)) { + // avoid infinite recursion for right-recursive rules + continue; + } + + // TODO: can remove? only care when we add to set per middle of this method + configs.dipsIntoOuterContext = true; +// assert(newDepth > int.MIN_VALUE); + newDepth--; + if (debug) log('dips into outer ctx: $c'); + } else { + if (!t.isEpsilon && !closureBusy.add(c)) { + // avoid infinite recursion for EOF* and EOF+ + continue; + } + + if (t is RuleTransition) { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if (newDepth >= 0) { + newDepth++; + } + } + } + + closureCheckingStopState( + c, + configs, + closureBusy, + continueCollecting, + fullCtx, + newDepth, + treatEofAsEpsilon, + ); + } + } + } + + /// Implements first-edge (loop entry) elimination as an optimization + /// during closure operations. See antlr/antlr4#1398. + /// + /// The optimization is to avoid adding the loop entry config when + /// the exit path can only lead back to the same + /// StarLoopEntryState after popping context at the rule end state + /// (traversing only epsilon edges, so we're still in closure, in + /// this same rule). + /// + /// We need to detect any state that can reach loop entry on + /// epsilon w/o exiting rule. We don't have to look at FOLLOW + /// links, just ensure that all stack tops for config refer to key + /// states in LR rule. + /// + /// To verify we are in the right situation we must first check + /// closure is at a StarLoopEntryState generated during LR removal. + /// Then we check that each stack top of context is a return state + /// from one of these cases: + /// + /// 1. 'not' expr, '(' type ')' expr. 
The return state points at loop entry state + /// 2. expr op expr. The return state is the block end of internal block of (...)* + /// 3. 'between' expr 'and' expr. The return state of 2nd expr reference. + /// That state points at block end of internal block of (...)*. + /// 4. expr '?' expr ':' expr. The return state points at block end, + /// which points at loop entry state. + /// + /// If any is true for each stack top, then closure does not add a + /// config to the current config set for edge[0], the loop entry branch. + /// + /// Conditions fail if any context for the current config is: + /// + /// a. empty (we'd fall out of expr to do a global FOLLOW which could + /// even be to some weird spot in expr) or, + /// b. lies outside of expr or, + /// c. lies within expr but at a state not the BlockEndState + /// generated during LR removal + /// + /// Do we need to evaluate predicates ever in closure for this case? + /// + /// No. Predicates, including precedence predicates, are only + /// evaluated when computing a DFA start state. I.e., only before + /// the lookahead (but not parser) consumes a token. + /// + /// There are no epsilon edges allowed in LR rule alt blocks or in + /// the "primary" part (ID here). If closure is in + /// StarLoopEntryState any lookahead operation will have consumed a + /// token as there are no epsilon-paths that lead to + /// StarLoopEntryState. We do not have to evaluate predicates + /// therefore if we are in the generated StarLoopEntryState of a LR + /// rule. Note that when making a prediction starting at that + /// decision point, decision d=2, compute-start-state performs + /// closure starting at edges[0], edges[1] emanating from + /// StarLoopEntryState. That means it is not performing closure on + /// StarLoopEntryState during compute-start-state. + /// + /// How do we know this always gives same prediction answer? 
+ /// + /// Without predicates, loop entry and exit paths are ambiguous + /// upon remaining input +b (in, say, a+b). Either paths lead to + /// valid parses. Closure can lead to consuming + immediately or by + /// falling out of this call to expr back into expr and loop back + /// again to StarLoopEntryState to match +b. In this special case, + /// we choose the more efficient path, which is to take the bypass + /// path. + /// + /// The lookahead language has not changed because closure chooses + /// one path over the other. Both paths lead to consuming the same + /// remaining input during a lookahead operation. If the next token + /// is an operator, lookahead will enter the choice block with + /// operators. If it is not, lookahead will exit expr. Same as if + /// closure had chosen to enter the choice block immediately. + /// + /// Closure is examining one config (some loopentrystate, some alt, + /// context) which means it is considering exactly one alt. Closure + /// always copies the same alt to any derived configs. + /// + /// How do we know this optimization doesn't mess up precedence in + /// our parse trees? + /// + /// Looking through expr from left edge of stat only has to confirm + /// that an input, say, a+b+c; begins with any valid interpretation + /// of an expression. The precedence actually doesn't matter when + /// making a decision in stat seeing through expr. It is only when + /// parsing rule expr that we must use the precedence to get the + /// right interpretation and, hence, parse tree. + /// + /// @since 4.6 + bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig config) { + if (TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT) return false; + final p = config.state; + + assert(config.context != null); + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. 
If so, it would mean + // global FOLLOW so we can't perform optimization + if (p.stateType != StateType.STAR_LOOP_ENTRY || + !(p as StarLoopEntryState) + .isPrecedenceDecision || // Are we the special loop entry/exit state? + config.context!.isEmpty || // If SLL wildcard + config.context!.hasEmptyPath()) { + return false; + } + + final configContext = config.context!; + + // Require all return states to return back to the same rule + // that p is in. + final numCtxs = configContext.length; + for (var i = 0; i < numCtxs; i++) { + // for each stack context + final returnState = atn.states[configContext.getReturnState(i)]!; + if (returnState.ruleIndex != p.ruleIndex) return false; + } + + final decisionStartState = p.transition(0).target as BlockStartState; + final blockEndStateNum = decisionStartState.endState!.stateNumber; + final blockEndState = atn.states[blockEndStateNum] as BlockEndState; + + // Verify that the top of each stack context leads to loop entry/exit + // state through epsilon edges and w/o leaving rule. + for (var i = 0; i < numCtxs; i++) { + // for each stack context + final returnStateNumber = configContext.getReturnState(i); + final returnState = atn.states[returnStateNumber]!; + // all states must have single outgoing epsilon edge + if (returnState.numberOfTransitions != 1 || + !returnState.transition(0).isEpsilon) { + return false; + } + // Look for prefix op case like 'not expr', (' type ')' expr + final returnStateTarget = returnState.transition(0).target; + if (returnState.stateType == StateType.BLOCK_END && + returnStateTarget == p) { + continue; + } + // Look for 'expr op expr' or case where expr's return state is block end + // of (...)* internal block; the block end points to loop back + // which points to p but we don't need to check that + if (returnState == blockEndState) { + continue; + } + // Look for ternary expr ? expr : expr. 
The return state points at block end, + // which points at loop entry state + if (returnStateTarget == blockEndState) { + continue; + } + // Look for complex prefix 'between expr and expr' case where 2nd expr's + // return state points at block end state of (...)* internal block + if (returnStateTarget.stateType == StateType.BLOCK_END && + returnStateTarget.numberOfTransitions == 1 && + returnStateTarget.transition(0).isEpsilon && + returnStateTarget.transition(0).target == p) { + continue; + } + + // anything else ain't conforming + return false; + } + + return true; + } + + String getRuleName(int index) { + if (index >= 0) return parser.ruleNames[index]; + return ''; + } + + ATNConfig? getEpsilonTarget( + ATNConfig config, + Transition t, + bool collectPredicates, + bool inContext, + bool fullCtx, + bool treatEofAsEpsilon) { + switch (t.type) { + case TransitionType.RULE: + return ruleTransition(config, t as RuleTransition); + + case TransitionType.PRECEDENCE: + return precedenceTransition( + config, + t as PrecedencePredicateTransition, + collectPredicates, + inContext, + fullCtx, + ); + + case TransitionType.PREDICATE: + return predTransition( + config, + t as PredicateTransition, + collectPredicates, + inContext, + fullCtx, + ); + case TransitionType.ACTION: + return actionTransition(config, t as ActionTransition); + + case TransitionType.EPSILON: + return ATNConfig.dup(config, state: t.target); + + case TransitionType.ATOM: + case TransitionType.RANGE: + case TransitionType.SET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if (treatEofAsEpsilon) { + if (t.matches(Token.EOF, 0, 1)) { + return ATNConfig.dup(config, state: t.target); + } + } + + return null; + + default: + return null; + } + } + + ATNConfig actionTransition(ATNConfig config, ActionTransition t) { + if (debug) log('ACTION edge ${t.ruleIndex}:${t.actionIndex}'); + return ATNConfig.dup(config, state: t.target); + } + + ATNConfig? 
precedenceTransition( + ATNConfig config, + PrecedencePredicateTransition pt, + bool collectPredicates, + bool inContext, + bool fullCtx) { + if (debug) { + log('PRED (collectPredicates=$collectPredicates) ${pt.precedence}>=_p, ctx dependent=true'); + + log('context surrounding pred is ${parser.getRuleInvocationStack()}'); + } + + ATNConfig? c; + if (collectPredicates && inContext) { + if (fullCtx) { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + final currentPosition = input.index; + input.seek(startIndex); + final predSucceeds = evalSemanticContextOne( + pt.predicate, _outerContext, config.alt, fullCtx); + input.seek(currentPosition); + if (predSucceeds) { + c = ATNConfig.dup(config, state: pt.target); // no pred context + } + } else { + final newSemCtx = SemanticContext.and( + config.semanticContext, + pt.predicate, + ); + c = ATNConfig.dup(config, state: pt.target, semanticContext: newSemCtx); + } + } else { + c = ATNConfig.dup(config, state: pt.target); + } + + if (debug) log('config from pred transition=$c'); + return c; + } + + ATNConfig? predTransition( + ATNConfig config, + PredicateTransition pt, + bool collectPredicates, + bool inContext, + bool fullCtx, + ) { + if (debug) { + log('PRED (collectPredicates=$collectPredicates) ' + '${pt.ruleIndex}:${pt.predIndex}' + ', ctx dependent=${pt.isCtxDependent}'); + + log('context surrounding pred is ${parser.getRuleInvocationStack()}'); + } + + ATNConfig? c; + if (collectPredicates && + (!pt.isCtxDependent || (pt.isCtxDependent && inContext))) { + if (fullCtx) { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
+ final currentPosition = input.index; + input.seek(startIndex); + final predSucceeds = evalSemanticContextOne( + pt.predicate, + _outerContext, + config.alt, + fullCtx, + ); + input.seek(currentPosition); + if (predSucceeds) { + c = ATNConfig.dup(config, state: pt.target); // no pred context + } + } else { + final newSemCtx = + SemanticContext.and(config.semanticContext, pt.predicate); + c = ATNConfig.dup(config, state: pt.target, semanticContext: newSemCtx); + } + } else { + c = ATNConfig.dup(config, state: pt.target); + } + + if (debug) log('config from pred transition=$c'); + return c; + } + + ATNConfig ruleTransition(ATNConfig config, RuleTransition t) { + if (debug) { + log('CALL rule ' + + getRuleName(t.target.ruleIndex) + + ', ctx=${config.context}'); + } + + final returnState = t.followState; + PredictionContext newContext = SingletonPredictionContext.create( + config.context, returnState.stateNumber); + return ATNConfig.dup(config, state: t.target, context: newContext); + } + + /// Gets a [BitSet] containing the alternatives in [configs] + /// which are part of one or more conflicting alternative subsets. + /// + /// @param configs The [ATNConfigSet] to analyze. + /// @return The alternatives in [configs] which are part of one or more + /// conflicting alternative subsets. If [configs] does not contain any + /// conflicting subsets, this method returns an empty [BitSet]. + BitSet getConflictingAlts(ATNConfigSet configs) { + final altsets = PredictionModeExtension.getConflictingAltSubsets(configs); + return PredictionModeExtension.getAlts(altsets); + } + + /// Sam pointed out a problem with the previous definition, v3, of + /// ambiguous states. If we have another state associated with conflicting + /// alternatives, we should keep going. For example, the following grammar + /// + /// s : (ID | ID ID?) ';' ; + /// + /// When the ATN simulation reaches the state before ';', it has a DFA + /// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. 
Naturally + /// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node + /// because alternative to has another way to continue, via [6|2|[]]. + /// The key is that we have a single state that has config's only associated + /// with a single alternative, 2, and crucially the state transitions + /// among the configurations are all non-epsilon transitions. That means + /// we don't consider any conflicts that include alternative 2. So, we + /// ignore the conflict between alts 1 and 2. We ignore a set of + /// conflicting alts when there is an intersection with an alternative + /// associated with a single alt state in the state→config-list map. + /// + /// It's also the case that we might have two conflicting configurations but + /// also a 3rd nonconflicting configuration for a different alternative: + /// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: + /// + /// a : A | A | A B ; + /// + /// After matching input A, we reach the stop state for rule A, state 1. + /// State 8 is the state right before B. Clearly alternatives 1 and 2 + /// conflict and no amount of further lookahead will separate the two. + /// However, alternative 3 will be able to continue and so we do not + /// stop working on this state. In the previous example, we're concerned + /// with states associated with the conflicting alternatives. Here alt + /// 3 is not associated with the conflicting configs, but since we can continue + /// looking for input reasonably, I don't declare the state done. We + /// ignore a set of conflicting alts when we have an alternative + /// that we still need to pursue. + BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet configs) { + BitSet? 
conflictingAlts; + if (configs.uniqueAlt != ATN.INVALID_ALT_NUMBER) { + conflictingAlts = BitSet(); + conflictingAlts.set(configs.uniqueAlt); + } else { + conflictingAlts = configs.conflictingAlts; + } + return conflictingAlts!; + } + + String getTokenName(int t) { + if (t == Token.EOF) { + return 'EOF'; + } + + final vocabulary = parser.vocabulary; + final displayName = vocabulary.getDisplayName(t); + if (displayName == t.toString()) { + return displayName; + } + + return displayName + '<$t>'; + } + + String getLookaheadName(TokenStream input) { + return getTokenName(input.LA(1)!); + } + + /// Used for debugging in adaptivePredict around execATN but I cut + /// it out for clarity now that alg. works well. We can leave this + /// "dead" code for a bit. + void dumpDeadEndConfigs(NoViableAltException nvae) { + log('dead end configs: ', level: Level.SEVERE.value); + for (var c in nvae.deadEndConfigs!) { + var trans = 'no edges'; + if (c.state.numberOfTransitions > 0) { + final t = c.state.transition(0); + if (t is AtomTransition) { + final at = t; + trans = 'Atom ' + getTokenName(at.atomLabel); + } else if (t is SetTransition) { + final st = t; + final not = st is NotSetTransition; + trans = (not ? '~' : '') + 'Set ' + st.label.toString(); + } + } + log(c.toString(parser, true) + ':' + trans, level: Level.SEVERE.value); + } + } + + NoViableAltException noViableAlt( + TokenStream input, + ParserRuleContext outerContext, + ATNConfigSet configs, + int startIndex, + ) { + return NoViableAltException( + parser, + input, + input.get(startIndex), + input.LT(1), + configs, + outerContext, + ); + } + + static int getUniqueAlt(ATNConfigSet configs) { + var alt = ATN.INVALID_ALT_NUMBER; + for (var c in configs) { + if (alt == ATN.INVALID_ALT_NUMBER) { + alt = c.alt; // found first alt + } else if (c.alt != alt) { + return ATN.INVALID_ALT_NUMBER; + } + } + return alt; + } + + /// Add an edge to the DFA, if possible. 
This method calls + /// {@link #addDFAState} to ensure the [to] state is present in the + /// DFA. If [from] is null, or if [t] is outside the + /// range of edges that can be represented in the DFA tables, this method + /// returns without adding the edge to the DFA. + /// + ///

    If [to] is null, this method returns null. + /// Otherwise, this method returns the [DFAState] returned by calling + /// {@link #addDFAState} for the [to] state.

    + /// + /// @param dfa The DFA + /// @param from The source state for the edge + /// @param t The input symbol + /// @param to The target state for the edge + /// + /// @return If [to] is null, this method returns null; + /// otherwise this method returns the result of calling {@link #addDFAState} + /// on [to] + DFAState? addDFAEdge(DFA dfa, DFAState? from, int t, DFAState to) { + if (debug) { + log('EDGE $from -> $to upon ' + getTokenName(t)); + } + + to = addDFAState(dfa, to); // used existing if possible not incoming + if (from == null || t < -1 || t > atn.maxTokenType) { + return to; + } + + from.edges ??= List.filled(atn.maxTokenType + 1 + 1, null); + + from.edges![t + 1] = to; // connect + + if (debug) { + log('DFA=\n' + dfa.toString(parser.vocabulary)); + } + + return to; + } + + /// Add state [D] to the DFA if it is not already present, and return + /// the actual instance stored in the DFA. If a state equivalent to [D] + /// is already in the DFA, the existing state is returned. Otherwise this + /// method returns [D] after adding it to the DFA. + /// + ///

    If [D] is {@link #ERROR}, this method returns {@link #ERROR} and + /// does not change the DFA.

    + /// + /// @param dfa The dfa + /// @param D The DFA state to add + /// @return The state stored in the DFA. This will be either the existing + /// state if [D] is already in the DFA, or [D] itself if the + /// state was not already present. + DFAState addDFAState(DFA dfa, DFAState D) { + if (D == ATNSimulator.ERROR) { + return D; + } + + final existing = dfa.states[D]; + if (existing != null) return existing; + + D.stateNumber = dfa.states.length; + if (!D.configs.readOnly) { + D.configs.optimizeConfigs(this); + D.configs.readOnly = true; + } + dfa.states[D] = D; + if (debug) log('adding new DFA state: $D'); + return D; + } + + void reportAttemptingFullContext( + DFA dfa, + BitSet? conflictingAlts, + ATNConfigSet configs, + int startIndex, + int stopIndex, + ) { + if (debug || retry_debug) { + final interval = Interval.of(startIndex, stopIndex); + log( + 'reportAttemptingFullContext decision=${dfa.decision}:$configs' + ', input=' + + parser.tokenStream.getText(interval), + ); + } + + parser.errorListenerDispatch.reportAttemptingFullContext( + parser, + dfa, + startIndex, + stopIndex, + conflictingAlts, + configs, + ); + } + + void reportContextSensitivity(DFA dfa, int prediction, ATNConfigSet configs, + int startIndex, int stopIndex) { + if (debug || retry_debug) { + final interval = Interval.of(startIndex, stopIndex); + log( + 'reportContextSensitivity decision=${dfa.decision}:$configs' + ', input=' + + parser.tokenStream.getText(interval), + ); + } + + parser.errorListenerDispatch.reportContextSensitivity( + parser, + dfa, + startIndex, + stopIndex, + prediction, + configs, + ); + } + + /// If context sensitive parsing, we know it's ambiguity not conflict */ + void reportAmbiguity( + DFA dfa, + DFAState D, // the DFA state from execATN() that had SLL conflicts + int startIndex, + int stopIndex, + bool exact, + BitSet? 
ambigAlts, + ATNConfigSet configs, + ) // configs that LL not SLL considered conflicting + { + if (debug || retry_debug) { + final interval = Interval.of(startIndex, stopIndex); + log( + 'reportAmbiguity $ambigAlts:$configs' ', input=' + + parser.tokenStream.getText(interval), + ); + } + + parser.errorListenerDispatch.reportAmbiguity( + parser, + dfa, + startIndex, + stopIndex, + exact, + ambigAlts, + configs, + ); + } +} + +/// This enumeration defines the prediction modes available in ANTLR 4 along with +/// utility methods for analyzing configuration sets for conflicts and/or +/// ambiguities. +enum PredictionMode { + /// The SLL(*) prediction mode. This prediction mode ignores the current + /// parser context when making predictions. This is the fastest prediction + /// mode, and provides correct results for many grammars. This prediction + /// mode is more powerful than the prediction mode provided by ANTLR 3, but + /// may result in syntax errors for grammar and input combinations which are + /// not SLL. + /// + ///

    + /// When using this prediction mode, the parser will either return a correct + /// parse tree (i.e. the same parse tree that would be returned with the + /// {@link #LL} prediction mode), or it will report a syntax error. If a + /// syntax error is encountered when using the {@link #SLL} prediction mode, + /// it may be due to either an actual syntax error in the input or indicate + /// that the particular combination of grammar and input requires the more + /// powerful {@link #LL} prediction abilities to complete successfully.

    + /// + ///

    + /// This prediction mode does not provide any guarantees for prediction + /// behavior for syntactically-incorrect inputs.

    + SLL, + + /// The LL(*) prediction mode. This prediction mode allows the current parser + /// context to be used for resolving SLL conflicts that occur during + /// prediction. This is the fastest prediction mode that guarantees correct + /// parse results for all combinations of grammars with syntactically correct + /// inputs. + /// + ///

    + /// When using this prediction mode, the parser will make correct decisions + /// for all syntactically-correct grammar and input combinations. However, in + /// cases where the grammar is truly ambiguous this prediction mode might not + /// report a precise answer for exactly which alternatives are + /// ambiguous.

    + /// + ///

    + /// This prediction mode does not provide any guarantees for prediction + /// behavior for syntactically-incorrect inputs.

    + LL, + + /// The LL(*) prediction mode with exact ambiguity detection. In addition to + /// the correctness guarantees provided by the {@link #LL} prediction mode, + /// this prediction mode instructs the prediction algorithm to determine the + /// complete and exact set of ambiguous alternatives for every ambiguous + /// decision encountered while parsing. + /// + ///

    + /// This prediction mode may be used for diagnosing ambiguities during + /// grammar development. Due to the performance overhead of calculating sets + /// of ambiguous alternatives, this prediction mode should be avoided when + /// the exact results are not necessary.

    + /// + ///

    + /// This prediction mode does not provide any guarantees for prediction + /// behavior for syntactically-incorrect inputs.

    + LL_EXACT_AMBIG_DETECTION, +} + +extension PredictionModeExtension on PredictionMode { + /// Computes the SLL prediction termination condition. + /// + ///

    + /// This method computes the SLL prediction termination condition for both of + /// the following cases.

    + /// + ///
      + ///
    • The usual SLL+LL fallback upon SLL conflict
    • + ///
    • Pure SLL without LL fallback
    • + ///
    + /// + ///

    COMBINED SLL+LL PARSING

    + /// + ///

    When LL-fallback is enabled upon SLL conflict, correct predictions are + /// ensured regardless of how the termination condition is computed by this + /// method. Due to the substantially higher cost of LL prediction, the + /// prediction should only fall back to LL when the additional lookahead + /// cannot lead to a unique SLL prediction.

    + /// + ///

    Assuming combined SLL+LL parsing, an SLL configuration set with only + /// conflicting subsets should fall back to full LL, even if the + /// configuration sets don't resolve to the same alternative (e.g. + /// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting + /// configuration, SLL could continue with the hopes that more lookahead will + /// resolve via one of those non-conflicting configurations.

    + /// + ///

    Here's the prediction termination rule them: SLL (for SLL+LL parsing) + /// stops when it sees only conflicting configuration subsets. In contrast, + /// full LL keeps going when there is uncertainty.

    + /// + ///

    HEURISTIC

    + /// + ///

    As a heuristic, we stop prediction when we see any conflicting subset + /// unless we see a state that only has one alternative associated with it. + /// The single-alt-state thing lets prediction continue upon rules like + /// (otherwise, it would admit defeat too soon):

    + /// + ///

    {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}

    + /// + ///

    When the ATN simulation reaches the state before {@code ';'}, it has a + /// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally + /// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop + /// processing this node because alternative to has another way to continue, + /// via {@code [6|2|[]]}.

    + /// + ///

    It also let's us continue for this rule:

    + /// + ///

    {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}

    + /// + ///

    After matching input A, we reach the stop state for rule A, state 1. + /// State 8 is the state right before B. Clearly alternatives 1 and 2 + /// conflict and no amount of further lookahead will separate the two. + /// However, alternative 3 will be able to continue and so we do not stop + /// working on this state. In the previous example, we're concerned with + /// states associated with the conflicting alternatives. Here alt 3 is not + /// associated with the conflicting configs, but since we can continue + /// looking for input reasonably, don't declare the state done.

    + /// + ///

    PURE SLL PARSING

    + /// + ///

    To handle pure SLL parsing, all we have to do is make sure that we + /// combine stack contexts for configurations that differ only by semantic + /// predicate. From there, we can do the usual SLL termination heuristic.

    + /// + ///

    PREDICATES IN SLL+LL PARSING

    + /// + ///

    SLL decisions don't evaluate predicates until after they reach DFA stop + /// states because they need to create the DFA cache that works in all + /// semantic situations. In contrast, full LL evaluates predicates collected + /// during start state computation so it can ignore predicates thereafter. + /// This means that SLL termination detection can totally ignore semantic + /// predicates.

    + /// + ///

    Implementation-wise, [ATNConfigSet] combines stack contexts but not + /// semantic predicate contexts so we might see two configurations like the + /// following.

    + /// + ///

    {@code (s, 1, x, {}), (s, 1, x', {p})}

    + /// + ///

    Before testing these configurations against others, we have to merge + /// [x] and {@code x'} (without modifying the existing configurations). + /// For example, we test {@code (x+x')==x''} when looking for conflicts in + /// the following configurations.

    + /// + ///

    {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

    + /// + ///

    If the configuration set has predicates (as indicated by + /// {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of + /// the configurations to strip out all of the predicates so that a standard + /// [ATNConfigSet] will merge everything ignoring predicates.

    + static bool hasSLLConflictTerminatingPrediction( + PredictionMode mode, ATNConfigSet configs) { +/* Configs in rule stop states indicate reaching the end of the decision + * rule (local context) or end of start rule (full context). If all + * configs meet this condition, then none of the configurations is able + * to match additional input so we terminate prediction. + */ + if (allConfigsInRuleStopStates(configs)) { + return true; + } + +// pure SLL mode parsing + if (mode == PredictionMode.SLL) { +// Don't bother with combining configs from different semantic +// contexts if we can fail over to full LL; costs more time +// since we'll often fail over anyway. + if (configs.hasSemanticContext) { +// dup configs, tossing out semantic predicates + final dup = ATNConfigSet(); + for (var c in configs) { + c = ATNConfig.dup(c, semanticContext: EmptySemanticContext.Instance); + dup.add(c); + } + configs = dup; + } +// now we have combined contexts for configs with dissimilar preds + } + +// pure SLL or combined SLL+LL mode parsing + + final altsets = getConflictingAltSubsets(configs); + final heuristic = + hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(configs); + return heuristic; + } + + /// Checks if any configuration in [configs] is in a + /// [RuleStopState]. Configurations meeting this condition have reached + /// the end of the decision rule (local context) or end of start rule (full + /// context). + /// + /// @param configs the configuration set to test + /// @return [true] if any configuration in [configs] is in a + /// [RuleStopState], otherwise [false] + static bool hasConfigInRuleStopState(ATNConfigSet configs) { + for (var c in configs) { + if (c.state is RuleStopState) { + return true; + } + } + + return false; + } + + /// Checks if all configurations in [configs] are in a + /// [RuleStopState]. 
Configurations meeting this condition have reached + /// the end of the decision rule (local context) or end of start rule (full + /// context). + /// + /// @param configs the configuration set to test + /// @return [true] if all configurations in [configs] are in a + /// [RuleStopState], otherwise [false] + static bool allConfigsInRuleStopStates(ATNConfigSet configs) { + for (var config in configs) { + if (config.state is! RuleStopState) { + return false; + } + } + + return true; + } + + /// Full LL prediction termination. + /// + ///

    Can we stop looking ahead during ATN simulation or is there some + /// uncertainty as to which alternative we will ultimately pick, after + /// consuming more input? Even if there are partial conflicts, we might know + /// that everything is going to resolve to the same minimum alternative. That + /// means we can stop since no more lookahead will change that fact. On the + /// other hand, there might be multiple conflicts that resolve to different + /// minimums. That means we need more look ahead to decide which of those + /// alternatives we should predict.

    + /// + ///

    The basic idea is to split the set of configurations [C], into + /// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with + /// non-conflicting configurations. Two configurations conflict if they have + /// identical {@link ATNConfig#state} and {@link ATNConfig#context} values + /// but different {@link ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)} + /// and {@code (s, j, ctx, _)} for {@code i!=j}.

    + /// + ///

    Reduce these configuration subsets to the set of possible alternatives. + /// You can compute the alternative subsets in one pass as follows:

    + /// + ///

    {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in + /// [C] holding [s] and [ctx] fixed.

    + /// + ///

    Or in pseudo-code, for each configuration [c] in [C]:

    + /// + ///
    +  /// map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
    +  /// alt and not pred
    +  /// 
    + /// + ///

    The values in [map] are the set of {@code A_s,ctx} sets.

    + /// + ///

    If {@code |A_s,ctx|=1} then there is no conflict associated with + /// [s] and [ctx].

    + /// + ///

    Reduce the subsets to singletons by choosing a minimum of each subset. If + /// the union of these alternative subsets is a singleton, then no amount of + /// more lookahead will help us. We will always pick that alternative. If, + /// however, there is more than one alternative, then we are uncertain which + /// alternative to predict and must continue looking for resolution. We may + /// or may not discover an ambiguity in the future, even if there are no + /// conflicting subsets this round.

    + /// + ///

    The biggest sin is to terminate early because it means we've made a + /// decision but were uncertain as to the eventual outcome. We haven't used + /// enough lookahead. On the other hand, announcing a conflict too late is no + /// big deal; you will still have the conflict. It's just inefficient. It + /// might even look until the end of file.

    + /// + ///

    No special consideration for semantic predicates is required because + /// predicates are evaluated on-the-fly for full LL prediction, ensuring that + /// no configuration contains a semantic context during the termination + /// check.

    + /// + ///

    CONFLICTING CONFIGS

    + /// + ///

    Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict + /// when {@code i!=j} but {@code x=x'}. Because we merge all + /// {@code (s, i, _)} configurations together, that means that there are at + /// most [n] configurations associated with state [s] for + /// [n] possible alternatives in the decision. The merged stacks + /// complicate the comparison of configuration contexts [x] and + /// {@code x'}. Sam checks to see if one is a subset of the other by calling + /// merge and checking to see if the merged result is either [x] or + /// {@code x'}. If the [x] associated with lowest alternative [i] + /// is the superset, then [i] is the only possible prediction since the + /// others resolve to {@code min(i)} as well. However, if [x] is + /// associated with {@code j>i} then at least one stack configuration for + /// [j] is not in conflict with alternative [i]. The algorithm + /// should keep going, looking for more lookahead due to the uncertainty.

    + /// + ///

    For simplicity, I'm doing a equality check between [x] and + /// {@code x'} that lets the algorithm continue to consume lookahead longer + /// than necessary. The reason I like the equality is of course the + /// simplicity but also because that is the test you need to detect the + /// alternatives that are actually in conflict.

    + /// + ///

    CONTINUE/STOP RULE

    + /// + ///

    Continue if union of resolved alternative sets from non-conflicting and + /// conflicting alternative subsets has more than one alternative. We are + /// uncertain about which alternative to predict.

    + /// + ///

    The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which + /// alternatives are still in the running for the amount of input we've + /// consumed at this point. The conflicting sets let us to strip away + /// configurations that won't lead to more states because we resolve + /// conflicts to the configuration with a minimum alternate for the + /// conflicting set.

    + /// + ///

    CASES

    + /// + ///
      + /// + ///
    • no conflicts and more than 1 alternative in set => continue
    • + /// + ///
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, + /// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set + /// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = + /// {@code {1,3}} => continue + ///
    • + /// + ///
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, + /// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set + /// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = + /// {@code {1}} => stop and predict 1
    • + /// + ///
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, + /// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U + /// {@code {1}} = {@code {1}} => stop and predict 1, can announce + /// ambiguity {@code {1,2}}
    • + /// + ///
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, + /// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U + /// {@code {2}} = {@code {1,2}} => continue
    • + /// + ///
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, + /// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U + /// {@code {3}} = {@code {1,3}} => continue
    • + /// + ///
    + /// + ///

    EXACT AMBIGUITY DETECTION

    + /// + ///

    If all states report the same conflicting set of alternatives, then we + /// know we have the exact ambiguity set.

    + /// + ///

    |A_i|>1 and + /// A_i = A_j for all i, j.

    + /// + ///

    In other words, we continue examining lookahead until all {@code A_i} + /// have more than one alternative and all {@code A_i} are the same. If + /// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate + /// because the resolved set is {@code {1}}. To determine what the real + /// ambiguity is, we have to know whether the ambiguity is between one and + /// two or one and three so we keep going. We can only stop prediction when + /// we need exact ambiguity detection when the sets look like + /// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

    + static int resolvesToJustOneViableAlt(List altsets) { + return getSingleViableAlt(altsets); + } + + /// Determines if every alternative subset in [altsets] contains more + /// than one alternative. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if every [BitSet] in [altsets] has + /// {@link BitSet#cardinality cardinality} > 1, otherwise [false] + static bool allSubsetsConflict(List altsets) { + return !hasNonConflictingAltSet(altsets); + } + + /// Determines if any single alternative subset in [altsets] contains + /// exactly one alternative. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if [altsets] contains a [BitSet] with + /// {@link BitSet#cardinality cardinality} 1, otherwise [false] + static bool hasNonConflictingAltSet(List altsets) { + for (var alts in altsets) { + if (alts.cardinality == 1) { + return true; + } + } + return false; + } + + /// Determines if any single alternative subset in [altsets] contains + /// more than one alternative. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if [altsets] contains a [BitSet] with + /// {@link BitSet#cardinality cardinality} > 1, otherwise [false] + static bool hasConflictingAltSet(List altsets) { + for (var alts in altsets) { + if (alts.cardinality > 1) { + return true; + } + } + return false; + } + + /// Determines if every alternative subset in [altsets] is equivalent. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if every member of [altsets] is equal to the + /// others, otherwise [false] + static bool allSubsetsEqual(List altsets) { + final first = altsets.first; + return altsets.every((e) => e == first); + } + + /// Returns the unique alternative predicted by all alternative subsets in + /// [altsets]. If no such alternative exists, this method returns + /// {@link ATN#INVALID_ALT_NUMBER}. 
+ /// + /// @param altsets a collection of alternative subsets + static int getUniqueAlt(List altsets) { + final all = getAlts(altsets); + if (all.cardinality == 1) return all.nextset(0); + return ATN.INVALID_ALT_NUMBER; + } + + /// Gets the complete set of represented alternatives for a collection of + /// alternative subsets. This method returns the union of each [BitSet] + /// in [altsets]. + /// + /// @param altsets a collection of alternative subsets + /// @return the set of represented alternatives in [altsets] + static BitSet getAlts(List altsets) { + final all = BitSet(); + for (var alts in altsets) { + all.or(alts); + } + return all; + } + + /// Get union of all alts from configs. + /// + /// @since 4.5.1 + static BitSet getAltsFromConfigs(ATNConfigSet configs) { + final alts = BitSet(); + for (var config in configs) { + alts.set(config.alt); + } + return alts; + } + + /// This function gets the conflicting alt subsets from a configuration set. + /// For each configuration [c] in [configs]: + /// + ///
    +  /// map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
    +  /// alt and not pred
    +  /// 
    + static List getConflictingAltSubsets(ATNConfigSet configs) { + final configToAlts = + HashMap(equals: (ATNConfig? a, ATNConfig? b) { + if (identical(a, b)) return true; + if (a == null || b == null) return false; + return a.state.stateNumber == b.state.stateNumber && + a.context == b.context; + }, hashCode: (ATNConfig o) { + /** + * The hash code is only a function of the {@link ATNState#stateNumber} + * and {@link ATNConfig#context}. + */ + var hashCode = MurmurHash.initialize(7); + hashCode = MurmurHash.update(hashCode, o.state.stateNumber); + hashCode = MurmurHash.update(hashCode, o.context); + hashCode = MurmurHash.finish(hashCode, 2); + return hashCode; + }); + for (var c in configs) { + var alts = configToAlts[c]; + if (alts == null) { + alts = BitSet(); + configToAlts[c] = alts; + } + alts.set(c.alt); + } + return configToAlts.values.toList(); + } + + /// Get a map from state to alt subset from a configuration set. For each + /// configuration [c] in [configs]: + /// + ///
    +  /// map[c.{@link ATNConfig#state state}] U= c.{@link ATNConfig#alt alt}
    +  /// 
    + static Map getStateToAltMap(ATNConfigSet configs) { + final m = {}; + for (var c in configs) { + var alts = m[c.state]; + if (alts == null) { + alts = BitSet(); + m[c.state] = alts; + } + alts.set(c.alt); + } + return m; + } + + static bool hasStateAssociatedWithOneAlt(ATNConfigSet configs) { + final x = getStateToAltMap(configs); + for (var alts in x.values) { + if (alts.cardinality == 1) return true; + } + return false; + } + + static int getSingleViableAlt(List altsets) { + final viableAlts = BitSet(); + for (var alts in altsets) { + final minAlt = alts.nextset(0); + viableAlts.set(minAlt); + if (viableAlts.cardinality > 1) { + // more than 1 viable alt + return ATN.INVALID_ALT_NUMBER; + } + } + return viableAlts.nextset(0); + } +} diff --git a/runtime/Dart/lib/src/atn/src/profiling_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/profiling_atn_simulator.dart new file mode 100644 index 0000000000..2cb6281b97 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/profiling_atn_simulator.dart @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:math'; + +import '../../dfa/dfa.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../token_stream.dart'; +import '../../util/bit_set.dart'; +import 'atn_config_set.dart'; +import 'atn_simulator.dart'; +import 'info.dart'; +import 'parser_atn_simulator.dart'; +import 'semantic_context.dart'; + +class ProfilingATNSimulator extends ParserATNSimulator { + late List decisions; + late int numDecisions; + + late int _sllStopIndex; + late int _llStopIndex; + + late int currentDecision; + DFAState? currentState; + + /// At the point of LL failover, we record how SLL would resolve the conflict so that + /// we can determine whether or not a decision / input pair is context-sensitive. 
+ /// If LL gives a different result than SLL's predicted alternative, we have a + /// context sensitivity for sure. The converse is not necessarily true, however. + /// It's possible that after conflict resolution chooses minimum alternatives, + /// SLL could get the same answer as LL. Regardless of whether or not the result indicates + /// an ambiguity, it is not treated as a context sensitivity because LL prediction + /// was not required in order to produce a correct prediction for this decision and input sequence. + /// It may in fact still be a context sensitivity but we don't know by looking at the + /// minimum alternatives for the current input. + int? conflictingAltResolvedBySLL; + + ProfilingATNSimulator(Parser parser) + : super( + parser, + parser.interpreter!.atn, + parser.interpreter!.decisionToDFA, + parser.interpreter!.sharedContextCache, + ) { + numDecisions = atn.decisionToState.length; + decisions = List.generate( + numDecisions, + (index) => DecisionInfo(index), + ); + } + + @override + int adaptivePredict( + TokenStream input, + int decision, + ParserRuleContext? outerContext, + ) { + try { + _sllStopIndex = -1; + _llStopIndex = -1; + currentDecision = decision; + + final start = + DateTime.now(); // TODO get nano seconds expensive but useful info + final alt = super.adaptivePredict(input, decision, outerContext); + final stop = DateTime.now(); + decisions[decision].timeInPrediction += + (stop.difference(start)).inMicroseconds; + decisions[decision].invocations++; + + final SLL_k = _sllStopIndex - startIndex + 1; + decisions[decision].SLL_TotalLook += SLL_k; + decisions[decision].SLL_MinLook = decisions[decision].SLL_MinLook == 0 + ? 
SLL_k + : min(decisions[decision].SLL_MinLook, SLL_k); + if (SLL_k > decisions[decision].SLL_MaxLook) { + decisions[decision].SLL_MaxLook = SLL_k; + decisions[decision].SLL_MaxLookEvent = LookaheadEventInfo( + decision, + null, + alt, + input, + startIndex, + _sllStopIndex, + false, + ); + } + + if (_llStopIndex >= 0) { + final LL_k = _llStopIndex - startIndex + 1; + decisions[decision].LL_TotalLook += LL_k; + decisions[decision].LL_MinLook = decisions[decision].LL_MinLook == 0 + ? LL_k + : min(decisions[decision].LL_MinLook, LL_k); + if (LL_k > decisions[decision].LL_MaxLook) { + decisions[decision].LL_MaxLook = LL_k; + decisions[decision].LL_MaxLookEvent = LookaheadEventInfo( + decision, null, alt, input, startIndex, _llStopIndex, true); + } + } + + return alt; + } finally { + currentDecision = -1; + } + } + + @override + DFAState? getExistingTargetState(DFAState previousD, int t) { + // this method is called after each time the input position advances + // during SLL prediction + _sllStopIndex = input.index; + + final existingTargetState = super.getExistingTargetState(previousD, t); + if (existingTargetState != null) { + // count only if we transition over a DFA state + decisions[currentDecision].SLL_DFATransitions += 1; + if (existingTargetState == ATNSimulator.ERROR) { + decisions[currentDecision].errors.add( + ErrorInfo( + currentDecision, + previousD.configs, + input, + startIndex, + _sllStopIndex, + false, + ), + ); + } + } + + currentState = existingTargetState; + return existingTargetState; + } + + @override + DFAState? computeTargetState(DFA dfa, DFAState previousD, int t) { + final state = super.computeTargetState(dfa, previousD, t); + currentState = state; + return state; + } + + @override + ATNConfigSet? 
computeReachSet(ATNConfigSet closure, int t, bool fullCtx) { + if (fullCtx) { + // this method is called after each time the input position advances + // during full context prediction + _llStopIndex = input.index; + } + + final reachConfigs = super.computeReachSet(closure, t, fullCtx); + if (fullCtx) { + // count computation even if error + decisions[currentDecision].LL_ATNTransitions += 1; + if (reachConfigs != null) { + } else { + // no reach on current lookahead symbol. ERROR. + // TODO: does not handle delayed errors per getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule() + decisions[currentDecision].errors.add( + ErrorInfo( + currentDecision, + closure, + input, + startIndex, + _llStopIndex, + true, + ), + ); + } + } else { + decisions[currentDecision].SLL_ATNTransitions += 1; + if (reachConfigs != null) { + } else { + // no reach on current lookahead symbol. ERROR. + decisions[currentDecision].errors.add(ErrorInfo( + currentDecision, + closure, + input, + startIndex, + _sllStopIndex, + false, + )); + } + } + return reachConfigs; + } + + @override + bool evalSemanticContextOne( + SemanticContext pred, + ParserRuleContext? parserCallStack, + int alt, + bool fullCtx, + ) { + final result = super.evalSemanticContextOne( + pred, + parserCallStack, + alt, + fullCtx, + ); + if (pred is! PrecedencePredicate) { + final fullContext = _llStopIndex >= 0; + final stopIndex = fullContext ? _llStopIndex : _sllStopIndex; + decisions[currentDecision].predicateEvals.add(PredicateEvalInfo( + currentDecision, + input, + startIndex, + stopIndex, + pred, + result, + alt, + fullCtx)); + } + + return result; + } + + @override + void reportAttemptingFullContext( + DFA dfa, + BitSet? 
conflictingAlts, + ATNConfigSet configs, + int startIndex, + int stopIndex, + ) { + if (conflictingAlts != null) { + conflictingAltResolvedBySLL = conflictingAlts.nextset(0); + } else { + conflictingAltResolvedBySLL = configs.alts.nextset(0); + } + + decisions[currentDecision].LL_Fallback += 1; + + super.reportAttemptingFullContext( + dfa, + conflictingAlts, + configs, + startIndex, + stopIndex, + ); + } + + @override + void reportContextSensitivity(DFA dfa, int prediction, ATNConfigSet configs, + int startIndex, int stopIndex) { + if (prediction != conflictingAltResolvedBySLL) { + decisions[currentDecision].contextSensitivities.add( + ContextSensitivityInfo( + currentDecision, configs, input, startIndex, stopIndex)); + } + super.reportContextSensitivity( + dfa, prediction, configs, startIndex, stopIndex); + } + + @override + void reportAmbiguity( + DFA dfa, + DFAState D, + int startIndex, + int stopIndex, + bool exact, + BitSet? ambigAlts, + ATNConfigSet configs, + ) { + final prediction = + ambigAlts != null ? ambigAlts.nextset(0) : configs.alts.nextset(0); + if (configs.fullCtx && prediction != conflictingAltResolvedBySLL) { + // Even though this is an ambiguity we are reporting, we can + // still detect some context sensitivities. Both SLL and LL + // are showing a conflict, hence an ambiguity, but if they resolve + // to different minimum alternatives we have also identified a + // context sensitivity. 
+ decisions[currentDecision].contextSensitivities.add( + ContextSensitivityInfo( + currentDecision, + configs, + input, + startIndex, + stopIndex, + ), + ); + } + decisions[currentDecision].ambiguities.add( + AmbiguityInfo( + currentDecision, + configs, + ambigAlts, + input, + startIndex, + stopIndex, + configs.fullCtx, + ), + ); + super.reportAmbiguity( + dfa, + D, + startIndex, + stopIndex, + exact, + ambigAlts, + configs, + ); + } + + // --------------------------------------------------------------------- + + List get decisionInfo { + return decisions; + } +} diff --git a/runtime/Dart/lib/src/atn/src/semantic_context.dart b/runtime/Dart/lib/src/atn/src/semantic_context.dart new file mode 100644 index 0000000000..6661e8fef6 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/semantic_context.dart @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'package:antlr4/src/atn/src/atn_simulator.dart'; +import 'package:collection/collection.dart'; + +import '../../recognizer.dart'; +import '../../rule_context.dart'; +import '../../util/murmur_hash.dart'; + +/// A tree structure used to record the semantic context in which +/// an ATN configuration is valid. It's either a single predicate, +/// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +/// +///

    I have scoped the [AND], [OR], and [Predicate] subclasses of +/// [SemanticContext] within the scope of this outer class.

    +abstract class SemanticContext { + const SemanticContext(); + + /// For context independent predicates, we evaluate them without a local + /// context (i.e., null context). That way, we can evaluate them without + /// having to create proper rule-specific context during prediction (as + /// opposed to the parser, which creates them naturally). In a practical + /// sense, this avoids a cast exception from RuleContext to myruleContext. + /// + ///

    For context dependent predicates, we must pass in a local context so that + /// references such as $arg evaluate properly as _localctx.arg. We only + /// capture context dependent predicates in the context in which we begin + /// prediction, so we passed in the outer context here in case of context + /// dependent predicate evaluation.

    + bool eval(Recognizer parser, RuleContext? parserCallStack); + + /// Evaluate the precedence predicates for the context and reduce the result. + /// + /// @param parser The parser instance. + /// @param parserCallStack + /// @return The simplified semantic context after precedence predicates are + /// evaluated, which will be one of the following values. + ///
      + ///
    • {@link #NONE}in if the predicate simplifies to [true] after + /// precedence predicates are evaluated.
    • + ///
    • nullin if the predicate simplifies to [false] after + /// precedence predicates are evaluated.
    • + ///
    • [this]in if the semantic context is not changed as a result of + /// precedence predicate evaluation.
    • + ///
    • A non-null [SemanticContext]in the new simplified + /// semantic context after precedence predicates are evaluated.
    • + ///
    + SemanticContext? evalPrecedence( + Recognizer parser, + RuleContext? parserCallStack, + ) { + return this; + } + + static SemanticContext? and(SemanticContext? a, SemanticContext? b) { + if (a == null || a == EmptySemanticContext.Instance) return b; + if (b == null || b == EmptySemanticContext.Instance) return a; + final result = AND(a, b); + if (result.opnds.length == 1) { + return result.opnds[0]; + } + + return result; + } + + /// + /// @see ParserATNSimulator#getPredsForAmbigAlts + static SemanticContext? or(SemanticContext? a, SemanticContext? b) { + if (a == null) return b; + if (b == null) return a; + if (a == EmptySemanticContext.Instance || b == EmptySemanticContext.Instance) return EmptySemanticContext.Instance; + final result = OR(a, b); + if (result.opnds.length == 1) { + return result.opnds[0]; + } + + return result; + } + + static Iterable filterPrecedencePredicates( + Iterable collection) { + return collection.whereType(); + } + + static Iterable filterNonPrecedencePredicates( + Iterable collection) { + return collection.where((e) => e is! PrecedencePredicate); + } +} + +class EmptySemanticContext extends SemanticContext { + /// The default [SemanticContext], which is semantically equivalent to + /// a predicate of the form {@code {true}?}. + static const SemanticContext Instance = Predicate(); + + @override + bool eval(Recognizer parser, RuleContext? parserCallStack) { + return false; + } +} + +class Predicate extends SemanticContext { + final int ruleIndex; + final int predIndex; + final bool isCtxDependent; // e.g., $i ref in pred + + const Predicate( + [this.ruleIndex = -1, this.predIndex = -1, this.isCtxDependent = false]); + + @override + bool eval(Recognizer parser, RuleContext? parserCallStack) { + final localctx = isCtxDependent ? 
parserCallStack : null; + return parser.sempred(localctx, ruleIndex, predIndex); + } + + @override + int get hashCode { + var hashCode = MurmurHash.initialize(); + hashCode = MurmurHash.update(hashCode, ruleIndex); + hashCode = MurmurHash.update(hashCode, predIndex); + hashCode = MurmurHash.update(hashCode, isCtxDependent ? 1 : 0); + hashCode = MurmurHash.finish(hashCode, 3); + return hashCode; + } + + @override + bool operator ==(Object obj) { + return obj is Predicate && + ruleIndex == obj.ruleIndex && + predIndex == obj.predIndex && + isCtxDependent == obj.isCtxDependent; + } + + @override + String toString() { + return '{$ruleIndex:$predIndex}?'; + } +} + +class PrecedencePredicate extends SemanticContext + implements Comparable { + final int precedence; + + PrecedencePredicate([this.precedence = 0]); + + @override + bool eval(Recognizer parser, RuleContext? parserCallStack) { + return parser.precpred(parserCallStack, precedence); + } + + @override + SemanticContext? evalPrecedence( + Recognizer parser, + RuleContext? parserCallStack, + ) { + if (parser.precpred(parserCallStack, precedence)) { + return EmptySemanticContext.Instance; + } else { + return null; + } + } + + @override + int compareTo(PrecedencePredicate o) { + return precedence - o.precedence; + } + + @override + int get hashCode { + var hashCode = 1; + hashCode = 31 * hashCode + precedence; + return hashCode; + } + + @override + bool operator ==(Object other) { + if (other is! PrecedencePredicate) { + return false; + } + return precedence == other.precedence; + } + +// precedence >= _precedenceStack.peek() + @override + String toString() { + return '{$precedence>=prec}?'; + } +} + +/// This is the base class for semantic context "operators", which operate on +/// a collection of semantic context "operands". +/// +/// @since 4.3 +abstract class Operator extends SemanticContext { + /// Gets the operands for the semantic context operator. 
+ /// + /// @return a collection of [SemanticContext] operands for the + /// operator. + /// + /// @since 4.3 + List get operands; +} + +/// A semantic context which is true whenever none of the contained contexts +/// is false. + +class AND extends Operator { + late final List opnds; + + AND(SemanticContext a, SemanticContext b) { + var operands = {}; + if (a is AND) { + operands.addAll(a.opnds); + } else { + operands.add(a); + } + if (b is AND) { + operands.addAll(b.opnds); + } else { + operands.add(b); + } + + final precedencePredicates = + SemanticContext.filterPrecedencePredicates(operands); + + operands = SemanticContext.filterNonPrecedencePredicates(operands).toSet(); + if (precedencePredicates.isNotEmpty) { + // interested in the transition with the lowest precedence + final reduced = + precedencePredicates.reduce((a, b) => a.compareTo(b) <= 0 ? a : b); + operands.add(reduced); + } + + opnds = operands.toList(); + } + + @override + List get operands { + return opnds; + } + + @override + bool operator ==(Object other) { + if (other is! AND) return false; + return ListEquality().equals(opnds, other.opnds); + } + + @override + int get hashCode { + return MurmurHash.getHashCode(opnds, runtimeType.hashCode); + } + + /// {@inheritDoc} + /// + ///

    + /// The evaluation of predicates by this context is short-circuiting, but + /// unordered.

    + + @override + bool eval(Recognizer parser, RuleContext? parserCallStack) { + for (var opnd in opnds) { + if (!opnd.eval(parser, parserCallStack)) return false; + } + return true; + } + + @override + SemanticContext? evalPrecedence( + Recognizer parser, + RuleContext? parserCallStack, + ) { + var differs = false; + final operands = []; + for (var context in opnds) { + final evaluated = context.evalPrecedence(parser, parserCallStack); + differs |= (evaluated != context); + if (evaluated == null) { + // The AND context is false if any element is false + return null; + } else if (evaluated != EmptySemanticContext.Instance) { + // Reduce the result by skipping true elements + operands.add(evaluated); + } + } + + if (!differs) { + return this; + } + + if (operands.isEmpty) { + // all elements were true, so the AND context is true + return EmptySemanticContext.Instance; + } + + SemanticContext? result = operands[0]; + for (var i = 1; i < operands.length; i++) { + result = SemanticContext.and(result, operands[i]); + } + + return result; + } + + @override + String toString() { + return opnds.join('&&'); + } +} + +/// A semantic context which is true whenever at least one of the contained +/// contexts is true. +class OR extends Operator { + late final List opnds; + + OR(SemanticContext a, SemanticContext b) { + var operands = {}; + if (a is OR) { + operands.addAll(a.opnds); + } else { + operands.add(a); + } + if (b is OR) { + operands.addAll(b.opnds); + } else { + operands.add(b); + } + + final precedencePredicates = + SemanticContext.filterPrecedencePredicates(operands); + + operands = SemanticContext.filterNonPrecedencePredicates(operands).toSet(); + if (precedencePredicates.isNotEmpty) { + // interested in the transition with the highest precedence + final reduced = + precedencePredicates.reduce((a, b) => a.compareTo(b) >= 0 ? 
a : b); + operands.add(reduced); + } + + opnds = operands.toList(); + } + + @override + List get operands { + return opnds; + } + + @override + bool operator ==(Object other) { + if (other is! OR) return false; + return ListEquality().equals(opnds, other.opnds); + } + + @override + int get hashCode { + return MurmurHash.getHashCode(opnds, runtimeType.hashCode); + } + + /// {@inheritDoc} + /// + ///

    + /// The evaluation of predicates by this context is short-circuiting, but + /// unordered.

    + + @override + bool eval(Recognizer parser, RuleContext? parserCallStack) { + for (var opnd in opnds) { + if (opnd.eval(parser, parserCallStack)) return true; + } + return false; + } + + @override + SemanticContext? evalPrecedence( + Recognizer parser, + RuleContext? parserCallStack, + ) { + var differs = false; + final operands = []; + for (var context in opnds) { + final evaluated = context.evalPrecedence(parser, parserCallStack); + differs |= (evaluated != context); + if (evaluated == EmptySemanticContext.Instance) { + // The OR context is true if any element is true + return EmptySemanticContext.Instance; + } else if (evaluated != null) { + // Reduce the result by skipping false elements + operands.add(evaluated); + } + } + + if (!differs) { + return this; + } + + if (operands.isEmpty) { + // all elements were false, so the OR context is false + return null; + } + + SemanticContext? result = operands[0]; + for (var i = 1; i < operands.length; i++) { + result = SemanticContext.or(result, operands[i]); + } + + return result; + } + + @override + String toString() { + return opnds.join('||'); + } +} diff --git a/runtime/Dart/lib/src/atn/src/transition.dart b/runtime/Dart/lib/src/atn/src/transition.dart new file mode 100644 index 0000000000..5e44aff1fa --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/transition.dart @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../interval_set.dart'; +import '../../token.dart'; +import 'atn_state.dart'; +import 'semantic_context.dart'; + +enum TransitionType { + INVALID, // 0 is not used + EPSILON, + RANGE, + RULE, + PREDICATE, // e.g., {isType(input.LT(1))}? + ATOM, + ACTION, + SET, // ~(A|B) or ~atom, wildcard, which convert to next 2 + NOT_SET, + WILDCARD, + PRECEDENCE, +} + +/// An ATN transition between any two ATN states. 
Subclasses define +/// atom, set, epsilon, action, predicate, rule transitions. +/// +///

    This is a one way link. It emanates from a state (usually via a list of +/// transitions) and has a target state.

    +/// +///

    Since we never have to change the ATN transitions once we construct it, +/// we can fix these transitions as specific classes. The DFA transitions +/// on the other hand need to update the labels as it adds transitions to +/// the states. We'll use the term Edge for the DFA to distinguish them from +/// ATN transitions.

    +abstract class Transition { + /// The target of this transition. */ + ATNState target; + + Transition(this.target); + + TransitionType get type; + + /// Determines if the transition is an "epsilon" transition. + /// + ///

    The default implementation returns [false].

    + /// + /// @return [true] if traversing this transition in the ATN does not + /// consume an input symbol; otherwise, [false] if traversing this + /// transition consumes (matches) an input symbol. + bool get isEpsilon => false; + + IntervalSet? get label => null; + + bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol); +} + +class EpsilonTransition extends Transition { + /// @return the rule index of a precedence rule for which this transition is + /// returning from, where the precedence value is 0; otherwise, -1. + /// + /// @see ATNConfig#isPrecedenceFilterSuppressed() + /// @see ParserATNSimulator#applyPrecedenceFilter(ATNConfigSet) + /// @since 4.4.1 + final int outermostPrecedenceReturn; + + EpsilonTransition(ATNState target, [this.outermostPrecedenceReturn = -1]) + : super(target); + + @override + bool get isEpsilon => true; + + @override + bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) { + return false; + } + + @override + String toString() { + return 'epsilon'; + } + + @override + TransitionType get type => TransitionType.EPSILON; +} + +class RangeTransition extends Transition { + final int from; + final int to; + + RangeTransition(ATNState target, this.from, this.to) : super(target); + + @override + IntervalSet get label { + return IntervalSet.ofRange(from, to); + } + + @override + bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) { + return symbol >= from && symbol <= to; + } + + @override + String toString() { + return "'$from..$to'"; + } + + @override + TransitionType get type => TransitionType.RANGE; +} + +class RuleTransition extends Transition { + /// Ptr to the rule definition object for this rule ref */ + final int ruleIndex; // no Rule object at runtime + + final int precedence; + + /// What node to begin computations following ref to rule */ + ATNState followState; + + RuleTransition( + RuleStartState ruleStart, + this.ruleIndex, + this.precedence, + this.followState, + ) : super(ruleStart); + 
+ @override + bool get isEpsilon => true; + + @override + bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) { + return false; + } + + @override + TransitionType get type => TransitionType.RULE; +} + +abstract class AbstractPredicateTransition extends Transition { + AbstractPredicateTransition(ATNState target) : super(target); +} + +class PredicateTransition extends AbstractPredicateTransition { + final int ruleIndex; + final int predIndex; + final bool isCtxDependent; // e.g., $i ref in pred + + PredicateTransition( + target, this.ruleIndex, this.predIndex, this.isCtxDependent) + : super(target); + + @override + bool get isEpsilon => true; + + @override + bool matches(symbol, minVocabSymbol, maxVocabSymbol) { + return false; + } + + Predicate get predicate => Predicate(ruleIndex, predIndex, isCtxDependent); + + @override + String toString() { + return 'pred_$ruleIndex:$predIndex'; + } + + @override + TransitionType get type => TransitionType.PREDICATE; +} + +/// TODO: make all transitions sets? no, should remove set edges */ +class AtomTransition extends Transition { + /// The token type or character value; or, signifies special label. 
*/ + final int atomLabel; + + AtomTransition(ATNState target, this.atomLabel) : super(target); + + @override + IntervalSet get label { + return IntervalSet.ofOne(atomLabel); + } + + @override + bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) { + return atomLabel == symbol; + } + + @override + String toString() { + return label.toString(); + } + + @override + TransitionType get type => TransitionType.ATOM; +} + +class ActionTransition extends Transition { + final int ruleIndex; + final int actionIndex; + final bool isCtxDependent; // e.g., $i ref in pred + + ActionTransition(target, this.ruleIndex, + [this.actionIndex = -1, this.isCtxDependent = false]) + : super(target); + + @override + bool get isEpsilon => + true; // we are to be ignored by analysis 'cept for predicates + + @override + bool matches(symbol, minVocabSymbol, maxVocabSymbol) => false; + + @override + String toString() { + return 'action_$ruleIndex:$actionIndex'; + } + + @override + TransitionType get type => TransitionType.ACTION; +} + +// A transition containing a set of values. +class SetTransition extends Transition { + @override + late IntervalSet label; + + SetTransition(ATNState target, [IntervalSet? st]) : super(target) { + label = st ?? 
IntervalSet.ofOne(Token.INVALID_TYPE); + } + + @override + bool matches(symbol, minVocabSymbol, maxVocabSymbol) { + return label.contains(symbol); + } + + @override + String toString() { + return label.toString(); + } + + @override + TransitionType get type => TransitionType.SET; +} + +class NotSetTransition extends SetTransition { + NotSetTransition(target, st) : super(target, st); + + @override + bool matches(symbol, minVocabSymbol, maxVocabSymbol) { + return symbol >= minVocabSymbol && + symbol <= maxVocabSymbol && + !super.matches(symbol, minVocabSymbol, maxVocabSymbol); + } + + @override + String toString() { + return '~' + super.toString(); + } + + @override + TransitionType get type => TransitionType.NOT_SET; +} + +class WildcardTransition extends Transition { + WildcardTransition(target) : super(target); + + @override + bool matches(symbol, minVocabSymbol, maxVocabSymbol) { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol; + } + + @override + String toString() { + return '.'; + } + + @override + TransitionType get type => TransitionType.WILDCARD; +} + +class PrecedencePredicateTransition extends AbstractPredicateTransition { + final int precedence; + + PrecedencePredicateTransition(target, this.precedence) : super(target); + + @override + bool get isEpsilon => true; + + @override + bool matches(symbol, minVocabSymbol, maxVocabSymbol) => false; + + PrecedencePredicate get predicate { + return PrecedencePredicate(precedence); + } + + @override + String toString() => '$precedence >= _p'; + + @override + TransitionType get type => TransitionType.PRECEDENCE; +} diff --git a/runtime/Dart/lib/src/dfa/dfa.dart b/runtime/Dart/lib/src/dfa/dfa.dart new file mode 100644 index 0000000000..552ddef7dd --- /dev/null +++ b/runtime/Dart/lib/src/dfa/dfa.dart @@ -0,0 +1,8 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +export 'src/dfa.dart'; +export 'src/dfa_state.dart'; diff --git a/runtime/Dart/lib/src/dfa/src/dfa.dart b/runtime/Dart/lib/src/dfa/src/dfa.dart new file mode 100644 index 0000000000..7ee14406d8 --- /dev/null +++ b/runtime/Dart/lib/src/dfa/src/dfa.dart @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../vocabulary.dart'; +import '../../atn/atn.dart'; +import 'dfa_serializer.dart'; +import 'dfa_state.dart'; + +class DFA { + /// A set of all DFA states. Use [Map] so we can get old state back + /// ([Set] only allows you to see if it's there). + + Map states = {}; + + DFAState? s0; + + final int? decision; + + /// From which ATN state did we create this DFA? */ + + DecisionState? atnStartState; + + /// [true] if this DFA is for a precedence decision; otherwise, + /// [false]. This is the backing field for [isPrecedenceDfa]. + late bool precedenceDfa; + + DFA(this.atnStartState, [this.decision]) { + var precedenceDfa = false; + if (atnStartState is StarLoopEntryState) { + if ((atnStartState as StarLoopEntryState).isPrecedenceDecision) { + precedenceDfa = true; + final precedenceState = DFAState(configs: ATNConfigSet()); + precedenceState.edges = []; + precedenceState.isAcceptState = false; + precedenceState.requiresFullContext = false; + s0 = precedenceState; + } + } + + this.precedenceDfa = precedenceDfa; + } + + /// Gets whether this DFA is a precedence DFA. Precedence DFAs use a special + /// start state {@link #s0} which is not stored in [states]. The + /// [DFAState.edges] array for this start state contains outgoing edges + /// supplying individual start states corresponding to specific precedence + /// values. 
+ /// + /// @return [true] if this is a precedence DFA; otherwise, + /// [false]. + /// @see Parser#getPrecedence() + bool isPrecedenceDfa() { + return precedenceDfa; + } + + /// Get the start state for a specific precedence value. + /// + /// @param precedence The current precedence. + /// @return The start state corresponding to the specified precedence, or + /// null if no start state exists for the specified precedence. + /// + /// @throws IllegalStateException if this is not a precedence DFA. + /// @see #isPrecedenceDfa() + DFAState? getPrecedenceStartState(int precedence) { + if (!isPrecedenceDfa()) { + throw StateError( + 'Only precedence DFAs may contain a precedence start state.'); + } + + // s0.edges is never null for a precedence DFA + if (precedence < 0 || precedence >= s0!.edges!.length) { + return null; + } + + return s0!.edges![precedence]; + } + + /// Set the start state for a specific precedence value. + /// + /// @param precedence The current precedence. + /// @param startState The start state corresponding to the specified + /// precedence. + /// + /// @throws IllegalStateException if this is not a precedence DFA. + /// @see #isPrecedenceDfa() + void setPrecedenceStartState(int precedence, DFAState startState) { + if (!isPrecedenceDfa()) { + throw StateError( + 'Only precedence DFAs may contain a precedence start state.'); + } + + if (precedence < 0) { + return; + } + + // synchronization on s0 here is ok. when the DFA is turned into a + // precedence DFA, s0 will be initialized once and not updated again + // s0.edges is never null for a precedence DFA + if (precedence >= s0!.edges!.length) { + final original = s0!.edges!; + s0!.edges = List.filled(precedence + 1, null); + List.copyRange(s0!.edges!, 0, original); + } + + s0!.edges![precedence] = startState; + } + + /// Return a list of all states in this DFA, ordered by state number. 
+ + List getStates() { + final result = states.keys.toList(); + result.sort((DFAState o1, DFAState o2) { + return o1.stateNumber - o2.stateNumber; + }); + + return result; + } + + @override + String toString([Vocabulary? vocabulary]) { + vocabulary = vocabulary ?? VocabularyImpl.EMPTY_VOCABULARY; + if (s0 == null) { + return ''; + } + + final serializer = DFASerializer(this, vocabulary); + return serializer.toString(); + } + + String toLexerString() { + if (s0 == null) return ''; + DFASerializer serializer = LexerDFASerializer(this); + return serializer.toString(); + } +} diff --git a/runtime/Dart/lib/src/dfa/src/dfa_serializer.dart b/runtime/Dart/lib/src/dfa/src/dfa_serializer.dart new file mode 100644 index 0000000000..36698f52c7 --- /dev/null +++ b/runtime/Dart/lib/src/dfa/src/dfa_serializer.dart @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../util/utils.dart'; +import '../../vocabulary.dart'; +import 'dfa.dart'; +import 'dfa_state.dart'; + +/// A DFA walker that knows how to dump them to serialized strings. 
*/ +class DFASerializer { + final DFA dfa; + + final Vocabulary vocabulary; + + DFASerializer(this.dfa, this.vocabulary); + + @override + String toString() { + if (dfa.s0 == null) return 'null'; + final buf = StringBuffer(); + final states = dfa.getStates(); + for (var s in states) { + var n = 0; + if (s.edges != null) n = s.edges!.length; + for (var i = 0; i < n; i++) { + final t = s.edges![i]; + if (t != null && t.stateNumber != 0x7FFFFFFF) { + buf.write(getStateString(s)); + final label = getEdgeLabel(i); + buf.write('-'); + buf.write(label); + buf.write('->'); + buf.write(getStateString(t)); + buf.write('\n'); + } + } + } + + final output = buf.toString(); + if (output.isEmpty) return 'null'; + //return Utils.sortLinesInString(output); + return output; + } + + String getEdgeLabel(int i) { + return vocabulary.getDisplayName(i - 1); + } + + String getStateString(DFAState s) { + final n = s.stateNumber; + final baseStateStr = (s.isAcceptState ? ':' : '') + + 's$n' + + (s.requiresFullContext ? '^' : ''); + if (s.isAcceptState) { + if (s.predicates != null) { + return baseStateStr + '=>${arrayToString(s.predicates)}'; + } else { + return baseStateStr + '=>${s.prediction}'; + } + } else { + return baseStateStr; + } + } +} + +class LexerDFASerializer extends DFASerializer { + LexerDFASerializer(dfa) : super(dfa, VocabularyImpl.EMPTY_VOCABULARY); + + @override + String getEdgeLabel(i) { + return "'" + String.fromCharCode(i) + "'"; + } +} diff --git a/runtime/Dart/lib/src/dfa/src/dfa_state.dart b/runtime/Dart/lib/src/dfa/src/dfa_state.dart new file mode 100644 index 0000000000..b8a6512b1c --- /dev/null +++ b/runtime/Dart/lib/src/dfa/src/dfa_state.dart @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import '../../atn/atn.dart'; +import '../../util/murmur_hash.dart'; +import '../../util/utils.dart'; + +/// Map a predicate to a predicted alternative. */ +class PredPrediction { + final SemanticContext pred; + final int alt; + + PredPrediction(this.pred, this.alt); + + @override + String toString() { + return '($pred, $alt)'; + } +} + +/// A DFA state represents a set of possible ATN configurations. +/// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state +/// to keep track of all possible states the ATN can be in after +/// reading each input symbol. That is to say, after reading +/// input a1a2..an, the DFA is in a state that represents the +/// subset T of the states of the ATN that are reachable from the +/// ATN's start state along some path labeled a1a2..an." +/// In conventional NFA→DFA conversion, therefore, the subset T +/// would be a bitset representing the set of states the +/// ATN could be in. We need to track the alt predicted by each +/// state as well, however. More importantly, we need to maintain +/// a stack of states, tracking the closure operations as they +/// jump from rule to rule, emulating rule invocations (method calls). +/// I have to add a stack to simulate the proper lookahead sequences for +/// the underlying LL grammar from which the ATN was derived. +/// +///

    I use a set of ATNConfig objects not simple states. An ATNConfig +/// is both a state (ala normal conversion) and a RuleContext describing +/// the chain of rules (if any) followed to arrive at that state.

    +/// +///

    A DFA state may have multiple references to a particular state, +/// but with different ATN contexts (with same or different alts) +/// meaning that state was reached via a different set of rule invocations.

    +class DFAState { + int stateNumber; + + ATNConfigSet configs = ATNConfigSet(); + + /// {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) + /// {@link Token#EOF} maps to {@code edges[0]}. + + List? edges; + + bool isAcceptState = false; + + /// if accept state, what ttype do we match or alt do we predict? + /// This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or + /// {@link #requiresFullContext}. + int prediction = 0; + + LexerActionExecutor? lexerActionExecutor; + + /// Indicates that this state was created during SLL prediction that + /// discovered a conflict between the configurations in the state. Future + /// {@link ParserATNSimulator#execATN} invocations immediately jumped doing + /// full context prediction if this field is true. + bool requiresFullContext = false; + + /// During SLL parsing, this is a list of predicates associated with the + /// ATN configurations of the DFA state. When we have predicates, + /// {@link #requiresFullContext} is [false] since full context prediction evaluates predicates + /// on-the-fly. If this is not null, then {@link #prediction} is + /// {@link ATN#INVALID_ALT_NUMBER}. + /// + ///

    We only use these for non-{@link #requiresFullContext} but conflicting states. That + /// means we know from the context (it's $ or we don't dip into outer + /// context) that it's an ambiguity not a conflict.

    + /// + ///

    This list is computed by {@link ParserATNSimulator#predicateDFAState}.

    + + List? predicates; + + DFAState({this.stateNumber = -1, required this.configs}); + + /// Get the set of all alts mentioned by all ATN configurations in this + /// DFA state. + Set? get altSet { + final alts = {}; + + for (var c in configs) { + alts.add(c.alt); + } + + if (alts.isEmpty) return null; + return alts; + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(7); + hash = MurmurHash.update(hash, configs.hashCode); + hash = MurmurHash.finish(hash, 1); + return hash; + } + + /// Two [DFAState] instances are equal if their ATN configuration sets + /// are the same. This method is used to see if a state already exists. + /// + ///

    Because the number of alternatives and number of ATN configurations are + /// finite, there is a finite number of DFA states that can be processed. + /// This is necessary to show that the algorithm terminates.

    + /// + ///

    Cannot test the DFA state numbers here because in + /// {@link ParserATNSimulator#addDFAState} we need to know if any other state + /// exists that has this exact set of ATN configurations. The + /// {@link #stateNumber} is irrelevant.

    + + @override + bool operator ==(Object other) { + // compare set of ATN configurations in this set with other + if (identical(this, other)) return true; + + if (other is! DFAState) { + return false; + } + + return configs == other.configs; + } + + @override + String toString() { + final buf = StringBuffer(); + buf.write('$stateNumber:$configs'); + if (isAcceptState) { + buf.write('=>'); + if (predicates != null) { + buf.write(arrayToString(predicates)); + } else { + buf.write(prediction); + } + } + return buf.toString(); + } +} diff --git a/runtime/Dart/lib/src/error/error.dart b/runtime/Dart/lib/src/error/error.dart new file mode 100644 index 0000000000..8b96c26b94 --- /dev/null +++ b/runtime/Dart/lib/src/error/error.dart @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +export 'src/diagnostic_error_listener.dart'; +export 'src/error_listener.dart'; +export 'src/error_strategy.dart'; +export 'src/errors.dart'; diff --git a/runtime/Dart/lib/src/error/src/diagnostic_error_listener.dart b/runtime/Dart/lib/src/error/src/diagnostic_error_listener.dart new file mode 100644 index 0000000000..ee576fdbc0 --- /dev/null +++ b/runtime/Dart/lib/src/error/src/diagnostic_error_listener.dart @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../atn/atn.dart'; +import '../../dfa/dfa.dart'; +import '../../interval_set.dart'; +import '../../parser.dart'; +import '../../util/bit_set.dart'; +import 'error_listener.dart'; + +/// This implementation of [ANTLRErrorListener] can be used to identify +/// certain potential correctness and performance problems in grammars. 
"Reports" +/// are made by calling {@link Parser#notifyErrorListeners} with the appropriate +/// message. +/// +///
      +///
    • Ambiguities: These are cases where more than one path through the +/// grammar can match the input.
    • +///
    • Weak context sensitivity: These are cases where full-context +/// prediction resolved an SLL conflict to a unique alternative which equaled the +/// minimum alternative of the SLL conflict.
    • +///
    • Strong (forced) context sensitivity: These are cases where the +/// full-context prediction resolved an SLL conflict to a unique alternative, +/// and the minimum alternative of the SLL conflict was found to not be +/// a truly viable alternative. Two-stage parsing cannot be used for inputs where +/// this situation occurs.
    • +///
    +class DiagnosticErrorListener extends BaseErrorListener { + /// When [true], only exactly known ambiguities are reported. + final bool exactOnly; + + /// Initializes a new instance of [DiagnosticErrorListener], specifying + /// whether all ambiguities or only exact ambiguities are reported. + /// + /// @param exactOnly [true] to report only exact ambiguities, otherwise + /// [false] to report all ambiguities. + DiagnosticErrorListener([this.exactOnly = true]); + + @override + void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex, + int stopIndex, bool exact, BitSet? ambigAlts, ATNConfigSet configs) { + if (exactOnly && !exact) { + return; + } + + final decision = getDecisionDescription(recognizer, dfa); + final conflictingAlts = getConflictingAlts(ambigAlts, configs); + final text = + recognizer.tokenStream.getText(Interval.of(startIndex, stopIndex)); + final message = + "reportAmbiguity d=$decision: ambigAlts=$conflictingAlts, input='$text'"; + recognizer.notifyErrorListeners(message); + } + + @override + void reportAttemptingFullContext( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + BitSet? 
conflictingAlts, + ATNConfigSet configs, + ) { + final decision = getDecisionDescription(recognizer, dfa); + final text = recognizer.tokenStream.getText( + Interval.of(startIndex, stopIndex), + ); + final message = "reportAttemptingFullContext d=$decision, input='$text'"; + recognizer.notifyErrorListeners(message); + } + + @override + void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, + int stopIndex, int prediction, ATNConfigSet configs) { + final decision = getDecisionDescription(recognizer, dfa); + final text = + recognizer.tokenStream.getText(Interval.of(startIndex, stopIndex)); + final message = "reportContextSensitivity d=$decision, input='$text'"; + recognizer.notifyErrorListeners(message); + } + + String getDecisionDescription(Parser recognizer, DFA dfa) { + final decision = dfa.decision; + final ruleIndex = dfa.atnStartState?.ruleIndex; + + final ruleNames = recognizer.ruleNames; + if (ruleIndex == null || ruleIndex < 0 || ruleIndex >= ruleNames.length) { + return decision.toString(); + } + + final ruleName = ruleNames[ruleIndex]; + if (ruleName.isEmpty) { + return decision.toString(); + } + + return '$decision ($ruleName)'; + } + + /// Computes the set of conflicting or ambiguous alternatives from a + /// configuration set, if that information was not already provided by the + /// parser. + /// + /// @param reportedAlts The set of conflicting or ambiguous alternatives, as + /// reported by the parser. + /// @param configs The conflicting or ambiguous configuration set. + /// @return Returns [reportedAlts] if it is not null, otherwise + /// returns the set of alternatives represented in [configs]. + BitSet getConflictingAlts(BitSet? 
reportedAlts, ATNConfigSet configs) { + if (reportedAlts != null) { + return reportedAlts; + } + + final result = BitSet(); + for (var config in configs) { + result.set(config.alt); + } + + return result; + } +} diff --git a/runtime/Dart/lib/src/error/src/error_listener.dart b/runtime/Dart/lib/src/error/src/error_listener.dart new file mode 100644 index 0000000000..36505fb6af --- /dev/null +++ b/runtime/Dart/lib/src/error/src/error_listener.dart @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../atn/atn.dart'; +import '../../dfa/dfa.dart'; +import '../../parser.dart'; +import '../../recognizer.dart'; +import '../../util/bit_set.dart'; +import 'errors.dart'; + +import '../../util/platform_stub.dart' + if (dart.library.io) '../../util/platform_io.dart' + if (dart.library.html) '../../util/platform_html.dart'; + +abstract class ErrorListener { + /// Upon syntax error, notify any interested parties. This is not how to + /// recover from errors or compute error messages. [ANTLRErrorStrategy] + /// specifies how to recover from syntax errors and how to compute error + /// messages. This listener's job is simply to emit a computed message, + /// though it has enough information to create its own message in many cases. + /// + ///

    The [RecognitionException] is non-null for all syntax errors except + /// when we discover mismatched token errors that we can recover from + /// in-line, without returning from the surrounding rule (via the single + /// token insertion and deletion mechanism).

    + /// + /// @param recognizer + /// What parser got the error. From this + /// object, you can access the context as well + /// as the input stream. + /// @param offendingSymbol + /// The offending token in the input token + /// stream, unless recognizer is a lexer (then it's null). If + /// no viable alternative error, [e] has token at which we + /// started production for the decision. + /// @param line + /// The line number in the input where the error occurred. + /// @param charPositionInLine + /// The character position within that line where the error occurred. + /// @param msg + /// The message to emit. + /// @param e + /// The exception generated by the parser that led to + /// the reporting of an error. It is null in the case where + /// the parser was able to recover in line without exiting the + /// surrounding rule. + void syntaxError( + Recognizer recognizer, + Object? offendingSymbol, + int? line, + int charPositionInLine, + String msg, + RecognitionException? e, + ); + + /// This method is called by the parser when a full-context prediction + /// results in an ambiguity. + /// + ///

    Each full-context prediction which does not result in a syntax error + /// will call either {@link #reportContextSensitivity} or + /// {@link #reportAmbiguity}.

    + /// + ///

    When [ambigAlts] is not null, it contains the set of potentially + /// viable alternatives identified by the prediction algorithm. When + /// [ambigAlts] is null, use {@link ATNConfigSet#getAlts} to obtain the + /// represented alternatives from the [configs] argument.

    + /// + ///

    When [exact] is [true], all of the potentially + /// viable alternatives are truly viable, i.e. this is reporting an exact + /// ambiguity. When [exact] is [false], at least two of + /// the potentially viable alternatives are viable for the current input, but + /// the prediction algorithm terminated as soon as it determined that at + /// least the minimum potentially viable alternative is truly + /// viable.

    + /// + ///

    When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction + /// mode is used, the parser is required to identify exact ambiguities so + /// [exact] will always be [true].

    + /// + ///

    This method is not used by lexers.

    + /// + /// @param recognizer the parser instance + /// @param dfa the DFA for the current decision + /// @param startIndex the input index where the decision started + /// @param stopIndex the input input where the ambiguity was identified + /// @param exact [true] if the ambiguity is exactly known, otherwise + /// [false]. This is always [true] when + /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used. + /// @param ambigAlts the potentially ambiguous alternatives, or null + /// to indicate that the potentially ambiguous alternatives are the complete + /// set of represented alternatives in [configs] + /// @param configs the ATN configuration set where the ambiguity was + /// identified + void reportAmbiguity( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + bool exact, + BitSet? ambigAlts, + ATNConfigSet configs, + ); + + /// This method is called when an SLL conflict occurs and the parser is about + /// to use the full context information to make an LL decision. + /// + ///

    If one or more configurations in [configs] contains a semantic + /// predicate, the predicates are evaluated before this method is called. The + /// subset of alternatives which are still viable after predicates are + /// evaluated is reported in [conflictingAlts].

    + /// + ///

    This method is not used by lexers.

    + /// + /// @param recognizer the parser instance + /// @param dfa the DFA for the current decision + /// @param startIndex the input index where the decision started + /// @param stopIndex the input index where the SLL conflict occurred + /// @param conflictingAlts The specific conflicting alternatives. If this is + /// null, the conflicting alternatives are all alternatives + /// represented in [configs]. At the moment, conflictingAlts is non-null + /// (for the reference implementation, but Sam's optimized version can see this + /// as null). + /// @param configs the ATN configuration set where the SLL conflict was + /// detected + void reportAttemptingFullContext( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + BitSet? conflictingAlts, + ATNConfigSet configs, + ); + + /// This method is called by the parser when a full-context prediction has a + /// unique result. + /// + ///

    Each full-context prediction which does not result in a syntax error + /// will call either {@link #reportContextSensitivity} or + /// {@link #reportAmbiguity}.

    + /// + ///

    For prediction implementations that only evaluate full-context + /// predictions when an SLL conflict is found (including the default + /// [ParserATNSimulator] implementation), this method reports cases + /// where SLL conflicts were resolved to unique full-context predictions, + /// i.e. the decision was context-sensitive. This report does not necessarily + /// indicate a problem, and it may appear even in completely unambiguous + /// grammars.

    + /// + ///

    [configs] may have more than one represented alternative if the + /// full-context prediction algorithm does not evaluate predicates before + /// beginning the full-context prediction. In all cases, the final prediction + /// is passed as the [prediction] argument.

    + /// + ///

    Note that the definition of "context sensitivity" in this method + /// differs from the concept in {@link DecisionInfo#contextSensitivities}. + /// This method reports all instances where an SLL conflict occurred but LL + /// parsing produced a unique result, whether or not that unique result + /// matches the minimum alternative in the SLL conflicting set.

    + /// + ///

    This method is not used by lexers.

    + /// + /// @param recognizer the parser instance + /// @param dfa the DFA for the current decision + /// @param startIndex the input index where the decision started + /// @param stopIndex the input index where the context sensitivity was + /// finally determined + /// @param prediction the unambiguous result of the full-context prediction + /// @param configs the ATN configuration set where the unambiguous prediction + /// was determined + void reportContextSensitivity( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + int prediction, + ATNConfigSet configs, + ); +} + +class BaseErrorListener extends ErrorListener { + @override + void reportAmbiguity( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + bool exact, + BitSet? ambigAlts, + ATNConfigSet configs, + ) {} + + @override + void reportAttemptingFullContext( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + BitSet? conflictingAlts, + ATNConfigSet configs, + ) {} + + @override + void reportContextSensitivity( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + int prediction, + ATNConfigSet configs, + ) {} + + @override + void syntaxError( + Recognizer recognizer, + Object? offendingSymbol, + int? line, + int charPositionInLine, + String msg, + RecognitionException? e, + ) {} +} + +class ConsoleErrorListener extends BaseErrorListener { + /// Provides a default instance of [ConsoleErrorListener]. + static final INSTANCE = ConsoleErrorListener(); + + /// {@inheritDoc} + /// + ///

    + /// This implementation prints messages to {@link System//err} containing the + /// values of [line], [charPositionInLine], and [msg] using + /// the following format.

    + /// + ///
    +  /// line line:charPositionInLine msg
    +  /// 
    + @override + void syntaxError(recognizer, offendingSymbol, line, column, msg, e) { + stderrWriteln('line $line:$column $msg'); + } +} + +/// This implementation of [ErrorListener] dispatches all calls to a +/// collection of delegate listeners. This reduces the effort required to support multiple +/// listeners. +class ProxyErrorListener implements ErrorListener { + final List delegates; + + ProxyErrorListener(this.delegates) { + // ignore: unnecessary_null_comparison + if (delegates == null) { + throw ArgumentError.notNull('delegates'); + } + } + + @override + void syntaxError( + Recognizer recognizer, + Object? offendingSymbol, + int? line, + int charPositionInLine, + String msg, + RecognitionException? e, + ) { + for (final listener in delegates) { + listener.syntaxError( + recognizer, + offendingSymbol, + line, + charPositionInLine, + msg, + e, + ); + } + } + + @override + void reportAmbiguity( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + bool exact, + BitSet? ambigAlts, + ATNConfigSet configs, + ) { + for (final listener in delegates) { + listener.reportAmbiguity( + recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs); + } + } + + @override + void reportAttemptingFullContext( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + BitSet? 
conflictingAlts, + ATNConfigSet configs, + ) { + for (final listener in delegates) { + listener.reportAttemptingFullContext( + recognizer, + dfa, + startIndex, + stopIndex, + conflictingAlts, + configs, + ); + } + } + + @override + void reportContextSensitivity( + Parser recognizer, + DFA dfa, + int startIndex, + int stopIndex, + int prediction, + ATNConfigSet configs, + ) { + for (final listener in delegates) { + listener.reportContextSensitivity( + recognizer, + dfa, + startIndex, + stopIndex, + prediction, + configs, + ); + } + } +} diff --git a/runtime/Dart/lib/src/error/src/error_strategy.dart b/runtime/Dart/lib/src/error/src/error_strategy.dart new file mode 100644 index 0000000000..a751545695 --- /dev/null +++ b/runtime/Dart/lib/src/error/src/error_strategy.dart @@ -0,0 +1,903 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../atn/atn.dart'; +import '../../interval_set.dart'; +import '../../misc/misc.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import '../../tree/tree.dart'; +import 'errors.dart'; + +/// The interface for defining strategies to deal with syntax errors encountered +/// during a parse by ANTLR-generated parsers. We distinguish between three +/// different kinds of errors: +/// +///
      +///
    • The parser could not figure out which path to take in the ATN (none of +/// the available alternatives could possibly match)
    • +///
    • The current input does not match what we were looking for
    • +///
    • A predicate evaluated to false
    • +///
    +/// +/// Implementations of this interface report syntax errors by calling +/// {@link Parser#notifyErrorListeners}. +/// +///

    TODO: what to do about lexers

    +abstract class ErrorStrategy { + /// Reset the error handler state for the specified [recognizer]. + /// @param recognizer the parser instance + void reset(Parser recognizer); + + /// This method is called when an unexpected symbol is encountered during an + /// inline match operation, such as {@link Parser#match}. If the error + /// strategy successfully recovers from the match failure, this method + /// returns the [Token] instance which should be treated as the + /// successful result of the match. + /// + ///

    This method handles the consumption of any tokens - the caller should + /// not call {@link Parser#consume} after a successful recovery.

    + /// + ///

    Note that the calling code will not report an error if this method + /// returns successfully. The error strategy implementation is responsible + /// for calling {@link Parser#notifyErrorListeners} as appropriate.

    + /// + /// @param recognizer the parser instance + /// @ if the error strategy was not able to + /// recover from the unexpected input symbol + Token recoverInline(Parser recognizer); + + /// This method is called to recover from exception [e]. This method is + /// called after {@link #reportError} by the default exception handler + /// generated for a rule method. + /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception to recover from + /// @ if the error strategy could not recover from + /// the recognition exception + void recover(Parser recognizer, RecognitionException e); + + /// This method provides the error handler with an opportunity to handle + /// syntactic or semantic errors in the input stream before they result in a + /// [RecognitionException]. + /// + ///

    The generated code currently contains calls to {@link #sync} after + /// entering the decision state of a closure block ({@code (...)*} or + /// {@code (...)+}).

    + /// + ///

    For an implementation based on Jim Idle's "magic sync" mechanism, see + /// {@link DefaultErrorStrategy#sync}.

    + /// + /// @see DefaultErrorStrategy#sync + /// + /// @param recognizer the parser instance + /// @ if an error is detected by the error + /// strategy but cannot be automatically recovered at the current state in + /// the parsing process + void sync(Parser recognizer); + + /// Tests whether or not [recognizer] is in the process of recovering + /// from an error. In error recovery mode, {@link Parser#consume} adds + /// symbols to the parse tree by calling + /// {@link Parser#createErrorNode(ParserRuleContext, Token)} then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of + /// {@link Parser#createTerminalNode(ParserRuleContext, Token)}. + /// + /// @param recognizer the parser instance + /// @return [true] if the parser is currently recovering from a parse + /// error, otherwise [false] + bool inErrorRecoveryMode(Parser recognizer); + + /// This method is called by when the parser successfully matches an input + /// symbol. + /// + /// @param recognizer the parser instance + void reportMatch(Parser recognizer); + + /// Report any kind of [RecognitionException]. This method is called by + /// the default exception handler generated for a rule method. + /// + /// @param recognizer the parser instance + /// @param e the recognition exception to report + void reportError(Parser recognizer, RecognitionException e); +} + +/// This is the default implementation of [ANTLRErrorStrategy] used for +/// error reporting and recovery in ANTLR parsers. +class DefaultErrorStrategy implements ErrorStrategy { + /// Indicates whether the error strategy is currently "recovering from an + /// error". This is used to suppress reporting multiple error messages while + /// attempting to recover from a detected syntax error. + /// + /// @see #inErrorRecoveryMode + bool errorRecoveryMode = false; + + /// The index into the input stream where the last error occurred. 
+ /// This is used to prevent infinite loops where an error is found + /// but no token is consumed during recovery...another error is found, + /// ad nauseum. This is a failsafe mechanism to guarantee that at least + /// one token/tree node is consumed for two errors. + int lastErrorIndex = -1; + + IntervalSet? lastErrorStates; + + /// This field is used to propagate information about the lookahead following + /// the previous match. Since prediction prefers completing the current rule + /// to error recovery efforts, error reporting may occur later than the + /// original point where it was discoverable. The original context is used to + /// compute the true expected sets as though the reporting occurred as early + /// as possible. + ParserRuleContext? nextTokensContext; + + /// @see #nextTokensContext + int? nextTokensState; + + /// {@inheritDoc} + /// + ///

    The default implementation simply calls {@link #endErrorCondition} to + /// ensure that the handler is not in error recovery mode.

    + + @override + void reset(Parser recognizer) { + endErrorCondition(recognizer); + } + + /// This method is called to enter error recovery mode when a recognition + /// exception is reported. + /// + /// @param recognizer the parser instance + void beginErrorCondition(Parser recognizer) { + errorRecoveryMode = true; + } + + /// {@inheritDoc} + + @override + bool inErrorRecoveryMode(Parser recognizer) { + return errorRecoveryMode; + } + + /// This method is called to leave error recovery mode after recovering from + /// a recognition exception. + /// + /// @param recognizer + void endErrorCondition(Parser recognizer) { + errorRecoveryMode = false; + lastErrorStates = null; + lastErrorIndex = -1; + } + + /// {@inheritDoc} + /// + ///

    The default implementation simply calls {@link #endErrorCondition}.

    + + @override + void reportMatch(Parser recognizer) { + endErrorCondition(recognizer); + } + + /// {@inheritDoc} + /// + ///

    The default implementation returns immediately if the handler is already + /// in error recovery mode. Otherwise, it calls {@link #beginErrorCondition} + /// and dispatches the reporting task based on the runtime type of [e] + /// according to the following table.

    + /// + ///
      + ///
    • [NoViableAltException]: Dispatches the call to + /// {@link #reportNoViableAlternative}
    • + ///
    • [InputMismatchException]: Dispatches the call to + /// {@link #reportInputMismatch}
    • + ///
    • [FailedPredicateException]: Dispatches the call to + /// {@link #reportFailedPredicate}
    • + ///
    • All other types: calls {@link Parser#notifyErrorListeners} to report + /// the exception
    • + ///
    + + @override + void reportError(Parser recognizer, RecognitionException e) { + // if we've already reported an error and have not matched a token + // yet successfully, don't report any errors. + if (inErrorRecoveryMode(recognizer)) { +// System.err.print("[SPURIOUS] "); + return; // don't report spurious errors + } + beginErrorCondition(recognizer); + if (e is NoViableAltException) { + reportNoViableAlternative(recognizer, e); + } else if (e is InputMismatchException) { + reportInputMismatch(recognizer, e); + } else if (e is FailedPredicateException) { + reportFailedPredicate(recognizer, e); + } else { + log('unknown recognition error type: ${e.runtimeType}', + level: Level.SEVERE.value); + recognizer.notifyErrorListeners(e.message, e.offendingToken, e); + } + } + + /// {@inheritDoc} + /// + ///

    The default implementation resynchronizes the parser by consuming tokens + /// until we find one in the resynchronization set--loosely the set of tokens + /// that can follow the current rule.

    + + @override + void recover(Parser recognizer, RecognitionException e) { +// System.out.println("recover in "+recognizer.getRuleInvocationStack()+ +// " index="+recognizer.inputStream.index()+ +// ", lastErrorIndex="+ +// lastErrorIndex+ +// ", states="+lastErrorStates); + if (lastErrorIndex == recognizer.inputStream.index && + lastErrorStates != null && + lastErrorStates!.contains(recognizer.state)) { + // uh oh, another error at same token index and previously-visited + // state in ATN; must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop; this is a failsafe. +// log("seen error condition before index=, level: Level.SEVERE.value"+ +// lastErrorIndex+", states="+lastErrorStates); +// log("FAILSAFE consumes "+recognizer.getTokenNames()[recognizer.inputStream.LA(1)], level: Level.SEVERE.value); + recognizer.consume(); + } + lastErrorIndex = recognizer.inputStream.index; + lastErrorStates ??= IntervalSet(); + lastErrorStates!.addOne(recognizer.state); + final followSet = getErrorRecoverySet(recognizer); + consumeUntil(recognizer, followSet); + } + + /// The default implementation of {@link ANTLRErrorStrategy#sync} makes sure + /// that the current lookahead symbol is consistent with what were expecting + /// at this point in the ATN. You can call this anytime but ANTLR only + /// generates code to check before subrules/loops and each iteration. + /// + ///

    Implements Jim Idle's magic sync mechanism in closures and optional + /// subrules. E.g.,

    + /// + ///
    +  /// a : sync ( stuff sync )* ;
    +  /// sync : {consume to what can follow sync} ;
    +  /// 
    + /// + /// At the start of a sub rule upon error, {@link #sync} performs single + /// token deletion, if possible. If it can't do that, it bails on the current + /// rule and uses the default error recovery, which consumes until the + /// resynchronization set of the current rule. + /// + ///

    If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block + /// with an empty alternative), then the expected set includes what follows + /// the subrule.

    + /// + ///

    During loop iteration, it consumes until it sees a token that can start a + /// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to + /// stay in the loop as long as possible.

    + /// + ///

    ORIGINS

    + /// + ///

    Previous versions of ANTLR did a poor job of their recovery within loops. + /// A single mismatch token or missing token would force the parser to bail + /// out of the entire rules surrounding the loop. So, for rule

    + /// + ///
    +  /// classDef : 'class' ID '{' member* '}'
    +  /// 
    + /// + /// input with an extra token between members would force the parser to + /// consume until it found the next class definition rather than the next + /// member definition of the current class. + /// + ///

    This functionality cost a little bit of effort because the parser has to + /// compare token set at the start of the loop and at each iteration. If for + /// some reason speed is suffering for you, you can turn off this + /// functionality by simply overriding this method as a blank { }.

    + + @override + void sync(Parser recognizer) { + final s = recognizer.interpreter!.atn.states[recognizer.state]!; +// log("sync @ "+s.stateNumber+"="+s.getClass().getSimpleName(), level: Level.SEVERE.value); + // If already recovering, don't try to sync + if (inErrorRecoveryMode(recognizer)) { + return; + } + + final tokens = recognizer.inputStream; + final la = tokens.LA(1)!; + + // try cheaper subset first; might get lucky. seems to shave a wee bit off + final nextTokens = recognizer.getATN().nextTokens(s); + if (nextTokens.contains(la)) { + // We are sure the token matches + nextTokensContext = null; + nextTokensState = ATNState.INVALID_STATE_NUMBER; + return; + } + + if (nextTokens.contains(Token.EPSILON)) { + if (nextTokensContext == null) { + // It's possible the next token won't match; information tracked + // by sync is restricted for performance. + nextTokensContext = recognizer.context; + nextTokensState = recognizer.state; + } + return; + } + + switch (s.stateType) { + case StateType.BLOCK_START: + case StateType.STAR_BLOCK_START: + case StateType.PLUS_BLOCK_START: + case StateType.STAR_LOOP_ENTRY: + // report error and recover if possible + if (singleTokenDeletion(recognizer) != null) { + return; + } + + throw InputMismatchException(recognizer); + + case StateType.PLUS_LOOP_BACK: + case StateType.STAR_LOOP_BACK: +// log("at loop back: "+s.getClass().getSimpleName(), level: Level.SEVERE.value); + reportUnwantedToken(recognizer); + final expecting = recognizer.expectedTokens; + final whatFollowsLoopIterationOrRule = + expecting | getErrorRecoverySet(recognizer); + consumeUntil(recognizer, whatFollowsLoopIterationOrRule); + break; + + default: + // do nothing if we can't identify the exact kind of ATN state + break; + } + } + + /// This is called by {@link #reportError} when the exception is a + /// [NoViableAltException]. 
+ /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception + void reportNoViableAlternative(Parser recognizer, NoViableAltException e) { + final tokens = recognizer.inputStream; + String input; + + if (e.startToken.type == Token.EOF) { + input = ''; + } else { + input = tokens.getTextRange(e.startToken, e.offendingToken); + } + + final msg = 'no viable alternative at input ' + escapeWSAndQuote(input); + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /// This is called by {@link #reportError} when the exception is an + /// [InputMismatchException]. + /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception + void reportInputMismatch(Parser recognizer, InputMismatchException e) { + final msg = 'mismatched input ' + + getTokenErrorDisplay(e.offendingToken) + + ' expecting ' + + e.expectedTokens!.toString(vocabulary: recognizer.vocabulary); + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /// This is called by {@link #reportError} when the exception is a + /// [FailedPredicateException]. + /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception + void reportFailedPredicate(Parser recognizer, FailedPredicateException e) { + final ruleIndex = recognizer.context?.ruleIndex; + final ruleName = ruleIndex != null ? recognizer.ruleNames[ruleIndex] : ''; + final msg = 'rule ' + ruleName + ' ' + e.message; + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /// This method is called to report a syntax error which requires the removal + /// of a token from the input stream. At the time this method is called, the + /// erroneous symbol is current {@code LT(1)} symbol and has not yet been + /// removed from the input stream. When this method returns, + /// [recognizer] is in error recovery mode. + /// + ///

    This method is called when {@link #singleTokenDeletion} identifies + /// single-token deletion as a viable recovery strategy for a mismatched + /// input error.

    + /// + ///

    The default implementation simply returns if the handler is already in + /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + /// enter error recovery mode, followed by calling + /// {@link Parser#notifyErrorListeners}.

    + /// + /// @param recognizer the parser instance + void reportUnwantedToken(Parser recognizer) { + if (inErrorRecoveryMode(recognizer)) { + return; + } + + beginErrorCondition(recognizer); + + final t = recognizer.currentToken; + final tokenName = getTokenErrorDisplay(t); + final expecting = getExpectedTokens(recognizer); + final msg = 'extraneous input ' + + tokenName + + ' expecting ' + + expecting.toString(vocabulary: recognizer.vocabulary); + recognizer.notifyErrorListeners(msg, t, null); + } + + /// This method is called to report a syntax error which requires the + /// insertion of a missing token into the input stream. At the time this + /// method is called, the missing token has not yet been inserted. When this + /// method returns, [recognizer] is in error recovery mode. + /// + ///

    This method is called when {@link #singleTokenInsertion} identifies + /// single-token insertion as a viable recovery strategy for a mismatched + /// input error.

    + /// + ///

    The default implementation simply returns if the handler is already in + /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + /// enter error recovery mode, followed by calling + /// {@link Parser#notifyErrorListeners}.

    + /// + /// @param recognizer the parser instance + void reportMissingToken(Parser recognizer) { + if (inErrorRecoveryMode(recognizer)) { + return; + } + + beginErrorCondition(recognizer); + + final t = recognizer.currentToken; + final expecting = getExpectedTokens(recognizer); + final msg = 'missing ' + + expecting.toString(vocabulary: recognizer.vocabulary) + + ' at ' + + getTokenErrorDisplay(t); + + recognizer.notifyErrorListeners(msg, t, null); + } + + /// {@inheritDoc} + /// + ///

    The default implementation attempts to recover from the mismatched input + /// by using single token insertion and deletion as described below. If the + /// recovery attempt fails, this method throws an + /// [InputMismatchException].

    + /// + ///

    EXTRA TOKEN (single token deletion)

    + /// + ///

    {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the + /// right token, however, then assume {@code LA(1)} is some extra spurious + /// token and delete it. Then consume and return the next token (which was + /// the {@code LA(2)} token) as the successful result of the match operation.

    + /// + ///

    This recovery strategy is implemented by {@link #singleTokenDeletion}.

    + /// + ///

    MISSING TOKEN (single token insertion)

    + /// + ///

    If current token (at {@code LA(1)}) is consistent with what could come + /// after the expected {@code LA(1)} token, then assume the token is missing + /// and use the parser's [TokenFactory] to create it on the fly. The + /// "insertion" is performed by returning the created token as the successful + /// result of the match operation.

    + /// + ///

    This recovery strategy is implemented by {@link #singleTokenInsertion}.

    + /// + ///

    EXAMPLE

    + /// + ///

    For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When + /// the parser returns from the nested call to [expr], it will have + /// call chain:

    + /// + ///
    +  /// stat → expr → atom
    +  /// 
    + /// + /// and it will be trying to match the {@code ')'} at this point in the + /// derivation: + /// + ///
    +  /// => ID '=' '(' INT ')' ('+' atom)* ';'
    +  ///                    ^
    +  /// 
    + /// + /// The attempt to match {@code ')'} will fail when it sees {@code ';'} and + /// call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'} + /// is in the set of tokens that can follow the {@code ')'} token reference + /// in rule [atom]. It can assume that you forgot the {@code ')'}. + + @override + Token recoverInline(Parser recognizer) { +// SINGLE TOKEN DELETION + final matchedSymbol = singleTokenDeletion(recognizer); + if (matchedSymbol != null) { +// we have deleted the extra token. +// now, move past ttype token as if all were ok + recognizer.consume(); + return matchedSymbol; + } + +// SINGLE TOKEN INSERTION + if (singleTokenInsertion(recognizer)) { + return getMissingSymbol(recognizer); + } + +// even that didn't work; must throw the exception + InputMismatchException e; + if (nextTokensContext == null) { + e = InputMismatchException(recognizer); + } else { + e = InputMismatchException( + recognizer, + nextTokensState!, + nextTokensContext, + ); + } + + throw e; + } + + /// This method implements the single-token insertion inline error recovery + /// strategy. It is called by {@link #recoverInline} if the single-token + /// deletion strategy fails to recover from the mismatched input. If this + /// method returns [true], [recognizer] will be in error recovery + /// mode. + /// + ///

    This method determines whether or not single-token insertion is viable by + /// checking if the {@code LA(1)} input symbol could be successfully matched + /// if it were instead the {@code LA(2)} symbol. If this method returns + /// [true], the caller is responsible for creating and inserting a + /// token with the correct type to produce this behavior.

    + /// + /// @param recognizer the parser instance + /// @return [true] if single-token insertion is a viable recovery + /// strategy for the current mismatched input, otherwise [false] + bool singleTokenInsertion(Parser recognizer) { + final currentSymbolType = recognizer.inputStream.LA(1)!; + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token; error recovery + // is free to conjure up and insert the missing token + final currentState = recognizer.interpreter!.atn.states[recognizer.state]!; + final next = currentState.transition(0).target; + final atn = recognizer.interpreter!.atn; + final expectingAtLL2 = atn.nextTokens(next, recognizer.context); +// System.out.println("LT(2) set="+expectingAtLL2.toString(recognizer.getTokenNames())); + if (expectingAtLL2.contains(currentSymbolType)) { + reportMissingToken(recognizer); + return true; + } + return false; + } + + /// This method implements the single-token deletion inline error recovery + /// strategy. It is called by {@link #recoverInline} to attempt to recover + /// from mismatched input. If this method returns null, the parser and error + /// handler state will not have changed. If this method returns non-null, + /// [recognizer] will not be in error recovery mode since the + /// returned token was a successful match. + /// + ///

    If the single-token deletion is successful, this method calls + /// {@link #reportUnwantedToken} to report the error, followed by + /// {@link Parser#consume} to actually "delete" the extraneous token. Then, + /// before returning {@link #reportMatch} is called to signal a successful + /// match.

    + /// + /// @param recognizer the parser instance + /// @return the successfully matched [Token] instance if single-token + /// deletion successfully recovers from the mismatched input, otherwise + /// null + Token? singleTokenDeletion(Parser recognizer) { + final nextTokenType = recognizer.inputStream.LA(2)!; + final expecting = getExpectedTokens(recognizer); + if (expecting.contains(nextTokenType)) { + reportUnwantedToken(recognizer); + /* + log("recoverFromMismatchedToken deleting , level: Level.SEVERE.value"+ + ((TokenStream)recognizer.inputStream).LT(1)+ + " since "+((TokenStream)recognizer.inputStream).LT(2)+ + " is what we want"); + */ + recognizer.consume(); // simply delete extra token + // we want to return the token we're actually matching + final matchedSymbol = recognizer.currentToken; + reportMatch(recognizer); // we know current token is correct + return matchedSymbol; + } + return null; + } + + /// Conjure up a missing token during error recovery. + /// + /// The recognizer attempts to recover from single missing + /// symbols. But, actions might refer to that missing symbol. + /// For example, x=ID {f($x);}. The action clearly assumes + /// that there has been an identifier matched previously and that + /// $x points at that token. If that token is missing, but + /// the next token in the stream is what we want we assume that + /// this token is missing and we keep going. Because we + /// have to return some token to replace the missing token, + /// we have to conjure one up. This method gives the user control + /// over the tokens returned for missing tokens. Mostly, + /// you will want to create something special for identifier + /// tokens. For literals such as '{' and ',', the default + /// action in the parser or tree parser works. It simply creates + /// a CommonToken of the appropriate type. The text will be the token. 
+ /// If you change what tokens must be created by the lexer, + /// override this method to create the appropriate tokens. + Token getMissingSymbol(Parser recognizer) { + final currentSymbol = recognizer.currentToken; + final expecting = getExpectedTokens(recognizer); + var expectedTokenType = Token.INVALID_TYPE; + if (!expecting.isNil) { + expectedTokenType = expecting.minElement; // get any element + } + String tokenText; + if (expectedTokenType == Token.EOF) { + tokenText = ''; + } else { + tokenText = ''; + } + var current = currentSymbol; + final lookback = recognizer.inputStream.LT(-1); + if (current.type == Token.EOF && lookback != null) { + current = lookback; + } + return recognizer.tokenFactory.create( + expectedTokenType, + tokenText, + Pair(current.tokenSource, current.tokenSource?.inputStream), + Token.DEFAULT_CHANNEL, + -1, + -1, + current.line, + current.charPositionInLine, + ); + } + + IntervalSet getExpectedTokens(Parser recognizer) { + return recognizer.expectedTokens; + } + + /// How should a token be displayed in an error message? The default + /// is to display just the text, but during development you might + /// want to have a lot of information spit out. Override in that case + /// to use t.toString() (which, for CommonToken, dumps everything about + /// the token). This is better than forcing you to override a method in + /// your token objects because you don't have to go modify your lexer + /// so that it creates a new Java type. + String getTokenErrorDisplay(Token? t) { + if (t == null) return ''; + var s = getSymbolText(t); + if (s == null) { + if (getSymbolType(t) == Token.EOF) { + s = ''; + } else { + s = '<${getSymbolType(t)}>'; + } + } + return escapeWSAndQuote(s); + } + + String? 
getSymbolText(Token symbol) { + return symbol.text; + } + + int getSymbolType(Token symbol) { + return symbol.type; + } + + String escapeWSAndQuote(String s) { +// if ( s==null ) return s; + s = s.replaceAll('\n', r'\n'); + s = s.replaceAll('\r', r'\r'); + s = s.replaceAll('\t', r'\t'); + return "'" + s + "'"; + } + +/* Compute the error recovery set for the current rule. During + * rule invocation, the parser pushes the set of tokens that can + * follow that rule reference on the stack; this amounts to + * computing FIRST of what follows the rule reference in the + * enclosing rule. See LinearApproximator.FIRST(). + * This local follow set only includes tokens + * from within the rule; i.e., the FIRST computation done by + * ANTLR stops at the end of a rule. + * + * EXAMPLE + * + * When you find a "no viable alt exception", the input is not + * consistent with any of the alternatives for rule r. The best + * thing to do is to consume tokens until you see something that + * can legally follow a call to r *or* any rule that called r. + * You don't want the exact set of viable next tokens because the + * input might just be missing a token--you might consume the + * rest of the input looking for one of the missing tokens. + * + * Consider grammar: + * + * a : '[' b ']' + * | '(' b ')' + * ; + * b : c '^' INT ; + * c : ID + * | INT + * ; + * + * At each rule invocation, the set of tokens that could follow + * that rule is pushed on a stack. 
Here are the various + * context-sensitive follow sets: + * + * FOLLOW(b1_in_a) = FIRST(']') = ']' + * FOLLOW(b2_in_a) = FIRST(')') = ')' + * FOLLOW(c_in_b) = FIRST('^') = '^' + * + * Upon erroneous input "[]", the call chain is + * + * a -> b -> c + * + * and, hence, the follow context stack is: + * + * depth follow set start of rule execution + * 0 a (from main()) + * 1 ']' b + * 2 '^' c + * + * Notice that ')' is not included, because b would have to have + * been called from a different context in rule a for ')' to be + * included. + * + * For error recovery, we cannot consider FOLLOW(c) + * (context-sensitive or otherwise). We need the combined set of + * all context-sensitive FOLLOW sets--the set of all tokens that + * could follow any reference in the call chain. We need to + * resync to one of those tokens. Note that FOLLOW(c)='^' and if + * we resync'd to that token, we'd consume until EOF. We need to + * sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. + * In this case, for input "[]", LA(1) is ']' and in the set, so we would + * not consume anything. After printing an error, rule c would + * return normally. Rule b would not find the required '^' though. + * At this point, it gets a mismatched token error and throws an + * exception (since LA(1) is not in the viable following token + * set). The rule exception handler tries to recover, but finds + * the same recovery set and doesn't consume anything. Rule b + * exits normally returning to rule a. Now it finds the ']' (and + * with the successful match exits errorRecovery mode). + * + * So, you can see that the parser walks up the call chain looking + * for the token that was a member of the recovery set. + * + * Errors are not generated in errorRecovery mode. 
+ * + * ANTLR's error recovery mechanism is based upon original ideas: + * + * "Algorithms + Data Structures = Programs" by Niklaus Wirth + * + * and + * + * "A note on error recovery in recursive descent parsers": + * http://portal.acm.org/citation.cfm?id=947902.947905 + * + * Later, Josef Grosch had some good ideas: + * + * "Efficient and Comfortable Error Recovery in Recursive Descent + * Parsers": + * ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip + * + * Like Grosch I implement context-sensitive FOLLOW sets that are combined + * at run-time upon error to avoid overhead during parsing. + */ + IntervalSet getErrorRecoverySet(Parser recognizer) { + final atn = recognizer.interpreter!.atn; + RuleContext? ctx = recognizer.context; + final recoverSet = IntervalSet(); + while (ctx != null && ctx.invokingState >= 0) { + // compute what follows who invoked us + final invokingState = atn.states[ctx.invokingState]!; + final rt = invokingState.transition(0) as RuleTransition; + final follow = atn.nextTokens(rt.followState); + recoverSet.addAll(follow); + ctx = ctx.parent; + } + recoverSet.remove(Token.EPSILON); + return recoverSet; + } + + /// Consume tokens until one matches the given token set. */ + void consumeUntil(Parser recognizer, IntervalSet set) { +// log("consumeUntil("+set.toString(recognizer.getTokenNames())+")", level: Level.SEVERE.value); + var ttype = recognizer.inputStream.LA(1)!; + while (ttype != Token.EOF && !set.contains(ttype)) { + //System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]); +// recognizer.inputStream.consume(); + recognizer.consume(); + ttype = recognizer.inputStream.LA(1)!; + } + } +} + +/// This implementation of [ANTLRErrorStrategy] responds to syntax errors +/// by immediately canceling the parse operation with a +/// [ParseCancellationException]. 
The implementation ensures that the +/// {@link ParserRuleContext#exception} field is set for all parse tree nodes +/// that were not completed prior to encountering the error. +/// +///

    +/// This error strategy is useful in the following scenarios.

    +/// +///
      +///
    • Two-stage parsing: This error strategy allows the first +/// stage of two-stage parsing to immediately terminate if an error is +/// encountered, and immediately fall back to the second stage. In addition to +/// avoiding wasted work by attempting to recover from errors here, the empty +/// implementation of {@link BailErrorStrategy#sync} improves the performance of +/// the first stage.
    • +///
    • Silent validation: When syntax errors are not being +/// reported or logged, and the parse result is simply ignored if errors occur, +/// the [BailErrorStrategy] avoids wasting work on recovering from errors +/// when the result will be ignored either way.
    • +///
    +/// +///

    +/// {@code myparser.setErrorHandler(new BailErrorStrategy());}

    +/// +/// @see Parser#setErrorHandler(ANTLRErrorStrategy) +class BailErrorStrategy extends DefaultErrorStrategy { + /// Instead of recovering from exception [e], re-throw it wrapped + /// in a [ParseCancellationException] so it is not caught by the + /// rule function catches. Use {@link Exception#getCause()} to get the + /// original [RecognitionException]. + + @override + void recover(Parser recognizer, RecognitionException e) { + for (var context = recognizer.context; + context != null; + context = context.parent) { + context.exception = e; + } + + throw ParseCancellationException(e.message); + } + + /// Make sure we don't attempt to recover inline; if the parser + /// successfully recovers, it won't throw an exception. + + @override + Token recoverInline(Parser recognizer) { + final e = InputMismatchException(recognizer); + for (var context = recognizer.context; + context != null; + context = context.parent) { + context.exception = e; + } + + throw ParseCancellationException(e.message); + } + + /// Make sure we don't attempt to recover from problems in subrules. */ + + @override + void sync(Parser recognizer) {} +} diff --git a/runtime/Dart/lib/src/error/src/errors.dart b/runtime/Dart/lib/src/error/src/errors.dart new file mode 100644 index 0000000000..11e557638c --- /dev/null +++ b/runtime/Dart/lib/src/error/src/errors.dart @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../atn/atn.dart'; +import '../../input_stream.dart'; +import '../../interval_set.dart'; +import '../../lexer.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../recognizer.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import '../../token_stream.dart'; +import '../../util/utils.dart'; + +/// The root of the ANTLR exception hierarchy. 
In general, ANTLR tracks just +/// 3 kinds of errors: prediction errors, failed predicate errors, and +/// mismatched input errors. In each case, the parser knows where it is +/// in the input, where it is in the ATN, the rule invocation stack, +/// and what kind of problem occurred. +class RecognitionException extends StateError { + /// Gets the [Recognizer] where this exception occurred. + /// + ///

    If the recognizer is not available, this method returns null.

    + /// + /// @return The recognizer where this exception occurred, or null if + /// the recognizer is not available. + final Recognizer? recognizer; + + /// Gets the [RuleContext] at the time this exception was thrown. + /// + ///

    If the context is not available, this method returns null.

    + /// + /// @return The [RuleContext] at the time this exception was thrown. + /// If the context is not available, this method returns null. + final RuleContext? ctx; + + /// Gets the input stream which is the symbol source for the recognizer where + /// this exception was thrown. + /// + ///

    If the input stream is not available, this method returns null.

    + /// + /// @return The input stream which is the symbol source for the recognizer + /// where this exception was thrown, or null if the stream is not + /// available. + final StreamType inputStream; + + /// The current [Token] when an error occurred. Since not all streams + /// support accessing symbols by index, we have to track the [Token] + /// instance itself. + late Token offendingToken; + + /// Get the ATN state number the parser was in at the time the error + /// occurred. For [NoViableAltException] and + /// [LexerNoViableAltException] exceptions, this is the + /// [DecisionState] number. For others, it is the state whose outgoing + /// edge we couldn't match. + /// + ///

    If the state number is not known, this method returns -1.

    + int offendingState = -1; + + RecognitionException( + this.recognizer, + this.inputStream, + this.ctx, [ + String message = '', + ]) : super(message) { + if (recognizer != null) offendingState = recognizer!.state; + } + + /// Gets the set of input symbols which could potentially follow the + /// previously matched symbol at the time this exception was thrown. + /// + ///

    If the set of expected tokens is not known and could not be computed, + /// this method returns null.

    + /// + /// @return The set of token types that could potentially follow the current + /// state in the ATN, or null if the information is not available. + IntervalSet? get expectedTokens { + if (recognizer != null) { + return recognizer!.getATN().getExpectedTokens(offendingState, ctx); + } + return null; + } +} + +class LexerNoViableAltException extends RecognitionException { + /// Matching attempted at what input index? */ + final int startIndex; + + /// Which configurations did we try at input.index() that couldn't match input.LA(1)? */ + final ATNConfigSet deadEndConfigs; + + LexerNoViableAltException( + Lexer? lexer, + CharStream input, + this.startIndex, + this.deadEndConfigs, + ) : super(lexer, input, null); + + @override + String toString() { + var symbol = ''; + if (startIndex >= 0 && startIndex < inputStream.size) { + symbol = inputStream.getText(Interval.of(startIndex, startIndex)); + symbol = escapeWhitespace(symbol); + } + + return "$LexerNoViableAltException('$symbol')"; + } +} + +/// Indicates that the parser could not decide which of two or more paths +/// to take based upon the remaining input. It tracks the starting token +/// of the offending input and also knows where the parser was +/// in the various paths when the error. Reported by reportNoViableAlternative() +class NoViableAltException extends RecognitionException { + /// Which configurations did we try at input.index() that couldn't match input.LT(1)? */ + + final ATNConfigSet? deadEndConfigs; + + /// The token object at the start index; the input stream might + /// not be buffering tokens so get a reference to it. (At the + /// time the error occurred, of course the stream needs to keep a + /// buffer all of the tokens but later we might not have access to those.) 
+ + final Token startToken; + +// NoViableAltException(Parser recognizer) { // LL(1) error +// this(recognizer, +// recognizer.inputStream, +// recognizer.getCurrentToken(), +// recognizer.getCurrentToken(), +// null, +// recognizer._ctx); +// } + + NoViableAltException._( + Parser recognizer, + TokenStream input, + this.startToken, + Token offendingToken, + this.deadEndConfigs, + ParserRuleContext? ctx, + ) : super(recognizer, input, ctx) { + this.offendingToken = offendingToken; + } + + NoViableAltException( + Parser recognizer, [ + TokenStream? input, + Token? startToken, + Token? offendingToken, + ATNConfigSet? deadEndConfigs, + ParserRuleContext? ctx, + ]) : this._( + recognizer, + input ?? recognizer.inputStream, + startToken ?? recognizer.currentToken, + offendingToken ?? recognizer.currentToken, + deadEndConfigs, + ctx ?? recognizer.context, + ); +} + +/// This signifies any kind of mismatched input exceptions such as +/// when the current input does not match the expected token. +class InputMismatchException extends RecognitionException { + InputMismatchException( + Parser recognizer, [ + int state = -1, + ParserRuleContext? ctx, + ]) : super(recognizer, recognizer.inputStream, ctx ?? recognizer.context) { + if (state != -1 && ctx != null) { + offendingState = state; + } + offendingToken = recognizer.currentToken; + } +} + +/// A semantic predicate failed during validation. Validation of predicates +/// occurs when normally parsing the alternative just like matching a token. +/// Disambiguating predicate evaluation occurs when we test a predicate during +/// prediction. +class FailedPredicateException extends RecognitionException { + int? ruleIndex; + int? predIndex; + final String? predicate; + + FailedPredicateException( + Parser recognizer, [ + this.predicate, + String? 
message, + ]) : super( + recognizer, + recognizer.inputStream, + recognizer.context, + formatMessage(predicate, message), + ) { + final s = recognizer.interpreter!.atn.states[recognizer.state]!; + + final trans = s.transition(0) as AbstractPredicateTransition; + if (trans is PredicateTransition) { + ruleIndex = trans.ruleIndex; + predIndex = trans.predIndex; + } + offendingToken = recognizer.currentToken; + } + + static String formatMessage(String? predicate, String? message) { + if (message != null) { + return message; + } + + return 'failed predicate: {$predicate}?'; + } +} diff --git a/runtime/Dart/lib/src/input_stream.dart b/runtime/Dart/lib/src/input_stream.dart new file mode 100644 index 0000000000..879915323d --- /dev/null +++ b/runtime/Dart/lib/src/input_stream.dart @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:async'; +import 'dart:convert'; +import 'dart:math'; + +import 'interval_set.dart'; +import 'token.dart'; + +import 'util/platform_stub.dart' if (dart.library.io) 'util/platform_io.dart'; + +/// A simple stream of symbols whose values are represented as integers. This +/// interface provides marked ranges with support for a minimum level +/// of buffering necessary to implement arbitrary lookahead during prediction. +/// For more information on marked ranges, see {@link #mark}. +/// +///

    Initializing Methods: Some methods in this interface have +/// unspecified behavior if no call to an initializing method has occurred after +/// the stream was constructed. The following is a list of initializing methods:

    +/// +///
      +///
    • {@link #LA}
    • +///
    • {@link #consume}
    • +///
    • {@link #size}
    • +///
    +abstract class IntStream { + /// The value returned by {@link #LA LA()} when the end of the stream is + /// reached. + static const int EOF = -1; + + /// The value returned by {@link #getSourceName} when the actual name of the + /// underlying source is not known. + static const UNKNOWN_SOURCE_NAME = ''; + + /// Consumes the current symbol in the stream. This method has the following + /// effects: + /// + ///
      + ///
    • Forward movement: The value of {@link #index index()} + /// before calling this method is less than the value of {@code index()} + /// after calling this method.
    • + ///
    • Ordered lookahead: The value of {@code LA(1)} before + /// calling this method becomes the value of {@code LA(-1)} after calling + /// this method.
    • + ///
    + /// + /// Note that calling this method does not guarantee that {@code index()} is + /// incremented by exactly 1, as that would preclude the ability to implement + /// filtering streams (e.g. [CommonTokenStream] which distinguishes + /// between "on-channel" and "off-channel" tokens). + /// + /// @throws IllegalStateException if an attempt is made to consume the + /// end of the stream (i.e. if {@code LA(1)==}{@link #EOF EOF} before calling + /// [consume]). + void consume(); + + /// Gets the value of the symbol at offset [i] from the current + /// position. When {@code i==1}, this method returns the value of the current + /// symbol in the stream (which is the next symbol to be consumed). When + /// {@code i==-1}, this method returns the value of the previously read + /// symbol in the stream. It is not valid to call this method with + /// {@code i==0}, but the specific behavior is unspecified because this + /// method is frequently called from performance-critical code. + /// + ///

    This method is guaranteed to succeed if any of the following are true:

    + /// + ///
      + ///
    • {@code i>0}
    • + ///
    • {@code i==-1} and {@link #index index()} returns a value greater + /// than the value of {@code index()} after the stream was constructed + /// and {@code LA(1)} was called in that order. Specifying the current + /// {@code index()} relative to the index after the stream was created + /// allows for filtering implementations that do not return every symbol + /// from the underlying source. Specifying the call to {@code LA(1)} + /// allows for lazily initialized streams.
    • + ///
    • {@code LA(i)} refers to a symbol consumed within a marked region + /// that has not yet been released.
    • + ///
    + /// + ///

    If [i] represents a position at or beyond the end of the stream, + /// this method returns {@link #EOF}.

    + /// + ///

    The return value is unspecified if {@code i<0} and fewer than {@code -i} + /// calls to {@link #consume consume()} have occurred from the beginning of + /// the stream before calling this method.

    + /// + /// @throws UnsupportedOperationException if the stream does not support + /// retrieving the value of the specified symbol + int? LA(int i); + + /// A mark provides a guarantee that {@link #seek seek()} operations will be + /// valid over a "marked range" extending from the index where {@code mark()} + /// was called to the current {@link #index index()}. This allows the use of + /// streaming input sources by specifying the minimum buffering requirements + /// to support arbitrary lookahead during prediction. + /// + ///

    The returned mark is an opaque handle (type [int]) which is passed + /// to {@link #release release()} when the guarantees provided by the marked + /// range are no longer necessary. When calls to + /// {@code mark()}/{@code release()} are nested, the marks must be released + /// in reverse order of which they were obtained. Since marked regions are + /// used during performance-critical sections of prediction, the specific + /// behavior of invalid usage is unspecified (i.e. a mark is not released, or + /// a mark is released twice, or marks are not released in reverse order from + /// which they were created).

    + /// + ///

    The behavior of this method is unspecified if no call to an + /// {@link IntStream initializing method} has occurred after this stream was + /// constructed.

    + /// + ///

    This method does not change the current position in the input stream.

    + /// + ///

    The following example shows the use of {@link #mark mark()}, + /// {@link #release release(mark)}, {@link #index index()}, and + /// {@link #seek seek(index)} as part of an operation to safely work within a + /// marked region, then restore the stream position to its original value and + /// release the mark.

    + ///
    +  /// IntStream stream = ...;
    +  /// int index = -1;
    +  /// int mark = stream.mark();
    +  /// try {
    +  ///   index = stream.index();
    +  ///   // perform work here...
    +  /// } finally {
    +  ///   if (index != -1) {
    +  ///     stream.seek(index);
    +  ///   }
    +  ///   stream.release(mark);
    +  /// }
    +  /// 
    + /// + /// @return An opaque marker which should be passed to + /// {@link #release release()} when the marked range is no longer required. + int mark(); + + /// This method releases a marked range created by a call to + /// {@link #mark mark()}. Calls to {@code release()} must appear in the + /// reverse order of the corresponding calls to {@code mark()}. If a mark is + /// released twice, or if marks are not released in reverse order of the + /// corresponding calls to {@code mark()}, the behavior is unspecified. + /// + ///

    For more information and an example, see {@link #mark}.

    + /// + /// @param marker A marker returned by a call to {@code mark()}. + /// @see #mark + void release(int marker); + + /// Return the index into the stream of the input symbol referred to by + /// {@code LA(1)}. + /// + ///

    The behavior of this method is unspecified if no call to an + /// {@link IntStream initializing method} has occurred after this stream was + /// constructed.

    + int get index; + + /// Set the input cursor to the position indicated by [index]. If the + /// specified index lies past the end of the stream, the operation behaves as + /// though [index] was the index of the EOF symbol. After this method + /// returns without throwing an exception, then at least one of the following + /// will be true. + /// + ///
      + ///
    • {@link #index index()} will return the index of the first symbol + /// appearing at or after the specified [index]. Specifically, + /// implementations which filter their sources should automatically + /// adjust [index] forward the minimum amount required for the + /// operation to target a non-ignored symbol.
    • + ///
    • {@code LA(1)} returns {@link #EOF}
    • + ///
    + /// + /// This operation is guaranteed to not throw an exception if [index] + /// lies within a marked region. For more information on marked regions, see + /// {@link #mark}. The behavior of this method is unspecified if no call to + /// an {@link IntStream initializing method} has occurred after this stream + /// was constructed. + /// + /// @param index The absolute index to seek to. + /// + /// @throws IllegalArgumentException if [index] is less than 0 + /// @throws UnsupportedOperationException if the stream does not support + /// seeking to the specified index + void seek(int index); + + /// Returns the total number of symbols in the stream, including a single EOF + /// symbol. + /// + /// @throws UnsupportedOperationException if the size of the stream is + /// unknown. + int get size; + + /// Gets the name of the underlying symbol source. This method returns a + /// non-null, non-empty string. If such a name is not known, this method + /// returns {@link #UNKNOWN_SOURCE_NAME}. + + String get sourceName; +} + +/// A source of characters for an ANTLR lexer. */ +abstract class CharStream extends IntStream { + /// This method returns the text for a range of characters within this input + /// stream. This method is guaranteed to not throw an exception if the + /// specified [interval] lies entirely within a marked range. For more + /// information about marked ranges, see {@link IntStream#mark}. 
+ /// + /// @param interval an interval within the stream + /// @return the text of the specified interval + /// + /// @throws NullPointerException if [interval] is null + /// @throws IllegalArgumentException if {@code interval.a < 0}, or if + /// {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or + /// past the end of the stream + /// @throws UnsupportedOperationException if the stream does not support + /// getting the text of the specified interval + String getText(Interval interval); +} + +// Vacuum all input from a string and then treat it like a buffer. +class InputStream extends CharStream { + final name = ''; + late List data; + int _index = 0; + bool decodeToUnicodeCodePoints = false; + + InputStream(List data) { + this.data = data; + } + + InputStream.fromString(String data) { + this.data = data.runes.toList(growable: false); + } + + static Future fromStringStream(Stream stream) async { + final data = StringBuffer(); + await stream.listen((buf) { + data.write(buf); + }).asFuture(); + return InputStream.fromString(data.toString()); + } + + static Future fromStream(Stream> stream, + {Encoding encoding = utf8}) { + final data = stream.transform(encoding.decoder); + return fromStringStream(data); + } + + static Future fromPath(String path, {Encoding encoding = utf8}) { + return fromStream(readStream(path), encoding: encoding); + } + + @override + int get index { + return _index; + } + + @override + int get size { + return data.length; + } + + /// Reset the stream so that it's in the same state it was + /// when the object was created *except* the data array is not + /// touched. 
+ void reset() { + _index = 0; + } + + @override + void consume() { + if (_index >= size) { + // assert this.LA(1) == Token.EOF + throw ('cannot consume EOF'); + } + _index += 1; + } + + @override + int LA(int offset) { + if (offset == 0) { + return 0; // undefined + } + if (offset < 0) { + offset += 1; // e.g., translate LA(-1) to use offset=0 + } + final pos = _index + offset - 1; + if (pos < 0 || pos >= size) { + // invalid + return Token.EOF; + } + return data[pos]; + } + + /// mark/release do nothing; we have entire buffer + @override + int mark() { + return -1; + } + + @override + void release(int marker) {} + + /// consume() ahead until p==_index; can't just set p=_index as we must + /// update line and column. If we seek backwards, just set p + @override + void seek(int _index) { + if (_index <= this._index) { + this._index = _index; // just jump; don't update stream state (line, + // ...) + return; + } + // seek forward + this._index = min(_index, size); + } + + @override + String getText(Interval interval) { + final startIdx = min(interval.a, size); + final len = min(interval.b - interval.a + 1, size - startIdx); + return String.fromCharCodes(data, startIdx, startIdx + len); + } + + @override + String toString() { + return String.fromCharCodes(data); + } + + @override + String get sourceName { + // TODO: implement getSourceName + return IntStream.UNKNOWN_SOURCE_NAME; + } +} diff --git a/runtime/Dart/lib/src/interval_set.dart b/runtime/Dart/lib/src/interval_set.dart new file mode 100644 index 0000000000..c9b7629db3 --- /dev/null +++ b/runtime/Dart/lib/src/interval_set.dart @@ -0,0 +1,711 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'dart:math'; + +import 'package:collection/collection.dart'; + +import 'lexer.dart'; +import 'token.dart'; +import 'util/murmur_hash.dart'; +import 'vocabulary.dart'; + +/// An immutable inclusive interval a..b */ +class Interval { + static final int INTERVAL_POOL_MAX_VALUE = 1000; + + static final Interval INVALID = Interval(-1, -2); + + static List cache = + List.filled(INTERVAL_POOL_MAX_VALUE + 1, null); + + int a; + int b; + + static int creates = 0; + static int misses = 0; + static int hits = 0; + static int outOfRange = 0; + + Interval(this.a, this.b); + + /// Interval objects are used readonly so share all with the + /// same single value a==b up to some max size. Use an array as a perfect hash. + /// Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new + /// Interval object with a..a in it. On Java.g4, 218623 IntervalSets + /// have a..a (set with 1 element). + static Interval of(int a, int b) { + // cache just a..a + if (a != b || a < 0 || a > INTERVAL_POOL_MAX_VALUE) { + return Interval(a, b); + } + if (cache[a] == null) { + cache[a] = Interval(a, a); + } + return cache[a]!; + } + + /// return number of elements between a and b inclusively. x..x is length 1. + /// if b < a, then length is 0. 9..10 has length 2. + int get length { + if (b < a) return 0; + return b - a + 1; + } + + @override + bool operator ==(Object other) { + if (other is! Interval) { + return false; + } + return a == other.a && b == other.b; + } + + @override + int get hashCode { + var hash = 23; + hash = hash * 31 + a; + hash = hash * 31 + b; + return hash; + } + + /// Does this start completely before other? Disjoint */ + bool startsBeforeDisjoint(Interval other) { + return a < other.a && b < other.a; + } + + /// Does this start at or before other? Nondisjoint */ + bool startsBeforeNonDisjoint(Interval other) { + return a <= other.a && b >= other.a; + } + + /// Does this.a start after other.b? 
May or may not be disjoint */ + bool startsAfter(Interval other) { + return a > other.a; + } + + /// Does this start completely after other? Disjoint */ + bool startsAfterDisjoint(Interval other) { + return a > other.b; + } + + /// Does this start after other? NonDisjoint */ + bool startsAfterNonDisjoint(Interval other) { + return a > other.a && a <= other.b; // this.b>=other.b implied + } + + /// Are both ranges disjoint? I.e., no overlap? */ + bool disjoint(Interval other) { + return startsBeforeDisjoint(other) || startsAfterDisjoint(other); + } + + /// Are two intervals adjacent such as 0..41 and 42..42? */ + bool adjacent(Interval other) { + return a == other.b + 1 || b == other.a - 1; + } + + bool properlyContains(Interval other) { + return other.a >= a && other.b <= b; + } + + /// Return the interval computed from combining this and other */ + Interval union(Interval other) { + return Interval.of(min(a, other.a), max(b, other.b)); + } + + /// Return the interval in common between this and o */ + Interval intersection(Interval other) { + return Interval.of(max(a, other.a), min(b, other.b)); + } + + /// Return the interval with elements from this not in other; + /// other must not be totally enclosed (properly contained) + /// within this, which would result in two disjoint intervals + /// instead of the single one returned by this method. + Interval? differenceNotProperlyContained(Interval other) { + Interval? diff; + // other.a to left of this.a (or same) + if (other.startsBeforeNonDisjoint(this)) { + diff = Interval.of(max(a, other.b + 1), b); + } + + // other.a to right of this.a + else if (other.startsAfterNonDisjoint(this)) { + diff = Interval.of(a, other.a - 1); + } + return diff; + } + + @override + String toString() { + return '$a..$b'; + } +} + +/// This class implements the [IntervalSet] backed by a sorted array of +/// non-overlapping intervals. 
It is particularly efficient for representing +/// large collections of numbers, where the majority of elements appear as part +/// of a sequential range of numbers that are all part of the set. For example, +/// the set { 1, 2, 3, 4, 7, 8 } may be represented as { [1, 4], [7, 8] }. +/// +///

    +/// This class is able to represent sets containing any combination of values in +/// the range {@link int#MIN_VALUE} to {@link int#MAX_VALUE} +/// (inclusive).

    +class IntervalSet { + static final IntervalSet COMPLETE_CHAR_SET = + IntervalSet.ofRange(Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE) + ..setReadonly(true); + + static final IntervalSet EMPTY_SET = IntervalSet([])..setReadonly(true); + + /// The list of sorted, disjoint intervals. */ + List intervals = []; + + bool readonly = false; + + IntervalSet([List? intervals]) { + this.intervals = intervals ?? []; + } + + IntervalSet.ofSet(IntervalSet set) { + addAll(set); + } + +// TODO +// IntervalSet(int... els) { +//if ( els==null ) { +//intervals = new ArrayList(2); // most sets are 1 or 2 elements +//} +//else { +//intervals = new ArrayList(els.length); +//for (int e : els) add(e); +//} +//} + + /// Create a set with a single element, el. */ + + IntervalSet.ofOne(int a) { + addOne(a); + } + + /// Create a set with all ints within range [a..b] (inclusive) */ + static IntervalSet ofRange(int a, int b) { + final s = IntervalSet(); + s.addRange(a, b); + return s; + } + + void clear() { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + intervals.clear(); + } + + /// Add a single element to the set. An isolated element is stored + /// as a range el..el. + + void addOne(int el) { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + addRange(el, el); + } + + /// Add interval; i.e., add all integers from a to b to set. + /// If b<a, do nothing. + /// Keep list in sorted order (by left range value). + /// If overlap, combine ranges. For example, + /// If this is {1..5, 10..20}, adding 6..7 yields + /// {1..5, 6..7, 10..20}. Adding 4..8 yields {1..8, 10..20}. 
+ void addRange(int a, int b) { + add(Interval.of(a, b)); + } + + // copy on write so we can cache a..a intervals and sets of that + void add(Interval addition) { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + //System.out.println("add "+addition+" to "+intervals.toString()); + if (addition.b < addition.a) { + return; + } + for (var i = 0; i < intervals.length; i++) { + final r = intervals[i]; + if (addition == r) { + return; + } + if (addition.adjacent(r) || !addition.disjoint(r)) { + // next to each other, make a single larger interval + final bigger = addition.union(r); + intervals[i] = bigger; + + // make sure we didn't just create an interval that + // should be merged with next interval in list + for (i++; i < intervals.length; i++) { + final next = intervals[i]; + if (!bigger.adjacent(next) && bigger.disjoint(next)) { + break; + } + + // if we bump up against or overlap next, merge + intervals.removeAt(i); // remove this one + intervals[i - 1] = + bigger.union(next); // set previous to 3 merged ones + } + return; + } + if (addition.startsBeforeDisjoint(r)) { + // insert before r + intervals.insert(i, addition); + return; + } + // if disjoint and after r, a future iteration will handle it + + } + // ok, must be after last interval (and disjoint from last interval) + // just add it + intervals.add(addition); + } + + /// combine all sets in the array returned the or'd value */ + static IntervalSet or(List sets) { + final r = IntervalSet(); + for (final s in sets) { + r.addAll(s); + } + return r; + } + + IntervalSet operator |(IntervalSet a) { + final o = IntervalSet(); + o.addAll(this); + o.addAll(a); + return o; + } + + IntervalSet addAll(IntervalSet? set) { + if (set == null) { + return this; + } + + final other = set; + // walk set and add each interval + final n = other.intervals.length; + for (var i = 0; i < n; i++) { + final I = other.intervals[i]; + addRange(I.a, I.b); + } + + return this; + } + + IntervalSet? 
complementRange(int minElement, int maxElement) { + return complement(IntervalSet.ofRange(minElement, maxElement)); + } + + /// {@inheritDoc} */ + IntervalSet? complement(IntervalSet? vocabulary) { + if (vocabulary == null || vocabulary.isNil) { + return null; // nothing in common with null set + } + return vocabulary - this; + } + + IntervalSet operator -(IntervalSet a) { + if (a.isNil) { + return IntervalSet.ofSet(this); + } + return subtract(this, a); + } + + /// Compute the set difference between two interval sets. The specific + /// operation is {@code left - right}. If either of the input sets is + /// null, it is treated as though it was an empty set. + static IntervalSet subtract(IntervalSet left, IntervalSet right) { + if (left.isNil) { + return IntervalSet(); + } + + final result = IntervalSet.ofSet(left); + if (right.isNil) { + // right set has no elements; just return the copy of the current set + return result; + } + + var resultI = 0; + var rightI = 0; + while ( + resultI < result.intervals.length && rightI < right.intervals.length) { + final resultInterval = result.intervals[resultI]; + final rightInterval = right.intervals[rightI]; + +// operation: (resultInterval - rightInterval) and update indexes + + if (rightInterval.b < resultInterval.a) { + rightI++; + continue; + } + + if (rightInterval.a > resultInterval.b) { + resultI++; + continue; + } + + Interval? beforeCurrent; + Interval? 
afterCurrent; + if (rightInterval.a > resultInterval.a) { + beforeCurrent = Interval(resultInterval.a, rightInterval.a - 1); + } + + if (rightInterval.b < resultInterval.b) { + afterCurrent = Interval(rightInterval.b + 1, resultInterval.b); + } + + if (beforeCurrent != null) { + if (afterCurrent != null) { +// split the current interval into two + result.intervals[resultI] = beforeCurrent; + result.intervals.insert(resultI + 1, afterCurrent); + resultI++; + rightI++; + continue; + } else { +// replace the current interval + result.intervals[resultI] = beforeCurrent; + resultI++; + continue; + } + } else { + if (afterCurrent != null) { +// replace the current interval + result.intervals[resultI] = afterCurrent; + rightI++; + continue; + } else { +// remove the current interval (thus no need to increment resultI) + result.intervals.removeAt(resultI); + continue; + } + } + } + +// If rightI reached right.intervals.length, no more intervals to subtract from result. +// If resultI reached result.intervals.length, we would be subtracting from an empty set. +// Either way, we are done. + return result; + } + + /// {@inheritDoc} */ + IntervalSet operator +(IntervalSet other) { + final myIntervals = intervals; + final theirIntervals = (other).intervals; + IntervalSet? 
intersection; + final mySize = myIntervals.length; + final theirSize = theirIntervals.length; + var i = 0; + var j = 0; +// iterate down both interval lists looking for nondisjoint intervals + while (i < mySize && j < theirSize) { + final mine = myIntervals[i]; + final theirs = theirIntervals[j]; +//System.out.println("mine="+mine+" and theirs="+theirs); + if (mine.startsBeforeDisjoint(theirs)) { +// move this iterator looking for interval that might overlap + i++; + } else if (theirs.startsBeforeDisjoint(mine)) { +// move other iterator looking for interval that might overlap + j++; + } else if (mine.properlyContains(theirs)) { +// overlap, add intersection, get next theirs + intersection ??= IntervalSet(); + intersection.add(mine.intersection(theirs)); + j++; + } else if (theirs.properlyContains(mine)) { +// overlap, add intersection, get next mine + intersection ??= IntervalSet(); + intersection.add(mine.intersection(theirs)); + i++; + } else if (!mine.disjoint(theirs)) { +// overlap, add intersection + intersection ??= IntervalSet(); + intersection.add(mine.intersection(theirs)); +// Move the iterator of lower range [a..b], but not +// the upper range as it may contain elements that will collide +// with the next iterator. So, if mine=[0..115] and +// theirs=[115..200], then intersection is 115 and move mine +// but not theirs as theirs may collide with the next range +// in thisIter. +// move both iterators to next ranges + if (mine.startsAfterNonDisjoint(theirs)) { + j++; + } else if (theirs.startsAfterNonDisjoint(mine)) { + i++; + } + } + } + if (intersection == null) { + return IntervalSet(); + } + return intersection; + } + + /// {@inheritDoc} */ + + bool contains(int el) { + final n = intervals.length; + var l = 0; + var r = n - 1; +// Binary search for the element in the (sorted, +// disjoint) array of intervals. 
+ while (l <= r) { + final m = ((l + r) / 2).floor(); + final I = intervals[m]; + final a = I.a; + final b = I.b; + if (b < el) { + l = m + 1; + } else if (a > el) { + r = m - 1; + } else { + // el >= a && el <= b + return true; + } + } + return false; + } + + /// {@inheritDoc} */ + + bool get isNil { + return intervals.isEmpty; + } + + /// Returns the maximum value contained in the set if not isNil(). + /// + /// @return the maximum value contained in the set. + /// @throws RuntimeException if set is empty + int get maxElement { + if (isNil) { + throw StateError('set is empty'); + } + return intervals.last.b; + } + + /// Returns the minimum value contained in the set if not isNil(). + /// + /// @return the minimum value contained in the set. + /// @throws RuntimeException if set is empty + int get minElement { + if (isNil) { + throw StateError('set is empty'); + } + + return intervals.first.a; + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + for (final I in intervals) { + hash = MurmurHash.update(hash, I.a); + hash = MurmurHash.update(hash, I.b); + } + + hash = MurmurHash.finish(hash, intervals.length * 2); + return hash; + } + + /// Are two IntervalSets equal? Because all intervals are sorted + /// and disjoint, equals is a simple linear walk over both lists + /// to make sure they are the same. Interval.equals() is used + /// by the List.equals() method to check the ranges. + + @override + bool operator ==(Object obj) { + if (obj is! IntervalSet) { + return false; + } + return ListEquality().equals(intervals, obj.intervals); + } + + @override + String toString({bool elemAreChar = false, Vocabulary? 
vocabulary}) { + if (intervals.isEmpty) { + return '{}'; + } + + final elemStr = intervals.map((Interval I) { + final buf = StringBuffer(); + final a = I.a; + final b = I.b; + if (a == b) { + if (vocabulary != null) { + buf.write(elementName(vocabulary, a)); + } else { + if (a == Token.EOF) { + buf.write(''); + } else if (elemAreChar) { + buf.write("'"); + buf.writeCharCode(a); + buf.write("'"); + } else { + buf.write(a); + } + } + } else { + if (vocabulary != null) { + for (var i = a; i <= b; i++) { + if (i > a) buf.write(', '); + buf.write(elementName(vocabulary, i)); + } + } else { + if (elemAreChar) { + buf.write("'"); + buf.writeCharCode(a); + buf.write("'..'"); + buf.writeCharCode(b); + buf.write("'"); + } else { + buf.write(a); + buf.write('..'); + buf.write(b); + } + } + } + return buf; + }).join(', '); + if (length > 1) { + return '{$elemStr}'; + } + return elemStr; + } + + String elementName(Vocabulary vocabulary, int a) { + if (a == Token.EOF) { + return ''; + } else if (a == Token.EPSILON) { + return ''; + } else { + return vocabulary.getDisplayName(a); + } + } + + int get length { + var n = 0; + final numIntervals = intervals.length; + if (numIntervals == 1) { + final firstInterval = intervals[0]; + return firstInterval.b - firstInterval.a + 1; + } + for (var i = 0; i < numIntervals; i++) { + final I = intervals[i]; + n += (I.b - I.a + 1); + } + return n; + } + + List toIntegerList() { + final values = []; + final n = intervals.length; + for (var i = 0; i < n; i++) { + final I = intervals[i]; + final a = I.a; + final b = I.b; + for (var v = a; v <= b; v++) { + values.add(v); + } + } + return values; + } + + List toList() { + final values = []; + final n = intervals.length; + for (var i = 0; i < n; i++) { + final I = intervals[i]; + final a = I.a; + final b = I.b; + for (var v = a; v <= b; v++) { + values.add(v); + } + } + return values; + } + + Set toSet() { + final s = {}; + for (final I in intervals) { + final a = I.a; + final b = I.b; + for (var v = 
a; v <= b; v++) { + s.add(v); + } + } + return s; + } + + /// Get the ith element of ordered set. Used only by RandomPhrase so + /// don't bother to implement if you're not doing that for a new + /// ANTLR code gen target. + int get(int i) { + final n = intervals.length; + var index = 0; + for (var j = 0; j < n; j++) { + final I = intervals[j]; + final a = I.a; + final b = I.b; + for (var v = a; v <= b; v++) { + if (index == i) { + return v; + } + index++; + } + } + return -1; + } + + void remove(int el) { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + final n = intervals.length; + for (var i = 0; i < n; i++) { + final I = intervals[i]; + final a = I.a; + final b = I.b; + if (el < a) { + break; // list is sorted and el is before this interval; not here + } +// if whole interval x..x, rm + if (el == a && el == b) { + intervals.removeAt(i); + break; + } +// if on left edge x..b, adjust left + if (el == a) { + I.a++; + break; + } +// if on right edge a..x, adjust right + if (el == b) { + I.b--; + break; + } +// if in middle a..x..b, split interval + if (el > a && el < b) { + // found in this interval + final oldb = I.b; + I.b = el - 1; // [a..x-1] + addRange(el + 1, oldb); // add [x+1..b] + } + } + } + + bool isReadonly() { + return readonly; + } + + void setReadonly(bool readonly) { + if (this.readonly && !readonly) { + throw StateError("can't alter readonly IntervalSet"); + } + this.readonly = readonly; + } +} diff --git a/runtime/Dart/lib/src/lexer.dart b/runtime/Dart/lib/src/lexer.dart new file mode 100644 index 0000000000..2520f827d5 --- /dev/null +++ b/runtime/Dart/lib/src/lexer.dart @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'dart:developer'; + +import 'atn/atn.dart'; +import 'error/error.dart'; +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'misc/misc.dart'; +import 'recognizer.dart'; +import 'token.dart'; +import 'token_factory.dart'; +import 'token_source.dart'; +import 'util/utils.dart'; + +abstract class Lexer extends Recognizer + implements TokenSource { + static final DEFAULT_MODE = 0; + static final MORE = -2; + static final SKIP = -3; + + static final DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL; + static final HIDDEN = Token.HIDDEN_CHANNEL; + static final MIN_CHAR_VALUE = 0x0000; + static final MAX_CHAR_VALUE = 0x10FFFF; + + CharStream _input; + + late Pair _tokenFactorySourcePair; + + @override + TokenFactory get tokenFactory { + return _factory; + } + + @override + set tokenFactory(TokenFactory factory) { + _factory = factory; + } + + TokenFactory _factory = CommonTokenFactory.DEFAULT; + + /// The goal of all lexer rules/methods is to create a token object. + /// this is an instance variable as multiple rules may collaborate to + /// create a single token. nextToken will return this object after + /// matching lexer rule(s). If you subclass to allow multiple token + /// emissions, then set this to the last token to be matched or + /// something nonnull so that the auto token emit mechanism will not + /// emit another token. + Token? _token; + + /// What character index in the stream did the current token start at? + /// Needed, for example, to get the text for current token. Set at + /// the start of nextToken. + int tokenStartCharIndex = -1; + + /// The line on which the first character of the token resides + int tokenStartLine = -1; + + /// The character position of first character within the line + int tokenStartCharPositionInLine = -1; + + /// Once we see EOF on char stream, next token will be EOF. + /// If you have DONE : EOF ; then you see DONE EOF. 
+ bool _hitEOF = false; + + /// The channel number for the current token + int channel = Token.DEFAULT_CHANNEL; + + /// The token type for the current token + int type = Token.INVALID_TYPE; + + final List _modeStack = []; + int mode_ = Lexer.DEFAULT_MODE; + + /// You can set the text for the current token to override what is in + /// the input char buffer. Use setText() or can set this instance var. + String? _text; + + Lexer(CharStream input) : _input = input { + _tokenFactorySourcePair = Pair(this, input); + } + + void reset([bool resetInput = false]) { + // wacky Lexer state variables + if (resetInput) { + _input.seek(0); // rewind the input + } + _token = null; + type = Token.INVALID_TYPE; + channel = Token.DEFAULT_CHANNEL; + tokenStartCharIndex = -1; + tokenStartCharPositionInLine = -1; + tokenStartLine = -1; + _text = null; + + _hitEOF = false; + mode_ = Lexer.DEFAULT_MODE; + _modeStack.clear(); + + interpreter?.reset(); + } + + /// Return a token from this source; i.e., match a token on the char stream. 
+ @override + Token nextToken() { + // Mark start location in char stream so unbuffered streams are + // guaranteed at least have text of current token + final tokenStartMarker = _input.mark(); + try { + outer: + while (true) { + if (_hitEOF) { + emitEOF(); + return _token!; + } + + _token = null; + channel = Token.DEFAULT_CHANNEL; + tokenStartCharIndex = _input.index; + tokenStartCharPositionInLine = interpreter!.charPositionInLine; + tokenStartLine = interpreter!.line; + _text = null; + do { + type = Token.INVALID_TYPE; +// System.out.println("nextToken line "+tokenStartLine+" at "+((char)input.LA(1))+ +// " in mode "+mode+ +// " at index "+input.index()); + late int ttype; + try { + ttype = interpreter!.match(_input, mode_); + } on LexerNoViableAltException catch (e) { + notifyListeners(e); // report error + recover(e); + ttype = SKIP; + } + if (_input.LA(1) == IntStream.EOF) { + _hitEOF = true; + } + if (type == Token.INVALID_TYPE) type = ttype; + if (type == SKIP) { + continue outer; + } + } while (type == MORE); + if (_token == null) emit(); + return _token!; + } + } finally { + // make sure we release marker after match or + // unbuffered char stream will keep buffering + _input.release(tokenStartMarker); + } + } + + /// Instruct the lexer to skip creating a token for current lexer rule + /// and look for another token. nextToken() knows to keep looking when + /// a lexer rule finishes with token set to SKIP_TOKEN. Recall that + /// if token==null at end of any token rule, it creates one for you + /// and emits it. 
+ void skip() { + type = Lexer.SKIP; + } + + void more() { + type = Lexer.MORE; + } + + void mode(int m) { + mode_ = m; + } + + void pushMode(int m) { + if (LexerATNSimulator.debug) { + log('pushMode $m'); + } + _modeStack.add(mode_); + mode(m); + } + + int popMode() { + if (_modeStack.isEmpty) throw StateError(''); + if (LexerATNSimulator.debug) log('popMode back to ${_modeStack.last}'); + mode(_modeStack.removeLast()); + return mode_; + } + + /// Set the char stream and reset the lexer + @override + set inputStream(CharStream input) { + _tokenFactorySourcePair = Pair(this, null); + reset(false); + _input = input; + _tokenFactorySourcePair = Pair(this, _input); + } + + @override + String get sourceName { + return _input.sourceName; + } + + @override + CharStream get inputStream { + return _input; + } + + /// By default does not support multiple emits per nextToken invocation + /// for efficiency reasons. Subclass and override this method, nextToken, + /// and getToken (to push tokens into a list and pull from that list + /// rather than a single variable as this implementation does). + void emitToken(Token token) { + //System.err.println("emit "+token); + _token = token; + } + + /// The standard method called to automatically emit a token at the + /// outermost lexical rule. The token object should point into the + /// char buffer start..stop. If there is a text override in 'text', + /// use that to set the token's text. Override this method to emit + /// custom Token objects or provide a new factory. 
+ Token emit() { + final t = tokenFactory.create( + type, + _text, + _tokenFactorySourcePair, + channel, + tokenStartCharIndex, + charIndex - 1, + tokenStartLine, + tokenStartCharPositionInLine); + emitToken(t); + return t; + } + + Token emitEOF() { + final cpos = charPositionInLine; + final eof = tokenFactory.create( + Token.EOF, + null, + _tokenFactorySourcePair, + Token.DEFAULT_CHANNEL, + _input.index, + _input.index - 1, + line, + cpos, + ); + emitToken(eof); + return eof; + } + + @override + int get charPositionInLine { + return interpreter!.charPositionInLine; + } + + @override + int get line { + return interpreter!.line; + } + + set line(int line) { + interpreter!.line = line; + } + + set charPositionInLine(int charPositionInLine) { + interpreter!.charPositionInLine = charPositionInLine; + } + + /// What is the index of the current character of lookahead? + int get charIndex { + return _input.index; + } + + /// Return the text matched so far for the current token or any + /// text override. + String get text { + if (_text != null) { + return _text!; + } + return interpreter!.getText(_input); + } + + /// Set the complete text of this token; it wipes any previous + /// changes to the text. + set text(String text) { + _text = text; + } + + /// Override if emitting multiple tokens. + Token? get token { + return _token; + } + + void setToken(Token _token) { + this._token = _token; + } + + List? get channelNames => null; + + List? get modeNames => null; + + /// Return a list of all Token objects in input char stream. + /// Forces load of all tokens. Does not include EOF token. 
+ List get allTokens { + final tokens = []; + var t = nextToken(); + while (t.type != Token.EOF) { + tokens.add(t); + t = nextToken(); + } + return tokens; + } + + void notifyListeners(LexerNoViableAltException e) { + final text = _input.getText(Interval.of(tokenStartCharIndex, _input.index)); + final msg = "token recognition error at: '" + getErrorDisplay(text) + "'"; + + final listener = errorListenerDispatch; + listener.syntaxError( + this, + null, + tokenStartLine, + tokenStartCharPositionInLine, + msg, + e, + ); + } + + String getErrorDisplay(String s) { + return escapeWhitespace(s); + } + + String getCharErrorDisplay(int c) { + final s = getErrorDisplay(String.fromCharCode(c)); + return "'$s'"; + } + + /// Lexers can normally match any char in it's vocabulary after matching + /// a token, so do the easy thing and just kill a character and hope + /// it all works out. You can instead use the rule invocation stack + /// to do sophisticated error recovery if you are in a fragment rule. + void recover(RecognitionException re) { + if (re is LexerNoViableAltException) { + if (_input.LA(1) != IntStream.EOF) { + // skip a char and try again + interpreter!.consume(_input); + } + } else { + //System.out.println("consuming char "+(char)input.LA(1)+" during recovery"); + //re.printStackTrace(); + // TODO: Do we lose character or line position information? + _input.consume(); + } + } +} diff --git a/runtime/Dart/lib/src/ll1_analyzer.dart b/runtime/Dart/lib/src/ll1_analyzer.dart new file mode 100644 index 0000000000..b2821a7be6 --- /dev/null +++ b/runtime/Dart/lib/src/ll1_analyzer.dart @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import './util/bit_set.dart'; +import 'atn/atn.dart'; +import 'interval_set.dart'; +import 'prediction_context.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'util/bit_set.dart'; + +class LL1Analyzer { + /// Special value added to the lookahead sets to indicate that we hit + /// a predicate during analysis if {@code seeThruPreds==false}. + static final int HIT_PRED = Token.INVALID_TYPE; + + final ATN atn; + + LL1Analyzer(this.atn); + + /// Calculates the SLL(1) expected lookahead set for each outgoing transition + /// of an [ATNState]. The returned array has one element for each + /// outgoing transition in [s]. If the closure from transition + /// i leads to a semantic predicate before matching a symbol, the + /// element at index i of the result will be null. + /// + /// @param s the ATN state + /// @return the expected symbols for each outgoing transition of [s]. + List getDecisionLookahead(ATNState s) { +// System.out.println("LOOK("+s.stateNumber+")"); + return List.generate(s.numberOfTransitions, (n) { + final lookAlt = IntervalSet(); + final lookBusy = {}; + final seeThruPreds = false; // fail to get lookahead upon pred + _LOOK( + s.transition(n).target, + null, + EmptyPredictionContext.Instance, + lookAlt, + lookBusy, + BitSet(), + seeThruPreds, + false, + ); + + // Wipe out lookahead for this alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if (lookAlt.length == 0 || lookAlt.contains(HIT_PRED)) { + return null; + } + return lookAlt; + }); + } + + /// Compute set of tokens that can follow [s] in the ATN in the + /// specified [ctx]. + /// + ///
    <p>
    If [ctx] is null and the end of the rule containing + /// [s] is reached, {@link Token#EPSILON} is added to the result set. + /// If [ctx] is not null and the end of the outermost rule is + /// reached, {@link Token#EOF} is added to the result set.
    </p>
    + /// + /// @param s the ATN state + /// @param stopState the ATN state to stop at. This can be a + /// [BlockEndState] to detect epsilon paths through a closure. + /// @param ctx the complete parser context, or null if the context + /// should be ignored + /// + /// @return The set of tokens that can follow [s] in the ATN in the + /// specified [ctx]. + + IntervalSet LOOK( + ATNState s, + RuleContext? ctx, [ + ATNState? stopState, + ]) { + final r = IntervalSet(); + final seeThruPreds = true; // ignore preds; get all lookahead + final lookContext = + ctx != null ? PredictionContext.fromRuleContext(s.atn, ctx) : null; + _LOOK( + s, + stopState, + lookContext, + r, + {}, + BitSet(), + seeThruPreds, + true, + ); + return r; + } + + /// Compute set of tokens that can follow [s] in the ATN in the + /// specified [ctx]. + /// + ///
    <p>
    If [ctx] is null and [stopState] or the end of the + /// rule containing [s] is reached, {@link Token#EPSILON} is added to + /// the result set. If [ctx] is not null and [addEOF] is + /// [true] and [stopState] or the end of the outermost rule is + /// reached, {@link Token#EOF} is added to the result set.
    </p>
    + /// + /// @param s the ATN state. + /// @param stopState the ATN state to stop at. This can be a + /// [BlockEndState] to detect epsilon paths through a closure. + /// @param ctx The outer context, or null if the outer context should + /// not be used. + /// @param look The result lookahead set. + /// @param lookBusy A set used for preventing epsilon closures in the ATN + /// from causing a stack overflow. Outside code should pass + /// {@code new HashSet} for this argument. + /// @param calledRuleStack A set used for preventing left recursion in the + /// ATN from causing a stack overflow. Outside code should pass + /// {@code new BitSet()} for this argument. + /// @param seeThruPreds [true] to true semantic predicates as + /// implicitly [true] and "see through them", otherwise [false] + /// to treat semantic predicates as opaque and add {@link #HIT_PRED} to the + /// result if one is encountered. + /// @param addEOF Add {@link Token#EOF} to the result if the end of the + /// outermost context is reached. This parameter has no effect if [ctx] + /// is null. + void _LOOK( + ATNState s, + ATNState? stopState, + PredictionContext? 
ctx, + IntervalSet look, + Set lookBusy, + BitSet calledRuleStack, + bool seeThruPreds, + bool addEOF) { +// System.out.println("_LOOK("+s.stateNumber+", ctx="+ctx); + final c = ATNConfig(s, 0, ctx); + if (!lookBusy.add(c)) return; + + if (s == stopState) { + if (ctx == null) { + look.addOne(Token.EPSILON); + return; + } else if (ctx.isEmpty && addEOF) { + look.addOne(Token.EOF); + return; + } + } + + if (s is RuleStopState) { + if (ctx == null) { + look.addOne(Token.EPSILON); + return; + } else if (ctx.isEmpty && addEOF) { + look.addOne(Token.EOF); + return; + } + + if (ctx != EmptyPredictionContext.Instance) { + // run thru all possible stack tops in ctx + final removed = calledRuleStack[s.ruleIndex]; + try { + calledRuleStack.clear(s.ruleIndex); + for (var i = 0; i < ctx.length; i++) { + final returnState = atn.states[ctx.getReturnState(i)]!; +// System.out.println("popping back to "+retState); + _LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, + calledRuleStack, seeThruPreds, addEOF); + } + } finally { + if (removed) { + calledRuleStack.set(s.ruleIndex); + } + } + return; + } + } + + for (var i = 0; i < s.numberOfTransitions; i++) { + final t = s.transition(i); + if (t is RuleTransition) { + if (calledRuleStack[t.target.ruleIndex]) { + continue; + } + + PredictionContext newContext = SingletonPredictionContext.create( + ctx, + t.followState.stateNumber, + ); + + try { + calledRuleStack.set(t.target.ruleIndex); + _LOOK(t.target, stopState, newContext, look, lookBusy, + calledRuleStack, seeThruPreds, addEOF); + } finally { + calledRuleStack.clear(t.target.ruleIndex); + } + } else if (t is AbstractPredicateTransition) { + if (seeThruPreds) { + _LOOK( + t.target, + stopState, + ctx, + look, + lookBusy, + calledRuleStack, + seeThruPreds, + addEOF, + ); + } else { + look.addOne(HIT_PRED); + } + } else if (t.isEpsilon) { + _LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, + seeThruPreds, addEOF); + } else if (t is WildcardTransition) { + 
look.addAll( + IntervalSet.ofRange(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType), + ); + } else { +// System.out.println("adding "+ t); + var set = t.label; + if (set != null) { + if (t is NotSetTransition) { + set = set.complement( + IntervalSet.ofRange(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType), + ); + } + look.addAll(set); + } + } + } + } +} diff --git a/runtime/Dart/lib/src/misc/misc.dart b/runtime/Dart/lib/src/misc/misc.dart new file mode 100644 index 0000000000..73829bf6ef --- /dev/null +++ b/runtime/Dart/lib/src/misc/misc.dart @@ -0,0 +1,8 @@ +/* + * Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +export 'src/multi_map.dart'; +export 'src/pair.dart'; diff --git a/runtime/Dart/lib/src/misc/src/multi_map.dart b/runtime/Dart/lib/src/misc/src/multi_map.dart new file mode 100644 index 0000000000..8998fd3ccf --- /dev/null +++ b/runtime/Dart/lib/src/misc/src/multi_map.dart @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'package:collection/collection.dart'; + +import 'pair.dart'; + +class MultiMap extends DelegatingMap> { + MultiMap() : super({}); + + void put(K key, V value) { + var elementsForKey = this[key]; + if (elementsForKey == null) { + elementsForKey = []; + this[key] = elementsForKey; + } + elementsForKey.add(value); + } + + List> get pairs { + final pairs = >[]; + for (var key in keys) { + for (var value in this[key]!) 
{ + pairs.add(Pair(key, value)); + } + } + return pairs; + } +} diff --git a/runtime/Dart/lib/src/misc/src/pair.dart b/runtime/Dart/lib/src/misc/src/pair.dart new file mode 100644 index 0000000000..50a9c2160c --- /dev/null +++ b/runtime/Dart/lib/src/misc/src/pair.dart @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../util/murmur_hash.dart'; + +class Pair { + final A a; + final B b; + + const Pair(this.a, this.b); + + @override + bool operator ==(other) { + return identical(this, other) || + other is Pair && a == other.a && b == other.b; + } + + @override + String toString() { + return '($a, $b)'; + } + + @override + int get hashCode { + MurmurHash.initialize(); + + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, a); + hash = MurmurHash.update(hash, b); + return MurmurHash.finish(hash, 2); + } +} diff --git a/runtime/Dart/lib/src/parser.dart b/runtime/Dart/lib/src/parser.dart new file mode 100644 index 0000000000..de002e377f --- /dev/null +++ b/runtime/Dart/lib/src/parser.dart @@ -0,0 +1,758 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'atn/atn.dart'; +import 'error/error.dart'; +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'lexer.dart'; +import 'parser_rule_context.dart'; +import 'recognizer.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'token_factory.dart'; +import 'token_stream.dart'; +import 'tree/tree.dart'; + +import 'util/platform_stub.dart' + if (dart.library.io) 'util/platform_io.dart' + if (dart.library.html) 'util/platform_html.dart'; + +/// This is all the parsing support code essentially; most of it is error recovery stuff. 
*/ +abstract class Parser extends Recognizer { + /// This field maps from the serialized ATN string to the deserialized [ATN] with + /// bypass alternatives. + /// + /// @see ATNDeserializationOptions#isGenerateRuleBypassTransitions() + ATN? bypassAltsAtnCache; + + /// The error handling strategy for the parser. The default value is a new + /// instance of [DefaultErrorStrategy]. + /// + /// @see #getErrorHandler + /// @see #setErrorHandler + + ErrorStrategy errorHandler = DefaultErrorStrategy(); + + /// The input stream. + /// + /// @see #getInputStream + /// @see #setInputStream + TokenStream _input; + + final List _precedenceStack = [0]; + + /// The [ParserRuleContext] object for the currently executing rule. + /// This is always non-null during the parsing process. + ParserRuleContext? context; + + /// Specifies whether or not the parser should construct a parse tree during + /// the parsing process. The default value is [true]. + bool buildParseTree = true; + + /// When {@link #setTrace}{@code (true)} is called, a reference to the + /// [TraceListener] is stored here so it can be easily removed in a + /// later call to {@link #setTrace}{@code (false)}. The listener itself is + /// implemented as a parser listener so this field is not directly used by + /// other parser methods. + TraceListener? _tracer; + + /// The list of [ParseTreeListener] listeners registered to receive + /// events during the parse. + /// + /// @see #addParseListener + List? _parseListeners; + + /// The number of syntax errors reported during parsing. This value is + /// incremented each time {@link #notifyErrorListeners} is called. + int _syntaxErrors = 0; + + /// Indicates parser has match()ed EOF token. See {@link #exitRule()}. 
*/ + bool matchedEOF = false; + + Parser(this._input) { + reset(false); + } + + /// reset the parser's state */ + void reset([bool resetInput = true]) { + if (resetInput) inputStream.seek(0); + errorHandler.reset(this); + context = null; + _syntaxErrors = 0; + matchedEOF = false; + setTrace(false); + _precedenceStack.clear(); + _precedenceStack.add(0); + interpreter?.reset(); + } + + /// Match current input symbol against [ttype]. If the symbol type + /// matches, {@link ANTLRErrorStrategy#reportMatch} and {@link #consume} are + /// called to complete the match process. + /// + ///
    <p>
    If the symbol type does not match, + /// {@link ANTLRErrorStrategy#recoverInline} is called on the current error + /// strategy to attempt recovery. If {@link #getBuildParseTree} is + /// [true] and the token index of the symbol returned by + /// {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to + /// the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)}.
    </p>
    + /// + /// @param ttype the token type to match + /// @return the matched symbol + /// @throws RecognitionException if the current input symbol did not match + /// [ttype] and the error strategy could not recover from the + /// mismatched symbol + Token match(int ttype) { + var t = currentToken; + if (t.type == ttype) { + if (ttype == Token.EOF) { + matchedEOF = true; + } + errorHandler.reportMatch(this); + consume(); + } else { + t = errorHandler.recoverInline(this); + if (buildParseTree && t.tokenIndex == -1) { + // we must have conjured up a new token during single token insertion + // if it's not the current symbol + context!.addErrorNode(createErrorNode(context!, t)); + } + } + return t; + } + + /// Match current input symbol as a wildcard. If the symbol type matches + /// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy#reportMatch} + /// and {@link #consume} are called to complete the match process. + /// + ///
    <p>
    If the symbol type does not match, + /// {@link ANTLRErrorStrategy#recoverInline} is called on the current error + /// strategy to attempt recovery. If {@link #getBuildParseTree} is + /// [true] and the token index of the symbol returned by + /// {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to + /// the parse tree by calling {@link Parser#createErrorNode(ParserRuleContext, Token)}. then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)}
    </p>
    + /// + /// @return the matched symbol + /// @throws RecognitionException if the current input symbol did not match + /// a wildcard and the error strategy could not recover from the mismatched + /// symbol + Token matchWildcard() { + var t = currentToken; + if (t.type > 0) { + errorHandler.reportMatch(this); + consume(); + } else { + t = errorHandler.recoverInline(this); + if (buildParseTree && t.tokenIndex == -1) { + // we must have conjured up a new token during single token insertion + // if it's not the current symbol + context!.addErrorNode(createErrorNode(context!, t)); + } + } + + return t; + } + + /// Trim the internal lists of the parse tree during parsing to conserve memory. + /// This property is set to [false] by default for a newly constructed parser. + /// + /// @param trimParseTrees [true] to trim the capacity of the {@link ParserRuleContext#children} + /// list to its size after a rule is parsed. + set trimParseTree(bool trimParseTrees) { + if (trimParseTrees) { + if (trimParseTree) return; + addParseListener(TrimToSizeListener.INSTANCE); + } else { + removeParseListener(TrimToSizeListener.INSTANCE); + } + } + + /// @return [true] if the {@link ParserRuleContext#children} list is trimmed + /// using the default {@link Parser.TrimToSizeListener} during the parse process. + bool get trimParseTree { + return parseListeners?.contains(TrimToSizeListener.INSTANCE) ?? false; + } + + List? get parseListeners => _parseListeners; + + /// Registers [listener] to receive events during the parsing process. + /// + ///
    <p>
    To support output-preserving grammar transformations (including but not + /// limited to left-recursion removal, automated left-factoring, and + /// optimized code generation), calls to listener methods during the parse + /// may differ substantially from calls made by + /// {@link ParseTreeWalker#DEFAULT} used after the parse is complete. In + /// particular, rule entry and exit events may occur in a different order + /// during the parse than after the parser. In addition, calls to certain + /// rule entry methods may be omitted.
    </p>
    + /// + ///
    <p>
    With the following specific exceptions, calls to listener events are + /// deterministic, i.e. for identical input the calls to listener + /// methods will be the same.
    </p>
    + /// + ///
      + /// <ul>
    <li>Alterations to the grammar used to generate code may change the + /// behavior of the listener calls.</li>
     + ///
    <li>Alterations to the command line options passed to ANTLR 4 when + /// generating the parser may change the behavior of the listener calls.</li>
     + ///
    <li>Changing the version of the ANTLR Tool used to generate the parser + /// may change the behavior of the listener calls.</li>
     + ///
    </ul>
    <p>
    If [listener] is null or has not been added as a parse + /// listener, this method does nothing.
    </p>
    + /// + /// @see #addParseListener + /// + /// @param listener the listener to remove + void removeParseListener(ParseTreeListener? listener) { + if (_parseListeners != null) { + if (_parseListeners!.remove(listener)) { + if (_parseListeners!.isEmpty) { + _parseListeners = null; + } + } + } + } + + /// Remove all parse listeners. + /// + /// @see #addParseListener + void removeParseListeners() { + _parseListeners = null; + } + + /// Notify any parse listeners of an enter rule event. + /// + /// @see #addParseListener + void triggerEnterRuleEvent() { + if (_parseListeners == null) return; + for (var listener in _parseListeners!) { + listener.enterEveryRule(context!); + context!.enterRule(listener); + } + } + + /// Notify any parse listeners of an exit rule event. + /// + /// @see #addParseListener + void triggerExitRuleEvent() { + if (_parseListeners == null) return; + // reverse order walk of listeners + for (var i = _parseListeners!.length - 1; i >= 0; i--) { + final listener = _parseListeners![i]; + context!.exitRule(listener); + listener.exitEveryRule(context!); + } + } + + /// Gets the number of syntax errors reported during parsing. This value is + /// incremented each time {@link #notifyErrorListeners} is called. + /// + /// @see #notifyErrorListeners + int get numberOfSyntaxErrors { + return _syntaxErrors; + } + + @override + TokenFactory get tokenFactory { + return _input.tokenSource.tokenFactory; + } + + /// Tell our token source and error strategy about a new way to create tokens. */ + @override + set tokenFactory(TokenFactory factory) { + _input.tokenSource.tokenFactory = factory; + } + + /// The ATN with bypass alternatives is expensive to create so we create it + /// lazily. + /// + /// @throws UnsupportedOperationException if the current parser does not + /// implement the {@link #getSerializedATN()} method. 
+ ATN get ATNWithBypassAlts { + if (serializedATN == null) { + throw UnsupportedError( + 'The current parser does not support an ATN with bypass alternatives.'); + } + + if (bypassAltsAtnCache == null) { + final deserializationOptions = ATNDeserializationOptions(false); + deserializationOptions.setGenerateRuleBypassTransitions(true); + bypassAltsAtnCache = ATNDeserializer(deserializationOptions).deserialize(serializedATN); + } + + return bypassAltsAtnCache!; + } + + /// The preferred method of getting a tree pattern. For example, here's a + /// sample use: + /// + ///
    +  /// <pre>
    +  /// ParseTree t = parser.expr();
    +  /// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
    +  /// ParseTreeMatch m = p.match(t);
    +  /// String id = m.get("ID");
    +  /// </pre>
    + ParseTreePattern compileParseTreePattern( + String pattern, + int patternRuleIndex, [ + Lexer? lexer, + ]) { + if (lexer == null) { + final tokenSource = tokenStream.tokenSource; + if (tokenSource is! Lexer) { + throw UnsupportedError("Parser can't discover a lexer to use"); + } + lexer = tokenSource; + } + + final m = ParseTreePatternMatcher(lexer, this); + return m.compile(pattern, patternRuleIndex); + } + + @override + TokenStream get inputStream => tokenStream; + + @override + set inputStream(TokenStream input) { + setTokenStream(input); + } + + TokenStream get tokenStream => _input; + + /// Set the token stream and reset the parser. */ + void setTokenStream(TokenStream input) { + reset(false); + _input = input; + } + + /// Match needs to return the current input symbol, which gets put + /// into the label for the associated token ref; e.g., x=ID. + + Token get currentToken { + return _input.LT(1)!; + } + + void notifyErrorListeners( + String msg, [ + Token? offendingToken, + RecognitionException? e, + ]) { + offendingToken = offendingToken ?? currentToken; + _syntaxErrors++; + int? line = -1; + var charPositionInLine = -1; + line = offendingToken.line; + charPositionInLine = offendingToken.charPositionInLine; + + final listener = errorListenerDispatch; + listener.syntaxError( + this, + offendingToken, + line, + charPositionInLine, + msg, + e, + ); + } + + /// Consume and return the {@linkplain #getCurrentToken current symbol}. + /// + ///
    <p>
    E.g., given the following input with [A] being the current + /// lookahead symbol, this function moves the cursor to [B] and returns + /// [A].
    </p>
    + /// + /// <pre>
    +  ///  A B
    +  ///  ^
    +  /// </pre>
    + /// + /// If the parser is not in error recovery mode, the consumed symbol is added + /// to the parse tree using {@link ParserRuleContext#addChild}, and + /// {@link ParseTreeListener#visitTerminal} is called on any parse listeners. + /// If the parser is in error recovery mode, the consumed symbol is + /// added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)} and + /// {@link ParseTreeListener#visitErrorNode} is called on any parse + /// listeners. + Token consume() { + final o = currentToken; + if (o.type != IntStream.EOF) { + inputStream.consume(); + } + final hasListener = _parseListeners != null && _parseListeners!.isNotEmpty; + if (buildParseTree || hasListener) { + if (errorHandler.inErrorRecoveryMode(this)) { + final node = context!.addErrorNode(createErrorNode(context!, o)); + if (_parseListeners != null) { + for (var listener in _parseListeners!) { + listener.visitErrorNode(node); + } + } + } else { + final node = context!.addChild(createTerminalNode(context!, o)); + if (_parseListeners != null) { + for (var listener in _parseListeners!) { + listener.visitTerminal(node); + } + } + } + } + return o; + } + + /// How to create a token leaf node associated with a parent. + /// Typically, the terminal node to create is not a function of the parent. + /// + /// @since 4.7 + TerminalNode createTerminalNode(ParserRuleContext parent, Token t) { + return TerminalNodeImpl(t); + } + + /// How to create an error node, given a token, associated with a parent. + /// Typically, the error node to create is not a function of the parent. 
+ /// + /// @since 4.7 + ErrorNode createErrorNode(ParserRuleContext parent, Token t) { + return ErrorNodeImpl(t); + } + + void addContextToParseTree() { + final parent = context?.parent; + // add current context to parent if we have a parent + if (parent != null) { + parent.addAnyChild(context!); + } + } + + /// Always called by generated parsers upon entry to a rule. Access field + /// {@link #_ctx} get the current context. + void enterRule(ParserRuleContext localctx, int state, int ruleIndex) { + this.state = state; + context = localctx; + context!.start = _input.LT(1)!; + if (buildParseTree) addContextToParseTree(); + if (_parseListeners != null) triggerEnterRuleEvent(); + } + + void exitRule() { + assert(context != null); + if (matchedEOF) { + // if we have matched EOF, it cannot consume past EOF so we use LT(1) here + context!.stop = _input.LT(1); // LT(1) will be end of file + } else { + context!.stop = _input.LT(-1); // stop node is what we just matched + } + // trigger event on _ctx, before it reverts to parent + if (_parseListeners != null) triggerExitRuleEvent(); + state = context!.invokingState; + context = context?.parent; + } + + void enterOuterAlt(ParserRuleContext localctx, int altNum) { + assert(context != null); + localctx.altNumber = altNum; + // if we have new localctx, make sure we replace existing ctx + // that is previous child of parse tree + if (buildParseTree && context != localctx) { + final parent = context!.parent; + if (parent != null) { + parent.removeLastChild(); + parent.addAnyChild(localctx); + } + } + context = localctx; + } + + /// Get the precedence level for the top-most precedence rule. + /// + /// @return The precedence level for the top-most precedence rule, or -1 if + /// the parser context is not nested within a precedence rule. 
+ int get precedence { + if (_precedenceStack.isEmpty) { + return -1; + } + + return _precedenceStack.last; + } + + void enterRecursionRule( + ParserRuleContext localctx, int state, int ruleIndex, int precedence) { + this.state = state; + _precedenceStack.add(precedence); + context = localctx; + context!.start = _input.LT(1)!; + if (_parseListeners != null) { + triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules + } + } + + /// Like {@link #enterRule} but for recursive rules. + /// Make the current context the child of the incoming localctx. + void pushNewRecursionContext( + ParserRuleContext localctx, + int state, + int? ruleIndex, + ) { + assert(context != null); + final previous = context!; + previous.parent = localctx; + previous.invokingState = state; + previous.stop = _input.LT(-1); + + context = localctx; + context!.start = previous.start; + if (buildParseTree) { + context!.addAnyChild(previous); + } + + if (_parseListeners != null) { + triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules + } + } + + void unrollRecursionContexts(ParserRuleContext? _parentctx) { + assert(context != null); + _precedenceStack.removeLast(); + context!.stop = _input.LT(-1); + final retctx = context!; // save current ctx (return value) + + // unroll so _ctx is as it was before call to recursive method + if (_parseListeners != null) { + while (context != _parentctx) { + triggerExitRuleEvent(); + context = context!.parent; + } + } else { + context = _parentctx; + } + + // hook into tree + retctx.parent = _parentctx; + + if (buildParseTree && _parentctx != null) { + // add return ctx into invoking rule's tree + _parentctx.addAnyChild(retctx); + } + } + + ParserRuleContext? getInvokingContext(int ruleIndex) { + var p = context; + while (p != null) { + if (p.ruleIndex == ruleIndex) return p; + p = p.parent; + } + return null; + } + + @override + bool precpred(RuleContext? 
localctx, int precedence) { + return precedence >= _precedenceStack.last; + } + + bool inContext(String context) { + // TODO: useful in parser? + return false; + } + + /// Checks whether or not [symbol] can follow the current state in the + /// ATN. The behavior of this method is equivalent to the following, but is + /// implemented such that the complete context-sensitive follow set does not + /// need to be explicitly constructed. + /// + ///
    +  /// return expectedTokens.contains(symbol);
    +  /// 
    + /// + /// @param symbol the symbol type to check + /// @return [true] if [symbol] can follow the current state in + /// the ATN, otherwise [false]. + bool isExpectedToken(int symbol) { +// return interpreter!.atn.nextTokens(_ctx); + final atn = interpreter!.atn; + var ctx = context; + final s = atn.states[state]; + var following = atn.nextTokens(s!); + if (following.contains(symbol)) { + return true; + } +// log("following "+s+"="+following); + if (!following.contains(Token.EPSILON)) return false; + + while (ctx != null && + ctx.invokingState >= 0 && + following.contains(Token.EPSILON)) { + final invokingState = atn.states[ctx.invokingState]!; + final rt = invokingState.transition(0) as RuleTransition; + following = atn.nextTokens(rt.followState); + if (following.contains(symbol)) { + return true; + } + + ctx = ctx.parent; + } + + if (following.contains(Token.EPSILON) && symbol == Token.EOF) { + return true; + } + + return false; + } + + bool isMatchedEOF() { + return matchedEOF; + } + + /// Computes the set of input symbols which could follow the current parser + /// state and context, as given by {@link #getState} and {@link #getContext}, + /// respectively. + /// + /// @see ATN#getExpectedTokens(int, RuleContext) + IntervalSet get expectedTokens { + return getATN().getExpectedTokens(state, context); + } + + IntervalSet get expectedTokensWithinCurrentRule { + final atn = interpreter!.atn; + final s = atn.states[state]!; + return atn.nextTokens(s); + } + + /// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. */ + int getRuleIndex(String ruleName) { + final ruleIndex = ruleIndexMap[ruleName]; + if (ruleIndex != null) return ruleIndex; + return -1; + } + + ParserRuleContext get ruleContext { + assert(context != null); + return context!; + } + + List get ruleInvocationStack => getRuleInvocationStack(); + + /// Return List<String> of the rule names in your parser instance + /// leading up to a call to the current rule. 
You could override if + /// you want more details such as the file/line info of where + /// in the ATN a rule is invoked. + /// + /// This is very useful for error messages. + List getRuleInvocationStack([RuleContext? p]) { + p = p ?? context; + final _ruleNames = ruleNames; + final stack = []; + while (p != null) { + // compute what follows who invoked us + final ruleIndex = p.ruleIndex; + if (ruleIndex < 0) { + stack.add('n/a'); + } else { + stack.add(_ruleNames[ruleIndex]); + } + p = p.parent; + } + return stack; + } + + /// For debugging and other purposes. */ + List get dfaStrings { + final s = []; + for (var d = 0; d < interpreter!.decisionToDFA.length; d++) { + final dfa = interpreter!.decisionToDFA[d]; + s.add(dfa.toString(vocabulary)); + } + return s; + } + + /// For debugging and other purposes. */ + void dumpDFA() { + var seenOne = false; + for (var d = 0; d < interpreter!.decisionToDFA.length; d++) { + final dfa = interpreter!.decisionToDFA[d]; + if (dfa.states.isNotEmpty) { + if (seenOne) print(''); + print('Decision ${dfa.decision}:'); + stdoutWrite(dfa.toString(vocabulary)); + seenOne = true; + } + } + } + + String get sourceName { + return _input.sourceName; + } + + @override + ParseInfo? get parseInfo { + final interp = interpreter; + if (interp is ProfilingATNSimulator) { + return ParseInfo(interp); + } + return null; + } + + /// @since 4.3 + void setProfile(bool profile) { + final interp = interpreter!; + final saveMode = interp.predictionMode; + if (profile) { + if (interp is! ProfilingATNSimulator) { + interpreter = ProfilingATNSimulator(this); + } + } else if (interp is ProfilingATNSimulator) { + final sim = ParserATNSimulator( + this, + getATN(), + interp.decisionToDFA, + interp.sharedContextCache, + ); + interpreter = sim; + } + interpreter!.predictionMode = saveMode; + } + + /// During a parse is sometimes useful to listen in on the rule entry and exit + /// events as well as token matches. This is for quick and dirty debugging. 
+ void setTrace(bool trace) { + if (!trace) { + removeParseListener(_tracer); + _tracer = null; + } else { + if (_tracer != null) { + removeParseListener(_tracer); + } else { + _tracer = TraceListener(this); + } + addParseListener(_tracer!); + } + } + + /// Gets whether a [TraceListener] is registered as a parse listener + /// for the parser. + /// + /// @see #setTrace(bool) + bool isTrace() { + return _tracer != null; + } +} diff --git a/runtime/Dart/lib/src/parser_interpreter.dart b/runtime/Dart/lib/src/parser_interpreter.dart new file mode 100644 index 0000000000..92aca69318 --- /dev/null +++ b/runtime/Dart/lib/src/parser_interpreter.dart @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:collection'; + +import 'atn/atn.dart'; +import 'dfa/dfa.dart'; +import 'error/error.dart'; +import 'misc/misc.dart'; +import 'parser.dart'; +import 'parser_rule_context.dart'; +import 'token.dart'; +import 'token_stream.dart'; +import 'vocabulary.dart'; + +/// A parser simulator that mimics what ANTLR's generated +/// parser code does. A ParserATNSimulator is used to make +/// predictions via adaptivePredict but this class moves a pointer through the +/// ATN to simulate parsing. ParserATNSimulator just +/// makes us efficient rather than having to backtrack, for example. +/// +/// This properly creates parse trees even for left recursive rules. +/// +/// We rely on the left recursive rule invocation and special predicate +/// transitions to make left recursive rules work. +/// +/// See TestParserInterpreter for examples. 
+class ParserInterpreter extends Parser { + @override + final String grammarFileName; + final ATN atn; + + late List decisionToDFA; // not shared like it is for generated parsers + final PredictionContextCache sharedContextCache = PredictionContextCache(); + + @override + final List ruleNames; + + @override + final Vocabulary vocabulary; + + /// This stack corresponds to the _parentctx, _parentState pair of locals + /// that would exist on call stack frames with a recursive descent parser; + /// in the generated function for a left-recursive rule you'd see: + /// + /// EContext e(int _p) throws RecognitionException { + /// ParserRuleContext _parentctx = context; // Pair.a + /// int _parentState = state; // Pair.b + /// ... + /// } + /// + /// Those values are used to create new recursive rule invocation contexts + /// associated with left operand of an alt like "expr '*' expr". + final DoubleLinkedQueue> _parentContextStack = + DoubleLinkedQueue(); + + /// We need a map from (decision,inputIndex)->forced alt for computing ambiguous + /// parse trees. For now, we allow exactly one override. + int overrideDecision = -1; + int overrideDecisionInputIndex = -1; + int overrideDecisionAlt = -1; + bool overrideDecisionReached = + false; // latch and only override once; error might trigger infinite loop + + /// What is the current context when we override a decisions? This tells + /// us what the root of the parse tree is when using override + /// for an ambiguity/lookahead check. + InterpreterRuleContext? overrideDecisionRoot; + + /// Return the root of the parse, which can be useful if the parser + /// bails out. You still can access the top node. Note that, + /// because of the way left recursive rules add children, it's possible + /// that the root will not have any children if the start rule immediately + /// called and left recursive rule that fails. 
+ /// + /// @since 4.5.1 + late InterpreterRuleContext rootContext; + + ParserInterpreter( + this.grammarFileName, + this.vocabulary, + this.ruleNames, + this.atn, + TokenStream input, + ) : super(input) { + // init decision DFA + final numberOfDecisions = atn.numberOfDecisions; + decisionToDFA = List.generate(numberOfDecisions, (n) { + final decisionState = atn.getDecisionState(n); + return DFA(decisionState, n); + }); + + // get atn simulator that knows how to do predictions + interpreter = ParserATNSimulator( + this, + atn, + decisionToDFA, + sharedContextCache, + ); + } + + @override + void reset([bool resetInput = true]) { + super.reset(resetInput); + overrideDecisionReached = false; + overrideDecisionRoot = null; + } + + @override + ATN getATN() { + return atn; + } + + /// Begin parsing at startRuleIndex */ + ParserRuleContext parse(int startRuleIndex) { + final startRuleStartState = atn.ruleToStartState[startRuleIndex]; + + rootContext = createInterpreterRuleContext( + null, + ATNState.INVALID_STATE_NUMBER, + startRuleIndex, + ); + if (startRuleStartState.isLeftRecursiveRule) { + enterRecursionRule( + rootContext, startRuleStartState.stateNumber, startRuleIndex, 0); + } else { + enterRule(rootContext, startRuleStartState.stateNumber, startRuleIndex); + } + + while (true) { + final p = atnState; + switch (p.stateType) { + case StateType.RULE_STOP: + // pop; return from rule + if (context!.isEmpty) { + if (startRuleStartState.isLeftRecursiveRule) { + final result = context!; + final parentContext = _parentContextStack.removeLast(); + unrollRecursionContexts(parentContext.a); + return result; + } else { + exitRule(); + return rootContext; + } + } + + visitRuleStopState(p); + break; + + default: + try { + visitState(p); + } on RecognitionException catch (e) { + state = atn.ruleToStopState[p.ruleIndex].stateNumber; + context!.exception = e; + errorHandler.reportError(this, e); + recover(e); + } + + break; + } + } + } + + @override + void enterRecursionRule( + 
ParserRuleContext localctx, + int state, + int ruleIndex, + int precedence, + ) { + final pair = Pair(context, localctx.invokingState); + _parentContextStack.add(pair); + super.enterRecursionRule(localctx, state, ruleIndex, precedence); + } + + ATNState get atnState { + return atn.states[state]!; + } + + void visitState(ATNState p) { + assert(context != null); +// System.out.println("visitState "+p.stateNumber); + var predictedAlt = 1; + if (p is DecisionState) { + predictedAlt = visitDecisionState(p); + } + + final transition = p.transition(predictedAlt - 1); + switch (transition.type) { + case TransitionType.EPSILON: + if (p.stateType == StateType.STAR_LOOP_ENTRY && + (p as StarLoopEntryState).isPrecedenceDecision && + (transition.target is! LoopEndState)) { + // We are at the start of a left recursive rule's (...)* loop + // and we're not taking the exit branch of loop. + final localctx = createInterpreterRuleContext( + _parentContextStack.last.a, + _parentContextStack.last.b, + context!.ruleIndex, + ); + pushNewRecursionContext( + localctx, + atn.ruleToStartState[p.ruleIndex].stateNumber, + context!.ruleIndex, + ); + } + break; + + case TransitionType.ATOM: + match((transition as AtomTransition).atomLabel); + break; + + case TransitionType.RANGE: + case TransitionType.SET: + case TransitionType.NOT_SET: + if (!transition.matches( + inputStream.LA(1)!, + Token.MIN_USER_TOKEN_TYPE, + 65535, + )) { + recoverInline(); + } + matchWildcard(); + break; + + case TransitionType.WILDCARD: + matchWildcard(); + break; + + case TransitionType.RULE: + final ruleStartState = transition.target as RuleStartState; + final ruleIndex = ruleStartState.ruleIndex; + final newctx = + createInterpreterRuleContext(context, p.stateNumber, ruleIndex); + if (ruleStartState.isLeftRecursiveRule) { + enterRecursionRule(newctx, ruleStartState.stateNumber, ruleIndex, + (transition as RuleTransition).precedence); + } else { + enterRule(newctx, transition.target.stateNumber, ruleIndex); + } + 
break; + + case TransitionType.PREDICATE: + final predicateTransition = transition as PredicateTransition; + if (!sempred(context, predicateTransition.ruleIndex, + predicateTransition.predIndex)) { + throw FailedPredicateException(this); + } + + break; + + case TransitionType.ACTION: + final actionTransition = transition as ActionTransition; + action( + context, + actionTransition.ruleIndex, + actionTransition.actionIndex, + ); + break; + + case TransitionType.PRECEDENCE: + if (!precpred( + context, + (transition as PrecedencePredicateTransition).precedence, + )) { + throw FailedPredicateException( + this, 'precpred(context, ${(transition).precedence})'); + } + break; + + default: + throw UnsupportedError('Unrecognized ATN transition type.'); + } + + state = transition.target.stateNumber; + } + + /// Method visitDecisionState() is called when the interpreter reaches + /// a decision state (instance of DecisionState). It gives an opportunity + /// for subclasses to track interesting things. + int visitDecisionState(DecisionState p) { + var predictedAlt = 1; + assert(context != null); + if (p.numberOfTransitions > 1) { + errorHandler.sync(this); + final decision = p.decision; + if (decision == overrideDecision && + inputStream.index == overrideDecisionInputIndex && + !overrideDecisionReached) { + predictedAlt = overrideDecisionAlt; + overrideDecisionReached = true; + } else { + predictedAlt = interpreter!.adaptivePredict( + inputStream, + decision, + context!, + ); + } + } + return predictedAlt; + } + + /// Provide simple "factory" for InterpreterRuleContext's. + /// @since 4.5.1 + InterpreterRuleContext createInterpreterRuleContext( + ParserRuleContext? 
parent, + int invokingStateNumber, + int ruleIndex, + ) { + return InterpreterRuleContext(parent, invokingStateNumber, ruleIndex); + } + + void visitRuleStopState(ATNState p) { + final ruleStartState = atn.ruleToStartState[p.ruleIndex]; + if (ruleStartState.isLeftRecursiveRule) { + final parentContext = _parentContextStack.removeLast(); + unrollRecursionContexts(parentContext.a); + state = parentContext.b; + } else { + exitRule(); + } + + final ruleTransition = atn.states[state]!.transition(0) as RuleTransition; + state = ruleTransition.followState.stateNumber; + } + + /// Override this parser interpreters normal decision-making process + /// at a particular decision and input token index. Instead of + /// allowing the adaptive prediction mechanism to choose the + /// first alternative within a block that leads to a successful parse, + /// force it to take the alternative, 1..n for n alternatives. + /// + /// As an implementation limitation right now, you can only specify one + /// override. This is sufficient to allow construction of different + /// parse trees for ambiguous input. It means re-parsing the entire input + /// in general because you're never sure where an ambiguous sequence would + /// live in the various parse trees. For example, in one interpretation, + /// an ambiguous input sequence would be matched completely in expression + /// but in another it could match all the way back to the root. + /// + /// s : e '!'? ; + /// e : ID + /// | ID '!' + /// ; + /// + /// Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first + /// case, the ambiguous sequence is fully contained only by the root. + /// In the second case, the ambiguous sequences fully contained within just + /// e, as in: (e ID !). 
+ /// + /// Rather than trying to optimize this and make + /// some intelligent decisions for optimization purposes, I settled on + /// just re-parsing the whole input and then using + /// {link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal + /// subtree that contains the ambiguous sequence. I originally tried to + /// record the call stack at the point the parser detected and ambiguity but + /// left recursive rules create a parse tree stack that does not reflect + /// the actual call stack. That impedance mismatch was enough to make + /// it it challenging to restart the parser at a deeply nested rule + /// invocation. + /// + /// Only parser interpreters can override decisions so as to avoid inserting + /// override checking code in the critical ALL(*) prediction execution path. + /// + /// @since 4.5.1 + void addDecisionOverride(int decision, int tokenIndex, int forcedAlt) { + overrideDecision = decision; + overrideDecisionInputIndex = tokenIndex; + overrideDecisionAlt = forcedAlt; + } + + /// Rely on the error handler for this parser but, if no tokens are consumed + /// to recover, add an error node. Otherwise, nothing is seen in the parse + /// tree. 
+ void recover(RecognitionException e) { + final i = inputStream.index; + errorHandler.recover(this, e); + assert(this.context != null); + final context = this.context as ParserRuleContext; + if (inputStream.index == i) { + // no input consumed, better add an error node + if (e is InputMismatchException) { + final ime = e; + final tok = e.offendingToken; + var expectedTokenType = Token.INVALID_TYPE; + if (ime.expectedTokens != null && !ime.expectedTokens!.isNil) { + expectedTokenType = ime.expectedTokens!.minElement; // get any element + } + final errToken = tokenFactory.create( + expectedTokenType, + tok.text, + Pair(tok.tokenSource, tok.tokenSource?.inputStream), + Token.DEFAULT_CHANNEL, + -1, + -1, + // invalid start/stop + tok.line, + tok.charPositionInLine, + ); + context.addErrorNode(createErrorNode(context, errToken)); + } else { + // NoViableAlt + final tok = e.offendingToken; + final errToken = tokenFactory.create( + Token.INVALID_TYPE, + tok.text, + Pair(tok.tokenSource, tok.tokenSource?.inputStream), + Token.DEFAULT_CHANNEL, + -1, + -1, + // invalid start/stop + tok.line, + tok.charPositionInLine, + ); + context.addErrorNode(createErrorNode(context, errToken)); + } + } + } + + Token recoverInline() { + return errorHandler.recoverInline(this); + } +} diff --git a/runtime/Dart/lib/src/parser_rule_context.dart b/runtime/Dart/lib/src/parser_rule_context.dart new file mode 100644 index 0000000000..a1e54d0d83 --- /dev/null +++ b/runtime/Dart/lib/src/parser_rule_context.dart @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'error/error.dart'; +import 'interval_set.dart'; +import 'parser.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'tree/tree.dart'; + +/// A rule invocation record for parsing. 
+/// +/// Contains all of the information about the current rule not stored in the +/// RuleContext. It handles parse tree children list, Any ATN state +/// tracing, and the default values available for rule invocations: +/// start, stop, rule index, current alt number. +/// +/// Subclasses made for each rule and grammar track the parameters, +/// return values, locals, and labels specific to that rule. These +/// are the objects that are returned from rules. +/// +/// Note text is not an actual field of a rule return value; it is computed +/// from start and stop using the input stream's toString() method. I +/// could add a ctor to this so that we can pass in and store the input +/// stream, but I'm not sure we want to do that. It would seem to be undefined +/// to get the .text property anyway if the rule matches tokens from multiple +/// input streams. +/// +/// I do not use getters for fields of objects that are used simply to +/// group values such as this aggregate. The getters/setters are there to +/// satisfy the superclass interface. +class ParserRuleContext extends RuleContext { + /// If we are debugging or building a parse tree for a visitor, + /// we need to track all of the tokens and rule invocations associated + /// with this rule's context. This is empty for parsing w/o tree constr. + /// operation because we don't the need to track the details about + /// how we parse this rule. + List? children; + + /// Get the initial/final token in this context. + /// Note that the range from start to stop is inclusive, so for rules that do not consume anything + /// (for example, zero length or error productions) this token may exceed stop. + Token? start, stop; + + /// The exception that forced this rule to return. If the rule successfully + /// completed, this is null. + RecognitionException? exception; + + ParserRuleContext([RuleContext? parent, int? 
invokingStateNumber]) + : super(parent: parent, invokingState: invokingStateNumber); + + /// COPY a ctx (I'm deliberately not using copy constructor) to avoid + /// confusion with creating node with parent. Does not copy children + /// (except error leaves). + /// + /// This is used in the generated parser code to flip a generic XContext + /// node for rule X to a YContext for alt label Y. In that sense, it is + /// not really a generic copy function. + /// + /// If we do an error sync() at start of a rule, we might add error nodes + /// to the generic XContext so this function must copy those nodes to + /// the YContext as well else they are lost! + void copyFrom(ParserRuleContext ctx) { + parent = ctx.parent; + invokingState = ctx.invokingState; + + start = ctx.start; + stop = ctx.stop; + + // copy any error nodes to alt label node + if (ctx.children != null) { + children = []; + // reset parent pointer for any error nodes + for (var child in ctx.children!) { + if (child is ErrorNode) { + addChild(child); + } + } + } + } + + // Double dispatch methods for listeners + + void enterRule(ParseTreeListener listener) {} + + void exitRule(ParseTreeListener listener) {} + + /// Add a parse tree node to this as a child. Works for + /// internal and leaf nodes. Does not set parent link; + /// other add methods must do that. Other addChild methods + /// call this. + /// + /// We cannot set the parent pointer of the incoming node + /// because the existing interfaces do not have a setParent() + /// method and I don't want to break backward compatibility for this. + /// + /// @since 4.7 + T addAnyChild(T t) { + children ??= []; + children!.add(t); + return t; + } + + /// Add a token leaf node child and force its parent to be this node. */ + TerminalNode addChild(TerminalNode t) { + t.parent = this; + return addAnyChild(t); + } + + /// Add an error node child and force its parent to be this node. 
+ /// + /// @since 4.7 + ErrorNode addErrorNode(ErrorNode errorNode) { + errorNode.parent = this; + return addAnyChild(errorNode); + } + + /// Used by enterOuterAlt to toss out a RuleContext previously added as + /// we entered a rule. If we have # label, we will need to remove + /// generic ruleContext object. + void removeLastChild() { + if (children != null) { + children!.removeLast(); + } + } + + // Override to make type more specific + @override + ParserRuleContext? get parent { + return super.parent as ParserRuleContext?; + } + + @override + ParseTree? getChild(int i) { + if (children == null || i < 0 || i >= children!.length) { + return null; + } + if (T == dynamic) { + return children![i]; + } + var j = -1; // what element have we found with ctxType? + for (var o in children!) { + if (o is T) { + j++; + if (j == i) { + return o; + } + } + } + return null; + } + + TerminalNode? getToken(int ttype, int i) { + if (children == null || i < 0 || i >= children!.length) { + return null; + } + + var j = -1; // what token with ttype have we found? + for (var o in children!) { + if (o is TerminalNode) { + final tnode = o; + final symbol = tnode.symbol; + if (symbol.type == ttype) { + j++; + if (j == i) { + return tnode; + } + } + } + } + + return null; + } + + List getTokens(int ttype) { + if (children == null) { + return []; + } + + var tokens = []; + for (var o in children!) { + if (o is TerminalNode) { + final tnode = o; + final symbol = tnode.symbol; + if (symbol.type == ttype) { + tokens.add(tnode); + } + } + } + + return tokens; + } + + T? getRuleContext(int i) { + return getChild(i) as T?; + } + + List getRuleContexts() { + if (children == null) { + return []; + } + + var contexts = []; + for (var o in children!) { + if (o is T) { + contexts.add(o); + } + } + + return contexts; + } + + @override + int get childCount => children?.length ?? 
0; + + @override + Interval get sourceInterval { + if (stop == null || stop!.tokenIndex < start!.tokenIndex) { + return Interval(start!.tokenIndex, start!.tokenIndex - 1); // empty + } + return Interval(start!.tokenIndex, stop!.tokenIndex); + } + + /// Used for rule context info debugging during parse-time, not so much for ATN debugging */ + String toInfoString(Parser recognizer) { + final rules = recognizer.getRuleInvocationStack(this); + + return "ParserRuleContext${rules.reversed}{start=$start, stop=$stop}'"; + } + + static final EMPTY = ParserRuleContext(); +} + +/// This class extends [ParserRuleContext] by allowing the value of +/// {@link #getRuleIndex} to be explicitly set for the context. +/// +///

    +/// [ParserRuleContext] does not include field storage for the rule index +/// since the context classes created by the code generator override the +/// {@link #getRuleIndex} method to return the correct value for that context. +/// Since the parser interpreter does not use the context classes generated for a +/// parser, this class (with slightly more memory overhead per node) is used to +/// provide equivalent functionality.

    +class InterpreterRuleContext extends ParserRuleContext { + @override + int ruleIndex = -1; + + /// Constructs a new [InterpreterRuleContext] with the specified + /// parent, invoking state, and rule index. + /// + /// @param parent The parent context. + /// @param invokingStateNumber The invoking state number. + /// @param ruleIndex The rule index for the current context. + InterpreterRuleContext( + ParserRuleContext? parent, + int invokingStateNumber, + this.ruleIndex, + ) : super(parent, invokingStateNumber); +} diff --git a/runtime/Dart/lib/src/prediction_context.dart b/runtime/Dart/lib/src/prediction_context.dart new file mode 100644 index 0000000000..cd0965440c --- /dev/null +++ b/runtime/Dart/lib/src/prediction_context.dart @@ -0,0 +1,905 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'package:collection/collection.dart'; + +import 'atn/atn.dart'; +import 'misc/misc.dart'; +import 'recognizer.dart'; +import 'rule_context.dart'; +import 'util/murmur_hash.dart'; + +abstract class PredictionContext { + /// Represents {@code $} in an array in full context mode, when {@code $} + /// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, + /// {@code $} = {@link #EMPTY_RETURN_STATE}. + static final int EMPTY_RETURN_STATE = 0x7FFFFFFF; + + static final int INITIAL_HASH = 1; + + static int globalNodeCount = 0; + int id = globalNodeCount++; + + /// Stores the computed hash code of this [PredictionContext]. The hash + /// code is computed in parts to match the following reference algorithm. + /// + ///
    +  ///   int referenceHashCode() {
    +  ///      int hash = {@link MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
    +  ///
    +  ///      for (int i = 0; i < {@link #size()}; i++) {
    +  ///          hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
    +  ///      }
    +  ///
    +  ///      for (int i = 0; i < {@link #size()}; i++) {
    +  ///          hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
    +  ///      }
    +  ///
    +  ///      hash = {@link MurmurHash#finish MurmurHash.finish}(hash, 2 * {@link #size()});
    +  ///      return hash;
    +  ///  }
    +  /// 
    + final int cachedHashCode; + + PredictionContext(this.cachedHashCode); + + /// Convert a [RuleContext] tree to a [PredictionContext] graph. + /// Return {@link #EMPTY} if [outerContext] is empty or null. + static PredictionContext fromRuleContext(ATN atn, RuleContext? outerContext) { + outerContext ??= RuleContext.EMPTY; + + // if we are in RuleContext of start rule, s, then PredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if (outerContext.parent == null || outerContext == RuleContext.EMPTY) { + return EmptyPredictionContext.Instance; + } + + // If we have a parent, convert it to a PredictionContext graph + PredictionContext parent = EmptyPredictionContext.Instance; + parent = PredictionContext.fromRuleContext(atn, outerContext.parent); + + final state = atn.states[outerContext.invokingState]!; + final transition = state.transition(0) as RuleTransition; + return SingletonPredictionContext.create( + parent, + transition.followState.stateNumber, + ); + } + + int get length; + + PredictionContext? getParent(int index); + + int getReturnState(int index); + + /// This means only the {@link #EMPTY} (wildcard? not sure) context is in set. 
*/ + bool get isEmpty { + return this == EmptyPredictionContext.Instance; + } + + bool hasEmptyPath() { + // since EMPTY_RETURN_STATE can only appear in the last position, we check last one + return getReturnState(length - 1) == EMPTY_RETURN_STATE; + } + + @override + int get hashCode { + return cachedHashCode; + } + + @override + bool operator ==(Object obj); + + static int calculateEmptyHashCode() { + var hash = MurmurHash.initialize(INITIAL_HASH); + hash = MurmurHash.finish(hash, 0); + return hash; + } + + static int calculateHashCode( + List parents, List returnStates) { + var hash = MurmurHash.initialize(INITIAL_HASH); + + for (var parent in parents) { + hash = MurmurHash.update(hash, parent); + } + + for (var returnState in returnStates) { + hash = MurmurHash.update(hash, returnState); + } + + hash = MurmurHash.finish(hash, 2 * parents.length); + return hash; + } + + // dispatch + static PredictionContext merge( + PredictionContext a, + PredictionContext b, + bool rootIsWildcard, + Map, PredictionContext>? + mergeCache, + ) { + // share same graph if both same + if (a == b || a == b) return a; + + if (a is SingletonPredictionContext && b is SingletonPredictionContext) { + return mergeSingletons(a, b, rootIsWildcard, mergeCache); + } + + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as * wildcard + if (rootIsWildcard) { + if (a is EmptyPredictionContext) return a; + if (b is EmptyPredictionContext) return b; + } + + // convert singleton so both are arrays to normalize + if (a is SingletonPredictionContext) { + a = ArrayPredictionContext.of(a); + } + if (b is SingletonPredictionContext) { + b = ArrayPredictionContext.of(b); + } + return mergeArrays( + a as ArrayPredictionContext, + b as ArrayPredictionContext, + rootIsWildcard, + mergeCache, + ); + } + + /// Merge two [SingletonPredictionContext] instances. + /// + ///

    Stack tops equal, parents merge is same; return left graph.
    + ///

    + /// + ///

    Same stack top, parents differ; merge parents giving array node, then + /// remainders of those graphs. A new root node is created to point to the + /// merged parents.
    + ///

    + /// + ///

    Different stack tops pointing to same parent. Make array node for the + /// root where both element in the root point to the same (original) + /// parent.
    + ///

    + /// + ///

    Different stack tops pointing to different parents. Make array node for + /// the root where each element points to the corresponding original + /// parent.
    + ///

    + /// + /// @param a the first [SingletonPredictionContext] + /// @param b the second [SingletonPredictionContext] + /// @param rootIsWildcard [true] if this is a local-context merge, + /// otherwise false to indicate a full-context merge + /// @param mergeCache + static PredictionContext mergeSingletons( + SingletonPredictionContext a, + SingletonPredictionContext b, + bool rootIsWildcard, + Map, PredictionContext>? + mergeCache, + ) { + if (mergeCache != null) { + var previous = mergeCache[Pair(a, b)]; + if (previous != null) return previous; + previous = mergeCache[Pair(b, a)]; + if (previous != null) return previous; + } + + final rootMerge = mergeRoot(a, b, rootIsWildcard); + if (rootMerge != null) { + if (mergeCache != null) mergeCache[Pair(a, b)] = rootMerge; + return rootMerge; + } + + if (a.returnState == b.returnState) { + assert(a.parent != null && + b.parent != null); // must be empty context, never null + + // a == b + final parent = merge(a.parent!, b.parent!, rootIsWildcard, mergeCache); + // if parent is same as existing a or b parent or reduced to a parent, return it + if (parent == a.parent) return a; // ax + bx = ax, if a=b + if (parent == b.parent) return b; // ax + bx = bx, if a=b + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array + // new joined parent so create new singleton pointing to it, a' + PredictionContext a_ = + SingletonPredictionContext.create(parent, a.returnState); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } else { + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + PredictionContext? 
singleParent; + if (a == b || (a.parent != null && a.parent == b.parent)) { + // ax + bx = [a,b]x + singleParent = a.parent; + } + if (singleParent != null) { + // parents are same + // sort payloads and use same parent + final payloads = [a.returnState, b.returnState]; + if (a.returnState > b.returnState) { + payloads[0] = b.returnState; + payloads[1] = a.returnState; + } + final parents = [singleParent, singleParent]; + PredictionContext a_ = ArrayPredictionContext(parents, payloads); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } + // parents differ and can't merge them. Just pack together + // into array; can't merge. + // ax + by = [ax,by] + final payloads = [a.returnState, b.returnState]; + var parents = [a.parent, b.parent]; + if (a.returnState > b.returnState) { + // sort by payload + payloads[0] = b.returnState; + payloads[1] = a.returnState; + parents = [b.parent, a.parent]; + } + PredictionContext a_ = ArrayPredictionContext(parents, payloads); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } + } + + /// Handle case where at least one of [a] or [b] is + /// {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used + /// to represent {@link #EMPTY}. + /// + ///

    Local-Context Merges

    + /// + ///

    These local-context merge operations are used when [rootIsWildcard] + /// is true.

    + /// + ///

    {@link #EMPTY} is superset of any graph; return {@link #EMPTY}.
    + ///

    + /// + ///

    {@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is + /// {@code #EMPTY}; return left graph.
    + ///

    + /// + ///

    Special case of last merge if local context.
    + ///

    + /// + ///

    Full-Context Merges

    + /// + ///

    These full-context merge operations are used when [rootIsWildcard] + /// is false.

    + /// + ///

    + /// + ///

    Must keep all contexts; {@link #EMPTY} in array is a special value (and + /// null parent).
    + ///

    + /// + ///

    + /// + /// @param a the first [SingletonPredictionContext] + /// @param b the second [SingletonPredictionContext] + /// @param rootIsWildcard [true] if this is a local-context merge, + /// otherwise false to indicate a full-context merge + static PredictionContext? mergeRoot( + SingletonPredictionContext a, + SingletonPredictionContext b, + bool rootIsWildcard, + ) { + if (rootIsWildcard) { + if (a == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // * + b = * + if (b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // a + * = * + } else { + if (a == EmptyPredictionContext.Instance && b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // $ + $ = $ + if (a == EmptyPredictionContext.Instance) { + // $ + x = [x,$] + final payloads = [b.returnState, EMPTY_RETURN_STATE]; + final parents = [b.parent, null]; + PredictionContext joined = ArrayPredictionContext(parents, payloads); + return joined; + } + if (b == EmptyPredictionContext.Instance) { + // x + $ = [x,$] ($ is always last if present) + final payloads = [a.returnState, EMPTY_RETURN_STATE]; + final parents = [a.parent, null]; + PredictionContext joined = ArrayPredictionContext(parents, payloads); + return joined; + } + } + return null; + } + + /// Merge two [ArrayPredictionContext] instances. + /// + ///

    Different tops, different parents.
    + ///

    + /// + ///

    Shared top, same parents.
    + ///

    + /// + ///

    Shared top, different parents.
    + ///

    + /// + ///

    Shared top, all shared parents.
    + ///

    + /// + ///

    Equal tops, merge parents and reduce top to + /// [SingletonPredictionContext].
    + ///

    + static PredictionContext mergeArrays( + ArrayPredictionContext a, + ArrayPredictionContext b, + bool rootIsWildcard, + Map, PredictionContext>? + mergeCache, + ) { + if (mergeCache != null) { + var previous = mergeCache[Pair(a, b)]; + if (previous != null) return previous; + previous = mergeCache[Pair(b, a)]; + if (previous != null) return previous; + } + + // merge sorted payloads a + b => M + var i = 0; // walks a + var j = 0; // walks b + var k = 0; // walks target M array + + var mergedReturnStates = List.filled( + a.returnStates.length + b.returnStates.length, + 0, + ); // TODO Will it grow? + var mergedParents = List.filled( + a.returnStates.length + b.returnStates.length, + null, + ); // TODO Will it grow? + // walk and merge to yield mergedParents, mergedReturnStates + while (i < a.returnStates.length && j < b.returnStates.length) { + final a_parent = a.parents[i]; + final b_parent = b.parents[j]; + if (a.returnStates[i] == b.returnStates[j]) { + // same payload (stack tops are equal), must yield merged singleton + final payload = a.returnStates[i]; + // $+$ = $ + final both$ = payload == EMPTY_RETURN_STATE && + a_parent == null && + b_parent == null; + final ax_ax = (a_parent != null && b_parent != null) && + a_parent == b_parent; // ax+ax -> ax + if (both$ || ax_ax) { + mergedParents[k] = a_parent; // choose left + mergedReturnStates[k] = payload; + } else { + // ax+ay -> a'[x,y] + final mergedParent = + merge(a_parent!, b_parent!, rootIsWildcard, mergeCache); + mergedParents[k] = mergedParent; + mergedReturnStates[k] = payload; + } + i++; // hop over left one as usual + j++; // but also skip one in right side since we merge + } else if (a.returnStates[i] < b.returnStates[j]) { + // copy a[i] to M + mergedParents[k] = a_parent; + mergedReturnStates[k] = a.returnStates[i]; + i++; + } else { + // b > a, copy b[j] to M + mergedParents[k] = b_parent; + mergedReturnStates[k] = b.returnStates[j]; + j++; + } + k++; + } + + // copy over any payloads 
remaining in either array + if (i < a.returnStates.length) { + for (var p = i; p < a.returnStates.length; p++) { + mergedParents[k] = a.parents[p]; + mergedReturnStates[k] = a.returnStates[p]; + k++; + } + } else { + for (var p = j; p < b.returnStates.length; p++) { + mergedParents[k] = b.parents[p]; + mergedReturnStates[k] = b.returnStates[p]; + k++; + } + } + + // trim merged if we combined a few that had same stack tops + if (k < mergedParents.length) { + // write index < last position; trim + if (k == 1) { + // for just one merged element, return singleton top + PredictionContext a_ = SingletonPredictionContext.create( + mergedParents[0]!, + mergedReturnStates[0], + ); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } + + mergedParents = List.generate(k, (n) => mergedParents[n]); + mergedReturnStates = List.generate(k, (n) => mergedReturnStates[n]); + } + + PredictionContext M = ArrayPredictionContext( + mergedParents, + mergedReturnStates, + ); + + // if we created same array as a or b, return that instead + // TODO: track whether this is possible above during merge sort for speed + if (M == a) { + if (mergeCache != null) mergeCache[Pair(a, b)] = a; + return a; + } + if (M == b) { + if (mergeCache != null) mergeCache[Pair(a, b)] = b; + return b; + } + + combineCommonParents(mergedParents); + + if (mergeCache != null) mergeCache[Pair(a, b)] = M; + return M; + } + + /// Make pass over all M [parents]; merge any {@code equals()} + /// ones. + static void combineCommonParents(List parents) { + final uniqueParents = {}; + + for (var p = 0; p < parents.length; p++) { + final parent = parents[p]; + if (parent != null && !uniqueParents.containsKey(parent)) { + // don't replace + uniqueParents[parent] = parent; + } + } + + for (var p = 0; p < parents.length; p++) { + parents[p] = uniqueParents[parents[p]]!; + } + } + + static String toDOTString(PredictionContext? 
context) { + if (context == null) return ''; + final buf = StringBuffer(); + buf.write('digraph G {\n'); + buf.write('rankdir=LR;\n'); + + final nodes = getAllContextNodes(context); + nodes.sort((PredictionContext o1, PredictionContext o2) { + return o1.id - o2.id; + }); + + for (var current in nodes) { + if (current is SingletonPredictionContext) { + final s = current.id.toString(); + buf.write(' s'); + buf.write(s); + var returnState = current.getReturnState(0).toString(); + if (current is EmptyPredictionContext) returnState = r'$'; + buf.write(' [label=\"'); + buf.write(returnState); + buf.write('\"];\n'); + continue; + } + final arr = current as ArrayPredictionContext; + buf.write(' s'); + buf.write(arr.id); + buf.write(' [shape=box, label=\"'); + buf.write('['); + var first = true; + for (var inv in arr.returnStates) { + if (!first) buf.write(', '); + if (inv == EMPTY_RETURN_STATE) { + buf.write(r'$'); + } else { + buf.write(inv); + } + first = false; + } + buf.write(']'); + buf.write('\"];\n'); + } + + for (var current in nodes) { + if (current == EmptyPredictionContext.Instance) continue; + for (var i = 0; i < current.length; i++) { + if (current.getParent(i) == null) continue; + final s = current.id.toString(); + buf.write(' s'); + buf.write(s); + buf.write('->'); + buf.write('s'); + buf.write(current.getParent(i)?.id); + if (current.length > 1) { + buf.write(' [label=\"parent[$i]\"];\n'); + } else { + buf.write(';\n'); + } + } + } + + buf.write('}\n'); + return buf.toString(); + } + + // From Sam + static PredictionContext getCachedContext( + PredictionContext context, + PredictionContextCache contextCache, + Map visited, + ) { + if (context.isEmpty) { + return context; + } + + var existing = visited[context]; + if (existing != null) { + return existing; + } + + existing = contextCache[context]; + if (existing != null) { + visited[context] = existing; + return existing; + } + + var changed = false; + var parents = []; + for (var i = 0; i < parents.length; 
i++) { + final parent = getCachedContext( + context.getParent(i)!, + contextCache, + visited, + ); + if (changed || parent != context.getParent(i)) { + if (!changed) { + parents = []; + for (var j = 0; j < context.length; j++) { + parents.add(context.getParent(j)!); + } + + changed = true; + } + + parents[i] = parent; + } + } + + if (!changed) { + contextCache.add(context); + visited[context] = context; + return context; + } + + PredictionContext updated; + if (parents.isEmpty) { + updated = EmptyPredictionContext.Instance; + } else if (parents.length == 1) { + updated = SingletonPredictionContext.create( + parents[0], context.getReturnState(0)); + } else { + final arrayPredictionContext = context as ArrayPredictionContext; + updated = ArrayPredictionContext( + parents, + arrayPredictionContext.returnStates, + ); + } + + contextCache.add(updated); + visited[updated] = updated; + visited[context] = updated; + + return updated; + } + +// // extra structures, but cut/paste/morphed works, so leave it. +// // seems to do a breadth-first walk +// static List getAllNodes(PredictionContext context) { +// Map visited = +// new IdentityHashMap(); +// Deque workList = new ArrayDeque(); +// workList.add(context); +// visited.put(context, context); +// List nodes = new ArrayList(); +// while (!workList.isEmpty) { +// PredictionContext current = workList.pop(); +// nodes.add(current); +// for (int i = 0; i < current.length; i++) { +// PredictionContext parent = current.getParent(i); +// if ( parent!=null && visited.put(parent, parent) == null) { +// workList.push(parent); +// } +// } +// } +// return nodes; +// } + + // ter's recursive version of Sam's getAllNodes() + static List getAllContextNodes(PredictionContext context) { + final nodes = []; + final visited = {}; + getAllContextNodes_(context, nodes, visited); + return nodes; + } + + static void getAllContextNodes_( + PredictionContext? 
context, + List nodes, + Map visited, + ) { + if (context == null || visited.containsKey(context)) return; + visited[context] = context; + nodes.add(context); + for (var i = 0; i < context.length; i++) { + getAllContextNodes_(context.getParent(i), nodes, visited); + } + } + + // FROM SAM + List toStrings( + Recognizer? recognizer, + PredictionContext stop, + int currentState, + ) { + final result = []; + + outer: + for (var perm = 0;; perm++) { + var offset = 0; + var last = true; + var p = this; + var stateNumber = currentState; + final localBuffer = StringBuffer(); + localBuffer.write('['); + while (!p.isEmpty && p != stop) { + var index = 0; + if (p.length > 0) { + var bits = 1; + while ((1 << bits) < p.length) { + bits++; + } + + final mask = (1 << bits) - 1; + index = (perm >> offset) & mask; + last &= index >= p.length - 1; + if (index >= p.length) { + continue outer; + } + offset += bits; + } + + if (recognizer != null) { + if (localBuffer.length > 1) { + // first char is '[', if more than that this isn't the first rule + localBuffer.write(' '); + } + + final atn = recognizer.getATN(); + final s = atn.states[stateNumber]!; + final ruleName = recognizer.ruleNames[s.ruleIndex]; + localBuffer.write(ruleName); + } else if (p.getReturnState(index) != EMPTY_RETURN_STATE) { + if (!p.isEmpty) { + if (localBuffer.length > 1) { + // first char is '[', if more than that this isn't the first rule + localBuffer.write(' '); + } + + localBuffer.write(p.getReturnState(index)); + } + } + stateNumber = p.getReturnState(index); + p = p.getParent(index) ?? EmptyPredictionContext.Instance; + } + localBuffer.write(']'); + result.add(localBuffer.toString()); + + if (last) { + break; + } + } + + return result; + } +} + +class SingletonPredictionContext extends PredictionContext { + final PredictionContext? parent; + final int returnState; + + SingletonPredictionContext(this.parent, this.returnState) + : super(parent != null + ? 
PredictionContext.calculateHashCode([parent], [returnState]) + : PredictionContext.calculateEmptyHashCode()) { + assert(returnState != ATNState.INVALID_STATE_NUMBER); + } + + static SingletonPredictionContext create( + PredictionContext? parent, + int returnState, + ) { + if (returnState == PredictionContext.EMPTY_RETURN_STATE && parent == null) { + // someone can pass in the bits of an array ctx that mean $ + return EmptyPredictionContext.Instance; + } + return SingletonPredictionContext(parent, returnState); + } + + @override + int get length { + return 1; + } + + @override + PredictionContext? getParent(int index) { + assert(index == 0); + return parent; + } + + @override + int getReturnState(int index) { + assert(index == 0); + return returnState; + } + + @override + bool operator ==(Object o) { + if (identical(this, o)) { + return true; + } else if (o is SingletonPredictionContext) { + if (hashCode != o.hashCode) { + return false; // can't be same if hash is different + } + + final s = o; + return returnState == s.returnState && + (parent != null && parent == s.parent); + } + return false; + } + + @override + String toString() { + final up = parent != null ? parent.toString() : ''; + if (up.isEmpty) { + if (returnState == PredictionContext.EMPTY_RETURN_STATE) { + return r'$'; + } + return returnState.toString(); + } + return '$returnState $up'; + } +} + +class EmptyPredictionContext extends SingletonPredictionContext { + /// Represents {@code $} in local context prediction, which means wildcard. + /// {@code *+x = *}. + static final EmptyPredictionContext Instance = EmptyPredictionContext(); + + EmptyPredictionContext() : super(null, PredictionContext.EMPTY_RETURN_STATE); + + @override + bool get isEmpty { + return true; + } + + @override + int get length { + return 1; + } + + @override + PredictionContext? 
getParent(int index) { + return null; + } + + @override + int getReturnState(int index) { + return returnState; + } + + @override + String toString() { + return r'$'; + } +} + +class ArrayPredictionContext extends PredictionContext { + /// Parent can be null only if full ctx mode and we make an array + /// from {@link #EMPTY} and non-empty. We merge {@link #EMPTY} by using null parent and + /// returnState == {@link #EMPTY_RETURN_STATE}. + List parents; + + /// Sorted for merge, no duplicates; if present, + /// {@link #EMPTY_RETURN_STATE} is always last. + List returnStates; + + ArrayPredictionContext.of(SingletonPredictionContext a) + : this([a.parent], [a.returnState]); + + ArrayPredictionContext( + // Todo: this generic should be null this wont change + this.parents, + this.returnStates, + ) : assert(parents.isNotEmpty), + assert(returnStates.isNotEmpty), + super(PredictionContext.calculateHashCode(parents, returnStates)); + + @override + bool get isEmpty { + // since EMPTY_RETURN_STATE can only appear in the last position, we + // don't need to verify that size==1 + return returnStates[0] == PredictionContext.EMPTY_RETURN_STATE; + } + + @override + int get length { + return returnStates.length; + } + + @override + PredictionContext? 
getParent(int index) { + return parents[index]; + } + + @override + int getReturnState(int index) { + return returnStates[index]; + } + +// int findReturnState(int returnState) { +// return Arrays.binarySearch(returnStates, returnState); +// } + + @override + bool operator ==(Object o) { + if (identical(this, o)) { + return true; + } else if (o is ArrayPredictionContext) { + if (hashCode != o.hashCode) { + return false; // can't be same if hash is different + } + + final a = o; + return ListEquality().equals(returnStates, a.returnStates) && + ListEquality().equals(parents, a.parents); + } + return false; + } + + @override + String toString() { + if (isEmpty) return '[]'; + final buf = StringBuffer(); + buf.write('['); + for (var i = 0; i < returnStates.length; i++) { + if (i > 0) buf.write(', '); + if (returnStates[i] == PredictionContext.EMPTY_RETURN_STATE) { + buf.write(r'$'); + continue; + } + buf.write(returnStates[i]); + if (parents[i] != null) { + buf.write(' '); + buf.write(parents[i].toString()); + } else { + buf.write('null'); + } + } + buf.write(']'); + return buf.toString(); + } +} diff --git a/runtime/Dart/lib/src/recognizer.dart b/runtime/Dart/lib/src/recognizer.dart new file mode 100644 index 0000000000..a46edbe3bd --- /dev/null +++ b/runtime/Dart/lib/src/recognizer.dart @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'vocabulary.dart'; +import 'atn/atn.dart'; +import 'error/error.dart'; +import 'input_stream.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'token_factory.dart'; +import 'util/utils.dart'; + +abstract class Recognizer { + static const EOF = -1; + + static final Map> tokenTypeMapCache = {}; + static final Map, Map> ruleIndexMapCache = {}; + final List _listeners = [ConsoleErrorListener.INSTANCE]; + + /// The ATN interpreter used by the recognizer for prediction. + ATNInterpreter? interpreter; + int state = -1; + + List get ruleNames; + + /// Get the vocabulary used by the recognizer. + /// + /// @return A [Vocabulary] instance providing information about the + /// vocabulary used by the grammar. + Vocabulary get vocabulary; + + /// Get a map from token names to token types. + /// + ///

    Used for XPath and tree pattern compilation.

    + Map get tokenTypeMap { + final _vocabulary = vocabulary; + + var result = tokenTypeMapCache[_vocabulary]; + if (result == null) { + result = {}; + for (var i = 0; i <= getATN().maxTokenType; i++) { + final literalName = _vocabulary.getLiteralName(i); + if (literalName != null) { + result[literalName] = i; + } + + final symbolicName = _vocabulary.getSymbolicName(i); + if (symbolicName != null) { + result[symbolicName] = i; + } + } + + result['EOF'] = Token.EOF; + result = Map.unmodifiable(result); + tokenTypeMapCache[_vocabulary] = result; + } + + return result; + } + + /// Get a map from rule names to rule indexes. + /// + ///

    Used for XPath and tree pattern compilation.

    + Map get ruleIndexMap { + var result = ruleIndexMapCache[ruleNames]; + if (result == null) { + result = Map.unmodifiable(toMap(ruleNames)); + ruleIndexMapCache[ruleNames] = result; + } + + return result; + } + + int getTokenType(String tokenName) { + final ttype = tokenTypeMap[tokenName]; + if (ttype != null) return ttype; + return Token.INVALID_TYPE; + } + + /// If this recognizer was generated, it will have a serialized ATN + /// representation of the grammar. + /// + ///

    For interpreters, we don't know their serialized ATN despite having + /// created the interpreter from it.

    + List get serializedATN { + throw UnsupportedError('there is no serialized ATN'); + } + + /// For debugging and other purposes, might want the grammar name. + /// Have ANTLR generate an implementation for this method. + String get grammarFileName; + + /// Get the [ATN] used by the recognizer for prediction. + /// + /// @return The [ATN] used by the recognizer for prediction. + ATN getATN(); + + /// If profiling during the parse/lex, this will return DecisionInfo records + /// for each decision in recognizer in a ParseInfo object. + /// + /// @since 4.3 + ParseInfo? get parseInfo { + return null; + } + + /// What is the error header, normally line/character position information? */ + String getErrorHeader(RecognitionException e) { + final line = e.offendingToken.line; + final charPositionInLine = e.offendingToken.charPositionInLine; + return 'line $line:$charPositionInLine'; + } + + void addErrorListener( + ErrorListener listener, + ) { + _listeners.add(listener); + } + + void removeErrorListener(ErrorListener listener) { + _listeners.remove(listener); + } + + void removeErrorListeners() { + _listeners.clear(); + } + + List get errorListeners { + return _listeners; + } + + ErrorListener get errorListenerDispatch { + return ProxyErrorListener(errorListeners); + } + + // subclass needs to override these if there are sempreds or actions + // that the ATN interp needs to execute + bool sempred(RuleContext? _localctx, int ruleIndex, int actionIndex) { + return true; + } + + bool precpred(RuleContext? localctx, int precedence) { + return true; + } + + void action(RuleContext? 
_localctx, int ruleIndex, int actionIndex) {} + + IntStream get inputStream; + + set inputStream(covariant IntStream input); + + TokenFactory get tokenFactory; + + set tokenFactory(TokenFactory input); +} diff --git a/runtime/Dart/lib/src/rule_context.dart b/runtime/Dart/lib/src/rule_context.dart new file mode 100644 index 0000000000..3ba60db4b4 --- /dev/null +++ b/runtime/Dart/lib/src/rule_context.dart @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'atn/atn.dart'; +import 'interval_set.dart'; +import 'parser.dart'; +import 'parser_rule_context.dart'; +import 'recognizer.dart'; +import 'tree/tree.dart'; + +/// A rule context is a record of a single rule invocation. +/// +/// We form a stack of these context objects using the parent +/// pointer. A parent pointer of null indicates that the current +/// context is the bottom of the stack. The ParserRuleContext subclass +/// as a children list so that we can turn this data structure into a +/// tree. +/// +/// The root node always has a null pointer and invokingState of -1. +/// +/// Upon entry to parsing, the first invoked rule function creates a +/// context object (a subclass specialized for that rule such as +/// SContext) and makes it the root of a parse tree, recorded by field +/// Parser._ctx. +/// +/// public final SContext s() throws RecognitionException { +/// SContext _localctx = new SContext(_ctx, getState()); <-- create new node +/// enterRule(_localctx, 0, RULE_s); <-- push it +/// ... +/// exitRule(); <-- pop back to _localctx +/// return _localctx; +/// } +/// +/// A subsequent rule invocation of r from the start rule s pushes a +/// new context object for r whose parent points at s and use invoking +/// state is the state with r emanating as edge label. 
+/// +/// The invokingState fields from a context object to the root +/// together form a stack of rule indication states where the root +/// (bottom of the stack) has a -1 sentinel value. If we invoke start +/// symbol s then call r1, which calls r2, the would look like +/// this: +/// +/// SContext[-1] <- root node (bottom of the stack) +/// R1Context[p] <- p in rule s called r1 +/// R2Context[q] <- q in rule r1 called r2 +/// +/// So the top of the stack, _ctx, represents a call to the current +/// rule and it holds the return address from another rule that invoke +/// to this rule. To invoke a rule, we must always have a current context. +/// +/// The parent contexts are useful for computing lookahead sets and +/// getting error information. +/// +/// These objects are used during parsing and prediction. +/// For the special case of parsers, we use the subclass +/// ParserRuleContext. +/// +/// @see ParserRuleContext +abstract class RuleContext extends RuleNode { + /// What context invoked this rule? + RuleContext? _parent; + + /// What state invoked the rule associated with this context? + /// The "return address" is the followState of invokingState + /// If parent is null, this should be -1. + int invokingState; + + RuleContext({RuleContext? parent, int? invokingState}) + : _parent=parent, invokingState = invokingState ?? -1; + + int depth() { + var n = 0; + RuleContext? p = this; + while (p != null) { + p = p.parent; + n++; + } + return n; + } + + @override + // Work around for https://github.com/antlr/antlr4/issues/3248 + // ignore: unnecessary_getters_setters + RuleContext? get parent => _parent; + + @override + // Work around for https://github.com/antlr/antlr4/issues/3248 + // ignore: unnecessary_getters_setters + set parent(RuleContext? parent) { + _parent = parent; + } + + /// A context is empty if there is no invoking state; meaning nobody call + /// current context. 
+ bool get isEmpty => invokingState == -1; + + /// satisfy the ParseTree / SyntaxTree interface + @override + Interval get sourceInterval => Interval.INVALID; + + @override + RuleContext get ruleContext => this; + + @override + RuleContext get payload => this; + + /// Return the combined text of all child nodes. This method only considers + /// tokens which have been added to the parse tree. + ///

    + /// Since tokens on hidden channels (e.g. whitespace or comments) are not + /// added to the parse trees, they will not appear in the output of this + /// method. + @override + String get text { + if (childCount == 0) { + return ''; + } + + final builder = StringBuffer(); + for (var i = 0; i < childCount; i++) { + builder.write(getChild(i)!.text); + } + + return builder.toString(); + } + + int get ruleIndex => -1; + + /// For rule associated with this parse tree internal node, return + /// the outer alternative number used to match the input. Default + /// implementation does not compute nor store this alt num. Create + /// a subclass of ParserRuleContext with backing field and set + /// option contextSuperClass. + /// to set it. + int get altNumber => ATN.INVALID_ALT_NUMBER; + + /// Set the outer alternative number for this context node. Default + /// implementation does nothing to avoid backing field overhead for + /// trees that don't need it. Create + /// a subclass of ParserRuleContext with backing field and set + /// option contextSuperClass. + set altNumber(int altNumber) {} + + @override + ParseTree? getChild(int i) { + return null; + } + + @override + int get childCount => 0; + + @override + T? accept(ParseTreeVisitor visitor) { + return visitor.visitChildren(this); + } + + /// Print out a whole tree, not just a node, in LISP format + /// (root child1 .. childN). Print just a node if this is a leaf. + /// + @override + String toStringTree({List? ruleNames, Parser? parser}) { + return Trees.toStringTree(this, ruleNames: ruleNames, recog: parser); + } + + @override + String toString({ + List? ruleNames, + Recognizer? recog, + RuleContext? stop, + }) { + ruleNames = ruleNames ?? recog?.ruleNames; + final buf = StringBuffer(); + RuleContext? 
p = this; + buf.write('['); + while (p != null && p != stop) { + if (ruleNames == null) { + if (!p.isEmpty) { + buf.write(p.invokingState); + } + } else { + final ruleIndex = p.ruleIndex; + final ruleName = ruleIndex >= 0 && ruleIndex < ruleNames.length + ? ruleNames[ruleIndex] + : ruleIndex.toString(); + buf.write(ruleName); + } + + if (p.parent != null && (ruleNames != null || !p.parent!.isEmpty)) { + buf.write(' '); + } + + p = p.parent; + } + + buf.write(']'); + return buf.toString(); + } + + static final EMPTY = ParserRuleContext(); +} diff --git a/runtime/Dart/lib/src/runtime_meta_data.dart b/runtime/Dart/lib/src/runtime_meta_data.dart new file mode 100644 index 0000000000..597ff94573 --- /dev/null +++ b/runtime/Dart/lib/src/runtime_meta_data.dart @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:developer'; +import 'dart:math' as math; + +import 'package:logging/logging.dart'; + +/// This class provides access to the current version of the ANTLR 4 runtime +/// library as compile-time and runtime constants, along with methods for +/// checking for matching version numbers and notifying listeners in the case +/// where a version mismatch is detected. +/// +///

    +/// The runtime version information is provided by {@link #VERSION} and +/// {@link #getRuntimeVersion()}. Detailed information about these values is +/// provided in the documentation for each member.

    +/// +///

    +/// The runtime version check is implemented by {@link #checkVersion}. Detailed +/// information about incorporating this call into user code, as well as its use +/// in generated code, is provided in the documentation for the method.

    +/// +///

    +/// Version strings x.y and x.y.z are considered "compatible" and no error +/// would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are +/// considered "compatible" because the major and minor components x.y +/// are the same in each.

    +/// +///

    +/// To trap any error messages issued by this code, use System.setErr() +/// in your main() startup code. +///

    +/// +/// @since 4.3 +class RuntimeMetaData { + /// A compile-time constant containing the current version of the ANTLR 4 + /// runtime library. + /// + ///

    + /// This compile-time constant value allows generated parsers and other + /// libraries to include a literal reference to the version of the ANTLR 4 + /// runtime library the code was compiled against. At each release, we + /// change this value.

    + /// + ///

    Version numbers are assumed to have the form + /// + /// major.minor.patch.revision-suffix, + /// + /// with the individual components defined as follows.

    + /// + ///
      + ///
    • major is a required non-negative integer, and is equal to + /// {@code 4} for ANTLR 4.
    • + ///
    • minor is a required non-negative integer.
    • + ///
    • patch is an optional non-negative integer. When + /// patch is omitted, the {@code .} (dot) appearing before it is + /// also omitted.
    • + ///
    • revision is an optional non-negative integer, and may only + /// be included when patch is also included. When revision + /// is omitted, the {@code .} (dot) appearing before it is also omitted.
    • + ///
    • suffix is an optional string. When suffix is + /// omitted, the {@code -} (hyphen-minus) appearing before it is also + /// omitted.
    • + ///
    + static final String VERSION = '4.13.2'; + + /// Gets the currently executing version of the ANTLR 4 runtime library. + /// + ///

    + /// This method provides runtime access to the [VERSION] field, as + /// opposed to directly referencing the field as a compile-time constant.

    + /// + /// @return The currently executing version of the ANTLR 4 library + static String get runtimeVersion { + return VERSION; + } + + /// This method provides the ability to detect mismatches between the version + /// of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a + /// parser was compiled against, and the version of the ANTLR runtime which + /// is currently executing. + /// + ///

    + /// The version check is designed to detect the following two specific + /// scenarios.

    + /// + ///
      + ///
    • The ANTLR Tool version used for code generation does not match the + /// currently executing runtime version.
    • + ///
    • The ANTLR Runtime version referenced at the time a parser was + /// compiled does not match the currently executing runtime version.
    • + ///
    + /// + ///

    + /// Starting with ANTLR 4.3, the code generator emits a call to this method + /// using two constants in each generated lexer and parser: a hard-coded + /// constant indicating the version of the tool used to generate the parser + /// and a reference to the compile-time constant {@link #VERSION}. At + /// runtime, this method is called during the initialization of the generated + /// parser to detect mismatched versions, and notify the registered listeners + /// prior to creating instances of the parser.

    + /// + ///

    + /// This method does not perform any detection or filtering of semantic + /// changes between tool and runtime versions. It simply checks for a + /// version match and emits an error to stderr if a difference + /// is detected.

    + /// + ///

    + /// Note that some breaking changes between releases could result in other + /// types of runtime exceptions, such as a [LinkageError], prior to + /// calling this method. In these cases, the underlying version mismatch will + /// not be reported here. This method is primarily intended to + /// notify users of potential semantic changes between releases that do not + /// result in binary compatibility problems which would be detected by the + /// class loader. As with semantic changes, changes that break binary + /// compatibility between releases are mentioned in the release notes + /// accompanying the affected release.

    + /// + ///

    + /// Additional note for target developers: The version check + /// implemented by this class is designed to address specific compatibility + /// concerns that may arise during the execution of Java applications. Other + /// targets should consider the implementation of this method in the context + /// of that target's known execution environment, which may or may not + /// resemble the design provided for the Java target.

    + /// + /// @param generatingToolVersion The version of the tool used to generate a parser. + /// This value may be null when called from user code that was not generated + /// by, and does not reference, the ANTLR 4 Tool itself. + /// @param compileTimeVersion The version of the runtime the parser was + /// compiled against. This should always be passed using a direct reference + /// to [VERSION]. + static void checkVersion( + String? generatingToolVersion, + String compileTimeVersion, + ) { + final runtimeVersion = VERSION; + var runtimeConflictsWithGeneratingTool = false; + var runtimeConflictsWithCompileTimeTool = false; + + if (generatingToolVersion != null) { + runtimeConflictsWithGeneratingTool = + !(runtimeVersion == generatingToolVersion) && + !(getMajorMinorVersion(runtimeVersion) == + getMajorMinorVersion(generatingToolVersion)); + } + + runtimeConflictsWithCompileTimeTool = + !(runtimeVersion == compileTimeVersion) && + !(getMajorMinorVersion(runtimeVersion) == + getMajorMinorVersion(compileTimeVersion)); + + if (runtimeConflictsWithGeneratingTool) { + log('ANTLR Tool version $generatingToolVersion used for code generation does not match the current runtime version $runtimeVersion', + level: Level.SEVERE.value); + } + if (runtimeConflictsWithCompileTimeTool) { + log('ANTLR Runtime version $compileTimeVersion used for parser compilation does not match the current runtime version $runtimeVersion', + level: Level.SEVERE.value); + } + } + + /// Gets the major and minor version numbers from a version string. For + /// details about the syntax of the input [version]. + /// E.g., from x.y.z return x.y. + /// + /// @param version The complete version string. + /// @return A string of the form major.minor containing + /// only the major and minor components of the version string. + static String getMajorMinorVersion(String version) { + final firstDot = version.indexOf('.'); + final secondDot = firstDot >= 0 ? 
version.indexOf('.', firstDot + 1) : -1; + final firstDash = version.indexOf('-'); + var referenceLength = version.length; + if (secondDot >= 0) { + referenceLength = math.min(referenceLength, secondDot); + } + + if (firstDash >= 0) { + referenceLength = math.min(referenceLength, firstDash); + } + + return version.substring(0, referenceLength); + } +} diff --git a/runtime/Dart/lib/src/token.dart b/runtime/Dart/lib/src/token.dart new file mode 100644 index 0000000000..d60ec032d0 --- /dev/null +++ b/runtime/Dart/lib/src/token.dart @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'misc/misc.dart'; +import 'token_source.dart'; + +/// A token has properties: text, type, line, character position in the line +/// (so we can ignore tabs), token channel, index, and source from which +/// we obtained this token. +abstract class Token { + static const int INVALID_TYPE = 0; + + /// During lookahead operations, this "token" signifies we hit rule end ATN state + /// and did not follow it despite needing to. + static const int EPSILON = -2; + + static const int MIN_USER_TOKEN_TYPE = 1; + + static const int EOF = IntStream.EOF; + + /// All tokens go to the parser (unless skip() is called in that rule) + /// on a particular "channel". The parser tunes to a particular channel + /// so that whitespace etc... can go to the parser on a "hidden" channel. + static const int DEFAULT_CHANNEL = 0; + + /// Anything on different channel than DEFAULT_CHANNEL is not parsed + /// by parser. + static const int HIDDEN_CHANNEL = 1; + + /// This is the minimum constant value which can be assigned to a + /// user-defined token channel. + /// + ///

    + /// The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are + /// assigned to the predefined channels {@link #DEFAULT_CHANNEL} and + /// {@link #HIDDEN_CHANNEL}.

    + /// + /// @see Token#getChannel() + static const int MIN_USER_CHANNEL_VALUE = 2; + + /// Get the text of the token. + String? get text; + + /// Get the token type of the token */ + int get type; + + /// The line number on which the 1st character of this token was matched, + /// line=1..n + int? get line; + + /// The index of the first character of this token relative to the + /// beginning of the line at which it occurs, 0..n-1 + int get charPositionInLine; + + /// Return the channel this token. Each token can arrive at the parser + /// on a different channel, but the parser only "tunes" to a single channel. + /// The parser ignores everything not on DEFAULT_CHANNEL. + int get channel; + + /// An index from 0..n-1 of the token object in the input stream. + /// This must be valid in order to print token streams and + /// use TokenRewriteStream. + /// + /// Return -1 to indicate that this token was conjured up since + /// it doesn't have a valid index. + int get tokenIndex; + + /// The starting character index of the token + /// This method is optional; return -1 if not implemented. + int get startIndex; + + /// The last character index of the token. + /// This method is optional; return -1 if not implemented. + int get stopIndex; + + /// Gets the [TokenSource] which created this token. + TokenSource? get tokenSource; + + /// Gets the [CharStream] from which this token was derived. + CharStream? get inputStream; +} + +abstract class WritableToken extends Token { + set text(String? text); + + set type(int ttype); + + set line(int? line); + + set charPositionInLine(int pos); + + set channel(int channel); + + set tokenIndex(int index); +} + +class CommonToken extends WritableToken { + /// An empty [Pair] which is used as the default value of + /// {@link #source} for tokens that do not have a source. + static const EMPTY_SOURCE = Pair(null, null); + + @override + int type; + + @override + int? 
line; + + @override + int charPositionInLine = -1; // set to invalid position + + @override + int channel = Token.DEFAULT_CHANNEL; + + /// These properties share a field to reduce the memory footprint of + /// [CommonToken]. Tokens created by a [CommonTokenFactory] from + /// the same source and input stream share a reference to the same + /// [Pair] containing these values.

    + late Pair source; + + /// This is the backing field for {@link #getText} when the token text is + /// explicitly set in the constructor or via {@link #setText}. + /// + /// @see #getText() + String? _text; + + @override + int tokenIndex = -1; + + @override + int startIndex; + + @override + int stopIndex; + + /// Constructs a new [CommonToken] with the specified token type and + /// text. + /// + /// @param type The token type. + /// @param text The text of the token. + CommonToken( + this.type, { + this.source = EMPTY_SOURCE, + this.channel = Token.DEFAULT_CHANNEL, + this.startIndex = -1, + this.stopIndex = -1, + text, + }) { + _text = text; + if (source.a != null) { + line = source.a!.line; + charPositionInLine = source.a!.charPositionInLine; + } + } + + /// Constructs a new [CommonToken] as a copy of another [Token]. + /// + ///

    + /// If [oldToken] is also a [CommonToken] instance, the newly + /// constructed token will share a reference to the {@link #text} field and + /// the [Pair] stored in {@link #source}. Otherwise, {@link #text} will + /// be assigned the result of calling {@link #getText}, and {@link #source} + /// will be constructed from the result of {@link Token#getTokenSource} and + /// {@link Token#getInputStream}.

    + /// + /// @param oldToken The token to copy. + CommonToken.copy(Token oldToken) + : type = oldToken.type, + line = oldToken.line, + tokenIndex = oldToken.tokenIndex, + charPositionInLine = oldToken.charPositionInLine, + channel = oldToken.channel, + startIndex = oldToken.startIndex, + stopIndex = oldToken.stopIndex { + if (oldToken is CommonToken) { + _text = oldToken.text; + source = oldToken.source; + } else { + _text = oldToken.text; + source = Pair( + oldToken.tokenSource, + oldToken.inputStream, + ); + } + } + + @override + String? get text { + if (_text != null) { + return _text; + } + + final input = inputStream; + if (input == null) return null; + final n = input.size; + + if (startIndex < n && stopIndex < n) { + return input.getText(Interval.of(startIndex, stopIndex)); + } else { + return ''; + } + } + + /// Explicitly set the text for this token. If {code text} is not + /// null, then {@link #getText} will return this value rather than + /// extracting the text from the input. + /// + /// @param text The explicit text of the token, or null if the text + /// should be obtained from the input along with the start and stop indexes + /// of the token. + @override + set text(String? text) { + _text = text; + } + + @override + TokenSource? get tokenSource { + return source.a; + } + + @override + CharStream? get inputStream { + return source.b; + } + + @override + String toString([void _]) { + var txt = text; + if (txt != null) { + txt = txt + .replaceAll('\n', r'\n') + .replaceAll('\r', r'\r') + .replaceAll('\t', r'\t'); + } else { + txt = ''; + } + return "[@$tokenIndex,$startIndex:$stopIndex='$txt',<$type>" + + (channel > 0 ? ',channel=$channel' : '') + + ',$line:$charPositionInLine]'; + } +} + +/// A [Token] object representing an entire subtree matched by a parser +/// rule; e.g., {@code }. These tokens are created for [TagChunk] +/// chunks where the tag corresponds to a parser rule. 
+class RuleTagToken implements Token { + /// Gets the name of the rule associated with this rule tag. + /// + /// @return The name of the parser rule associated with this rule tag. + final String ruleName; + + /// The token type for the current token. This is the token type assigned to + /// the bypass alternative for the rule during ATN deserialization. + final int bypassTokenType; + + /// Gets the label associated with the rule tag. + /// + /// @return The name of the label associated with the rule tag, or + /// null if this is an unlabeled rule tag. + final String? label; + + /// Constructs a new instance of [RuleTagToken] with the specified rule + /// name, bypass token type, and label. + /// + /// @param ruleName The name of the parser rule this rule tag matches. + /// @param bypassTokenType The bypass token type assigned to the parser rule. + /// @param label The label associated with the rule tag, or null if + /// the rule tag is unlabeled. + /// + /// @exception ArgumentError.value(value) if [ruleName] is null + /// or empty. + RuleTagToken(this.ruleName, this.bypassTokenType, [this.label]) { + if (ruleName.isEmpty) { + throw ArgumentError.value( + ruleName, + 'ruleName', + 'cannot be empty.', + ); + } + } + + /// {@inheritDoc} + /// + ///

    Rule tag tokens are always placed on the {@link #DEFAULT_CHANNEL}.

    + + @override + int get channel { + return Token.DEFAULT_CHANNEL; + } + + /// {@inheritDoc} + /// + ///

    This method returns the rule tag formatted with {@code <} and {@code >} + /// delimiters.

    + + @override + String get text { + if (label != null) { + return '<' + label! + ':' + ruleName + '>'; + } + + return '<' + ruleName + '>'; + } + + /// {@inheritDoc} + /// + ///

    Rule tag tokens have types assigned according to the rule bypass + /// transitions created during ATN deserialization.

    + + @override + int get type { + return bypassTokenType; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] always returns 0.

    + + @override + int get line { + return 0; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] always returns -1.

    + @override + int get charPositionInLine { + return -1; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] always returns -1.

    + @override + int get tokenIndex { + return -1; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] always returns -1.

    + @override + int get startIndex { + return -1; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] always returns -1.

    + + @override + int get stopIndex { + return -1; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] always returns null.

    + + @override + TokenSource? get tokenSource { + return null; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] always returns null.

    + + @override + CharStream? get inputStream { + return null; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [RuleTagToken] returns a string of the form + /// {@code ruleName:bypassTokenType}.

    + + @override + String toString() { + return ruleName + ':$bypassTokenType'; + } +} + +/// A [Token] object representing a token of a particular type; e.g., +/// {@code }. These tokens are created for [TagChunk] chunks where the +/// tag corresponds to a lexer rule or token type. +class TokenTagToken extends CommonToken { + /// Gets the token name. + /// @return The token name. + final String tokenName; + + /// Gets the label associated with the rule tag. + /// + /// @return The name of the label associated with the rule tag, or + /// null if this is an unlabeled rule tag. + final String? label; + + /// Constructs a new instance of [TokenTagToken] with the specified + /// token name, type, and label. + /// + /// @param tokenName The token name. + /// @param type The token type. + /// @param label The label associated with the token tag, or null if + /// the token tag is unlabeled. + TokenTagToken(this.tokenName, type, [this.label]) : super(type); + + /// {@inheritDoc} + /// + ///

    The implementation for [TokenTagToken] returns the token tag + /// formatted with {@code <} and {@code >} delimiters.

    + + @override + String get text { + if (label != null) { + return '<' + label! + ':' + tokenName + '>'; + } + + return '<' + tokenName + '>'; + } + + /// {@inheritDoc} + /// + ///

    The implementation for [TokenTagToken] returns a string of the form + /// {@code tokenName:type}.

    + + @override + String toString([void _]) { + return tokenName + ':$type'; + } +} diff --git a/runtime/Dart/lib/src/token_factory.dart b/runtime/Dart/lib/src/token_factory.dart new file mode 100644 index 0000000000..1e12f11994 --- /dev/null +++ b/runtime/Dart/lib/src/token_factory.dart @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'misc/misc.dart'; +import 'token.dart'; +import 'token_source.dart'; + +/// The default mechanism for creating tokens. It's used by default in Lexer and +/// the error handling strategy (to create missing tokens). Notifying the parser +/// of a new factory means that it notifies its token source and error strategy. +abstract class TokenFactory { + /// This is the method used to create tokens in the lexer and in the + /// error handling strategy. If text!=null, than the start and stop positions + /// are wiped to -1 in the text override is set in the CommonToken. + Symbol create( + int type, + String? text, + Pair source, + int channel, + int start, + int stop, + int? line, + int charPositionInLine, + ); +} + +/// This default implementation of [TokenFactory] creates +/// [CommonToken] objects. +class CommonTokenFactory implements TokenFactory { + /// The default [CommonTokenFactory] instance. + /// + ///

    + /// This token factory does not explicitly copy token text when constructing + /// tokens.

    + static final TokenFactory DEFAULT = CommonTokenFactory(); + + /// Indicates whether {@link CommonToken#setText} should be called after + /// constructing tokens to explicitly set the text. This is useful for cases + /// where the input stream might not be able to provide arbitrary substrings + /// of text from the input after the lexer creates a token (e.g. the + /// implementation of {@link CharStream#getText} in + /// [UnbufferedCharStream] throws an + /// [UnsupportedOperationException]). Explicitly setting the token text + /// allows {@link Token#getText} to be called at any time regardless of the + /// input stream implementation. + /// + ///

    + /// The default value is [false] to avoid the performance and memory + /// overhead of copying text for every token unless explicitly requested.

    + final bool copyText; + + /// Constructs a [CommonTokenFactory] with the specified value for + /// {@link #copyText}. + /// + ///

    + /// When [copyText] is [false], the {@link #DEFAULT} instance + /// should be used instead of constructing a new instance.

    + /// + /// @param copyText The value for {@link #copyText}. + CommonTokenFactory([this.copyText = false]); + + @override + CommonToken create( + int type, + String? text, + Pair? source, + int channel, + int start, + int stop, + int? line, + int charPositionInLine, + ) { + if (source == null) { + return CommonToken(type, text: text); + } + + final t = CommonToken( + type, + source: source, + channel: channel, + startIndex: start, + stopIndex: stop, + ); + t.line = line; + t.charPositionInLine = charPositionInLine; + if (text != null) { + t.text = text; + } else if (copyText && source.b != null) { + t.text = source.b!.getText(Interval.of(start, stop)); + } + + return t; + } +} diff --git a/runtime/Dart/lib/src/token_source.dart b/runtime/Dart/lib/src/token_source.dart new file mode 100644 index 0000000000..14bdb6418d --- /dev/null +++ b/runtime/Dart/lib/src/token_source.dart @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:math'; + +import 'input_stream.dart'; +import 'misc/misc.dart'; +import 'token.dart'; +import 'token_factory.dart'; + +/// A source of tokens must provide a sequence of tokens via {@link #nextToken()} +/// and also must reveal it's source of characters; [CommonToken]'s text is +/// computed from a [CharStream]; it only store indices into the char +/// stream. +/// +///

    Errors from the lexer are never passed to the parser. Either you want to keep +/// going or you do not upon token recognition error. If you do not want to +/// continue lexing then you do not want to continue parsing. Just throw an +/// exception not under [RecognitionException] and Java will naturally toss +/// you all the way out of the recognizers. If you want to continue lexing then +/// you should not throw an exception to the parser--it has already requested a +/// token. Keep lexing until you get a valid one. Just report errors and keep +/// going, looking for a valid token.

    +abstract class TokenSource { + /// Return a [Token] object from your input stream (usually a + /// [CharStream]). Do not fail/return upon lexing error; keep chewing + /// on the characters until you get a good one; errors are not passed through + /// to the parser. + Token nextToken(); + + /// Get the line number for the current position in the input stream. The + /// first line in the input is line 1. + /// + /// @return The line number for the current position in the input stream, or + /// 0 if the current token source does not track line numbers. + int? get line; + + /// Get the index into the current line for the current position in the input + /// stream. The first character on a line has position 0. + /// + /// @return The line number for the current position in the input stream, or + /// -1 if the current token source does not track character positions. + int get charPositionInLine; + + /// Get the [CharStream] from which this token source is currently + /// providing tokens. + /// + /// @return The [CharStream] associated with the current position in + /// the input, or null if no input stream is available for the token + /// source. + CharStream? get inputStream; + + /// Gets the name of the underlying input source. This method returns a + /// non-null, non-empty string. If such a name is not known, this method + /// returns {@link IntStream#UNKNOWN_SOURCE_NAME}. + String get sourceName; + + /// Set the [TokenFactory] this token source should use for creating + /// [Token] objects from the input. + /// + /// @param factory The [TokenFactory] to use for creating tokens. + set tokenFactory(TokenFactory factory); + + /// Gets the [TokenFactory] this token source is currently using for + /// creating [Token] objects from the input. + /// + /// @return The [TokenFactory] currently used by this token source. + TokenFactory get tokenFactory; +} + +/// Provides an implementation of [TokenSource] as a wrapper around a list +/// of [Token] objects. +/// +///

    If the final token in the list is an {@link Token#EOF} token, it will be used +/// as the EOF token for every call to {@link #nextToken} after the end of the +/// list is reached. Otherwise, an EOF token will be created.

    +class ListTokenSource implements TokenSource { + /// The wrapped collection of [Token] objects to return. + final List tokens; + + final String? _sourceName; + + /// The index into {@link #tokens} of token to return by the next call to + /// {@link #nextToken}. The end of the input is indicated by this value + /// being greater than or equal to the number of items in {@link #tokens}. + late int i; // todo: uncertain + + /// This field caches the EOF token for the token source. + Token? eofToken; + + /// This is the backing field for {@link #getTokenFactory} and + /// [setTokenFactory]. + @override + TokenFactory tokenFactory = CommonTokenFactory.DEFAULT; + + /** + * Constructs a new [ListTokenSource] instance from the specified + * collection of [Token] objects. + * + * @param tokens The collection of [Token] objects to provide as a + * [TokenSource]. + * @exception NullPointerException if [tokens] is null + */ + + /// Constructs a new [ListTokenSource] instance from the specified + /// collection of [Token] objects and source name. + /// + /// @param tokens The collection of [Token] objects to provide as a + /// [TokenSource]. + /// @param sourceName The name of the [TokenSource]. If this value is + /// null, {@link #getSourceName} will attempt to infer the name from + /// the next [Token] (or the previous token if the end of the input has + /// been reached). + /// + /// @exception NullPointerException if [tokens] is null + ListTokenSource(this.tokens, [this._sourceName]); + + /// {@inheritDoc} + + @override + int get charPositionInLine { + if (i < tokens.length) { + return tokens[i].charPositionInLine; + } else if (eofToken != null) { + return eofToken!.charPositionInLine; + } else if (tokens.isNotEmpty) { + // have to calculate the result from the line/column of the previous + // token, along with the text of the token. 
+ final lastToken = tokens[tokens.length - 1]; + final tokenText = lastToken.text; + if (tokenText != null) { + final lastNewLine = tokenText.lastIndexOf('\n'); + if (lastNewLine >= 0) { + return tokenText.length - lastNewLine - 1; + } + } + + return lastToken.charPositionInLine + + lastToken.stopIndex - + lastToken.startIndex + + 1; + } + + // only reach this if tokens is empty, meaning EOF occurs at the first + // position in the input + return 0; + } + + /// {@inheritDoc} + + @override + Token nextToken() { + if (i >= tokens.length) { + if (eofToken == null) { + var start = -1; + if (tokens.isNotEmpty) { + final previousStop = tokens[tokens.length - 1].stopIndex; + if (previousStop != -1) { + start = previousStop + 1; + } + } + + final stop = max(-1, start - 1); + eofToken = tokenFactory.create( + Token.EOF, + 'EOF', + Pair(this, inputStream), + Token.DEFAULT_CHANNEL, + start, + stop, + line, + charPositionInLine, + ); + } + + return eofToken!; + } + + final t = tokens[i]; + if (i == tokens.length - 1 && t.type == Token.EOF) { + eofToken = t; + } + + i++; + return t; + } + + /// {@inheritDoc} + + @override + int? get line { + if (i < tokens.length) { + return tokens[i].line; + } else if (eofToken != null) { + return eofToken!.line; + } else if (tokens.isNotEmpty) { + // have to calculate the result from the line/column of the previous + // token, along with the text of the token. + final lastToken = tokens[tokens.length - 1]; + var line = lastToken.line ?? 0; + + final tokenText = lastToken.text; + if (tokenText != null) { + for (var i = 0; i < tokenText.length; i++) { + if (tokenText[i] == '\n') { + line++; + } + } + } + + // if no text is available, assume the token did not contain any newline characters. + return line; + } + + // only reach this if tokens is empty, meaning EOF occurs at the first + // position in the input + return 1; + } + + /// {@inheritDoc} + + @override + CharStream? 
get inputStream { + if (i < tokens.length) { + return tokens[i].inputStream; + } else if (eofToken != null) { + return eofToken!.inputStream; + } else if (tokens.isNotEmpty) { + return tokens[tokens.length - 1].inputStream; + } + + // no input stream information is available + return null; + } + + /// The name of the input source. If this value is null, a call to + /// {@link #getSourceName} should return the source name used to create the + /// the next token in {@link #tokens} (or the previous token if the end of + /// the input has been reached). + @override + String get sourceName => _sourceName ?? inputStream?.sourceName ?? 'List'; +} diff --git a/runtime/Dart/lib/src/token_stream.dart b/runtime/Dart/lib/src/token_stream.dart new file mode 100644 index 0000000000..2f1b64088f --- /dev/null +++ b/runtime/Dart/lib/src/token_stream.dart @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'lexer.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'token_source.dart'; + +/// An [IntStream] whose symbols are [Token] instances. +abstract class TokenStream extends IntStream { + /// Get the [Token] instance associated with the value returned by + /// [LA]. This method has the same pre- and post-conditions as + /// [IntStream.LA]. In addition, when the preconditions of this method + /// are met, the return value is non-null and the value of + /// `LT(k).getType()==LA(k)`. + /// + /// TODO: in this doc it says that is non-null, but the implementation says + /// otherwise + /// + /// Se also: + /// ntStream.LA + Token? LT(int k); + + /// Gets the [Token] at the specified [index] in the stream. When + /// the preconditions of this method are met, the return value is non-null. + /// + ///

    The preconditions for this method are the same as the preconditions of + /// {@link IntStream#seek}. If the behavior of {@code seek(index)} is + /// unspecified for the current state and given [index], then the + /// behavior of this method is also unspecified.

    + /// + ///

    The symbol referred to by [index] differs from {@code seek()} only + /// in the case of filtering streams where [index] lies before the end + /// of the stream. Unlike {@code seek()}, this method does not adjust + /// [index] to point to a non-ignored symbol.

    + /// + /// @throws IllegalArgumentException if {code index} is less than 0 + /// @throws UnsupportedOperationException if the stream does not support + /// retrieving the token at the specified index + Token get(int index); + + /// Gets the underlying [TokenSource] which provides tokens for this + /// stream. + TokenSource get tokenSource; + + /// Return the text of all tokens within the specified [interval]. This + /// method behaves like the following code (including potential exceptions + /// for violating preconditions of {@link #get}, but may be optimized by the + /// specific implementation. + /// + ///
    +  /// TokenStream stream = ...;
    +  /// String text = "";
    +  /// for (int i = interval.a; i <= interval.b; i++) {
    +  ///   text += stream.get(i).getText();
    +  /// }
    +  /// 
    + /// + ///
    +  /// TokenStream stream = ...;
    +  /// String text = stream.getText(new Interval(0, stream.length));
    +  /// 
    + /// + ///
    +  /// TokenStream stream = ...;
    +  /// String text = stream.getText(ctx.getSourceInterval());
    +  /// 
    + /// + /// @param interval The interval of tokens within this stream to get text + /// for. + /// @return The text of all tokens / within the specified interval in this + /// stream. + String getText([Interval interval]); + + String get text; + + /// Return the text of all tokens in the source interval of the specified + /// context. This method behaves like the following code, including potential + /// exceptions from the call to {@link #getText(Interval)}, but may be + /// optimized by the specific implementation. + /// + ///

    If {@code ctx.getSourceInterval()} does not return a valid interval of + /// tokens provided by this stream, the behavior is unspecified.

    + /// + /// @param ctx The context providing the source interval of tokens to get + /// text for. + /// @return The text of all tokens within the source interval of [ctx]. + String getTextFromCtx(RuleContext ctx); + + /// Return the text of all tokens in this stream between [start] and + /// [stop] (inclusive). + /// + ///

    If the specified [start] or [stop] token was not provided by + /// this stream, or if the [stop] occurred before the [start] + /// token, the behavior is unspecified.

    + /// + ///

    For streams which ensure that the {@link Token#getTokenIndex} method is + /// accurate for all of its provided tokens, this method behaves like the + /// following code. Other streams may implement this method in other ways + /// provided the behavior is consistent with this at a high level.

    + /// + ///
    +  /// TokenStream stream = ...;
    +  /// String text = "";
    +  /// for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
    +  ///   text += stream.get(i).getText();
    +  /// }
    +  /// 
    + /// + /// @param start The first token in the interval to get text for. + /// @param stop The last token in the interval to get text for (inclusive). + /// @return The text of all tokens lying between the specified [start] + /// and [stop] tokens. + /// + /// @throws UnsupportedOperationException if this stream does not support + /// this method for the specified tokens + String getTextRange(Token? start, Token? stop); +} + +/// This implementation of [TokenStream] loads tokens from a +/// [TokenSource] on-demand, and places the tokens in a buffer to provide +/// access to any previous token by index. +/// +///

    +/// This token stream ignores the value of {@link Token#getChannel}. If your +/// parser requires the token stream filter tokens to only those on a particular +/// channel, such as {@link Token#DEFAULT_CHANNEL} or +/// {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such a +/// [CommonTokenStream].

    +class BufferedTokenStream implements TokenStream { + /// The [TokenSource] from which tokens for this stream are fetched. + TokenSource _tokenSource; + + /// A collection of all tokens fetched from the token source. The list is + /// considered a complete view of the input once {@link #fetchedEOF} is set + /// to [true]. + List tokens = []; + + /// The index into [tokens] of the current token (next token to [consume]). + /// [tokens][p] should be [LT(1)]. + /// + ///

    This field is set to -1 when the stream is first constructed or when + /// [tokenSource] is set, indicating that the first token has + /// not yet been fetched from the token source. For additional information, + /// see the documentation of [IntStream] for a description of + /// Initializing Methods.

    + int p = -1; + + /// Indicates whether the [Token.EOF] token has been fetched from + /// [tokenSource] and added to [tokens]. This field improves + /// performance for the following cases: + /// + ///

    ." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg index a5dab18bd5..b72bc74d65 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg @@ -47,7 +47,9 @@ TokenStartColumnEquals(i) ::= "tokenStartCharPositionInLine == " ImportListener(X) ::= "" -GetExpectedTokenNames() ::= "getExpectedTokens().toString(_tokenNames)" +GetExpectedTokenNames() ::= "getExpectedTokens().toString(getVocabulary())" + +ImportRuleInvocationStack() ::= "" RuleInvocationStack() ::= "Arrays::listToString(getRuleInvocationStack(), \", \")" @@ -92,8 +94,11 @@ protected: public: virtual std::unique_ptr\ nextToken() override { if (dynamic_cast\(_interpreter) == nullptr) { + const auto &atn = _interpreter->atn; + auto &decisionToDFA = dynamic_cast\(_interpreter)->_decisionToDFA; + auto &sharedContextCache = dynamic_cast\(_interpreter)->getSharedContextCache(); delete _interpreter; - _interpreter = new PositionAdjustingLexerATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache); + _interpreter = new PositionAdjustingLexerATNSimulator(this, atn, decisionToDFA, sharedContextCache); } return antlr4::Lexer::nextToken(); @@ -255,7 +260,7 @@ void foo() { >> Declare_foo() ::= <> @@ -271,5 +276,8 @@ bool pred(bool v) { Invoke_pred(v) ::= <)>> ContextRuleFunction(ctx, rule) ::= "->" +ContextListFunction(ctx, rule) ::= "->()" StringType() ::= "std::string" -ContextMember(ctx, subctx, member) ::= "->->" +ContextMember(ctx, member) ::= "->" +SubContextLocal(ctx, subctx, local) ::= "->->" +SubContextMember(ctx, subctx, member) ::= "->->" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg new file mode 100644 index 0000000000..b9a08af29d --- /dev/null 
+++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg @@ -0,0 +1,323 @@ +writeln(s) ::= <);>> +write(s) ::= <);>> +writeList(s) ::= <);>> + +False() ::= "false" + +True() ::= "true" + +Not(v) ::= "!" + +Assert(s) ::= <);>> + +Cast(t,v) ::= "( as )" + +Append(a,b) ::= ".toString() + .toString()" + +AppendStr(a,b) ::= <%%> + +Concat(a,b) ::= "" + +AssertIsList(v) ::= "assert ( is List);" // just use static type system + +AssignLocal(s,v) ::= " = ;" + +InitIntMember(n,v) ::= <%int = ;%> + +InitBooleanMember(n,v) ::= <%bool = ;%> + +InitIntVar(n,v) ::= <%%> + +IntArg(n) ::= "int " + +VarRef(n) ::= "" + +GetMember(n) ::= <%this.%> + +SetMember(n,v) ::= <%this. = ;%> + +AddMember(n,v) ::= <%this. += ;%> + +MemberEquals(n,v) ::= <%this. == %> + +ModMemberEquals(n,m,v) ::= <%this. % == %> + +ModMemberNotEquals(n,m,v) ::= <%this. % != %> + +DumpDFA() ::= "this.dumpDFA();" + +Pass() ::= "" + +StringList() ::= "List\" + +BuildParseTrees() ::= "buildParseTree = true;" + +BailErrorStrategy() ::= <%errorHandler = new BailErrorStrategy();%> + +ToStringTree(s) ::= <%.toStringTree(parser: this)%> + +Column() ::= "this.charPositionInLine" + +Text() ::= "this.text" + +ValEquals(a,b) ::= <%==%> + +TextEquals(a) ::= <%this.text == ""%> + +PlusText(a) ::= <%"" + this.text%> + +InputText() ::= "tokenStream.text" + +LTEquals(i, v) ::= <%tokenStream.LT()!.text == %> + +LANotEquals(i, v) ::= <%tokenStream.LA()!=%> + +TokenStartColumnEquals(i) ::= <%this.tokenStartCharPositionInLine==%> + +ImportListener(X) ::= "" + +GetExpectedTokenNames() ::= "this.expectedTokens.toString(vocabulary: this.vocabulary)" + +ImportRuleInvocationStack() ::= "" + +RuleInvocationStack() ::= "ruleInvocationStack" + +LL_EXACT_AMBIG_DETECTION() ::= <> + +ParserToken(parser, token) ::= <%.TOKEN_%> + +Production(p) ::= <%