diff --git a/.github/workflows/sql-cli-release-workflow.yml b/.github/workflows/sql-cli-release-workflow.yml new file mode 100644 index 0000000000..78b843e1a7 --- /dev/null +++ b/.github/workflows/sql-cli-release-workflow.yml @@ -0,0 +1,55 @@ +name: Release SQL CLI Artifacts +# This workflow is triggered on creating tags to master +on: + push: + tags: + - 'v*' + +jobs: + build: + + runs-on: [ubuntu-16.04] + defaults: + run: + working-directory: sql-cli + strategy: + matrix: + python-version: [3.8] + + steps: + - name: Checkout SQL CLI + uses: actions/checkout@v2 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Install Dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine + # publish to S3 and PyPI + - name: Build and Publish + run: | + python setup.py sdist bdist_wheel + artifact=`ls ./dist/*.tar.gz` + wheel_artifact=`ls ./dist/*.whl` + + aws s3 cp $artifact s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-cli/ + aws s3 cp $wheel_artifact s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-cli/ + + # aws cloudfront create-invalidation --distribution-id ${{ secrets.DISTRIBUTION_ID }} --paths "/downloads/*" + + # TODO: Publish to PyPI + # env: + # TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + # TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + # run: twine upload dist/* diff --git a/.github/workflows/sql-cli-test-and-build-workflow.yml b/.github/workflows/sql-cli-test-and-build-workflow.yml new file mode 100644 index 0000000000..b74dceb02d --- /dev/null +++ 
b/.github/workflows/sql-cli-test-and-build-workflow.yml @@ -0,0 +1,60 @@ +name: SQL CLI Test and Build + +on: [pull_request, push] + +jobs: + build: + + runs-on: [ubuntu-16.04] + defaults: + run: + working-directory: sql-cli + strategy: + matrix: + python-version: [3.8] + + steps: + - name: Checkout SQL CLI + uses: actions/checkout@v2 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python Dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements-dev.txt + pip install setuptools wheel + + - name: Set up ES and install SQL plugin + run: | + sudo add-apt-repository ppa:openjdk-r/ppa + sudo apt update + sudo apt install openjdk-11-jdk + sudo apt install unzip + wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.6.1-amd64.deb + sudo dpkg -i elasticsearch-oss-7.6.1-amd64.deb + sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install https://d3g5vo6xdbdb9a.cloudfront.net/downloads/elasticsearch-plugins/opendistro-sql/opendistro_sql-1.6.0.0.zip + sudo systemctl start elasticsearch.service + + - name: Run Tox Testing + run: tox + + - name: Stop ES + run: sudo systemctl stop elasticsearch.service + + - name: Build Artifact + run: python setup.py sdist bdist_wheel + + - name: Create Artifact Path + run: | + mkdir -p opendistro-sql-cli-builds + cp -r ./dist/*.tar.gz ./dist/*.whl opendistro-sql-cli-builds/ + + - name: Upload Artifact + uses: actions/upload-artifact@v2 + with: + name: opendistro-sql-cli + path: sql-cli/opendistro-sql-cli-builds diff --git a/.github/workflows/sql-jdbc-push-jdbc-maven.yml b/.github/workflows/sql-jdbc-push-jdbc-maven.yml new file mode 100644 index 0000000000..53f2d5d391 --- /dev/null +++ b/.github/workflows/sql-jdbc-push-jdbc-maven.yml @@ -0,0 +1,49 @@ +name: Upload sql-jdbc Jar to Maven + +# This workflow will upload the sql-jdbc jar to maven when a new tag is cut +on: + 
push: + tags: + - v* + +jobs: + upload-jdbc-jar: + runs-on: [ubuntu-16.04] + defaults: + run: + working-directory: sql-jdbc + name: Upload Jar to Maven + steps: + - name: Checkout Repo + uses: actions/checkout@v2 + + - name: Configure AWS CLI + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + # Since release workflow uses java 10 + - name: Setup Java + uses: actions/setup-java@v1 + with: + java-version: '10' + + - name: Upload jdbc Jar to Maven + env: + passphrase: ${{ secrets.PASSPHRASE }} + run: | + cd .. + export JAVA10_HOME=$JAVA_HOME + aws s3 cp s3://opendistro-docs/github-actions/pgp-public-key . + aws s3 cp s3://opendistro-docs/github-actions/pgp-private-key . + + gpg --import pgp-public-key + gpg --allow-secret-key-import --import pgp-private-key + + + mkdir /home/runner/.gradle + aws s3 cp s3://opendistro-docs/github-actions/gradle.properties /home/runner/.gradle/ + cd sql-jdbc + ./gradlew publishShadowPublicationToSonatype-stagingRepository -Dcompiler.java=10 -Dbuild.snapshot=false -Djavax.net.ssl.trustStore=$JAVA_HOME/lib/security/cacerts diff --git a/.github/workflows/sql-jdbc-release-workflow.yml b/.github/workflows/sql-jdbc-release-workflow.yml new file mode 100644 index 0000000000..7c6385ba85 --- /dev/null +++ b/.github/workflows/sql-jdbc-release-workflow.yml @@ -0,0 +1,41 @@ +name: Build and Release SQL-JDBC +# This workflow is triggered on creating tags to master or an opendistro release branch +on: + push: + tags: + - 'v*' + +jobs: + Release-SQL-JDBC: + strategy: + matrix: + java: [10] + + name: Build and Release SQL Plugin + runs-on: ubuntu-latest + defaults: + run: + working-directory: sql-jdbc + + steps: + - name: Checkout SQL-JDBC + uses: actions/checkout@v1 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ 
secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Setup Java ${{ matrix.java }} + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + + - name: Run build + run: | + ./gradlew publishShadowPublicationToInternal-releasesRepository '-Dorg.gradle.jvmargs=--add-modules java.xml.bind' -Dbuild.snapshot=false + artifact=`ls -1t build/libs/*.jar | grep -v "SNAPSHOT.jar" | grep -v "sources.jar" | head -1` + aws s3 cp $artifact s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-jdbc/ + aws cloudfront create-invalidation --distribution-id E2S86LO5GZAJQM --paths "/downloads/*" diff --git a/.github/workflows/sql-jdbc-test-and-build-workflow.yml b/.github/workflows/sql-jdbc-test-and-build-workflow.yml new file mode 100644 index 0000000000..819c693a06 --- /dev/null +++ b/.github/workflows/sql-jdbc-test-and-build-workflow.yml @@ -0,0 +1,33 @@ +name: SQL JDBC Java CI + +on: [push, pull_request] + +jobs: + build: + + runs-on: ubuntu-latest + defaults: + run: + working-directory: sql-jdbc + + steps: + - uses: actions/checkout@v1 + + - name: Set up JDK 1.13 + uses: actions/setup-java@v1 + with: + java-version: 1.13 + + - name: Build with Gradle + run: ./gradlew build + + - name: Create Artifact Path + run: | + mkdir -p sql-jdbc-builds + cp ./build/libs/*.jar sql-jdbc-builds + + - name: Upload Artifacts + uses: actions/upload-artifact@v1 + with: + name: sql-jdbc + path: sql-jdbc/sql-jdbc-builds diff --git a/.github/workflows/sql-odbc-main.yml b/.github/workflows/sql-odbc-main.yml new file mode 100644 index 0000000000..e2e832e0fb --- /dev/null +++ b/.github/workflows/sql-odbc-main.yml @@ -0,0 +1,192 @@ +name: Open Distro for Elasticsearch ODBC Driver + +on: [push, pull_request] + +jobs: + build-mac: + runs-on: macos-latest + defaults: + run: + working-directory: sql-odbc + steps: + - uses: actions/checkout@v1 + - name: run-cppcheck + run: | + 
brew install cppcheck + sh run_cppcheck.sh + - name: upload-cppcheck-results + if: failure() + uses: actions/upload-artifact@v1 + with: + name: cppcheck-results + path: sql-odbc/cppcheck-results.log + - name: get-dependencies + run: | + brew unlink unixodbc + brew install curl + brew install cmake + brew install libiodbc + - name: aws-sdk-cpp-setup + run: | + sh aws_sdk_cpp_setup.sh + - name: configure + run: | + prefix_path=$(pwd) + mkdir cmake-build + cd cmake-build + cmake ../src -DCMAKE_INSTALL_PREFIX=$prefix_path/AWSSDK/ -DCMAKE_BUILD_TYPE=Release -DBUILD_ONLY="core" -DCUSTOM_MEMORY_MANAGEMENT="OFF" -DENABLE_RTTI="OFF" -DENABLE_TESTING="OFF" + cd .. + - name: build-driver + run: | + cmake --build cmake-build + #- name: test + # run: | + # bash ./run_test_runner.sh + - name: build-installer + if: success() + run: | + cd cmake-build + cmake ../src + make + cpack . + cd .. + - name: create-output + if: success() + run: | + mkdir build + mkdir test-output + mkdir installer + cp ./lib64/*.dylib build + cp ./lib64/*.a build + cp $(ls -d bin64/* | grep -v "\.") build + cp ./cmake-build/*.pkg installer + # cp ./bin64/*.html test-output + # cp ./bin64/*.log test-output + - name: upload-build + if: success() + uses: actions/upload-artifact@v1 + with: + name: mac64-build + path: sql-odbc/build + - name: upload-installer + if: success() + uses: actions/upload-artifact@v1 + with: + name: mac64-installer + path: sql-odbc/installer + #- name: upload-test-results + # if: success() + # uses: actions/upload-artifact@v1 + # with: + # name: mac-test-results + # path: test-output + build-windows32: + runs-on: windows-latest + defaults: + run: + working-directory: sql-odbc + steps: + - uses: actions/checkout@v1 + - name: add-msbuild-to-path + uses: microsoft/setup-msbuild@v1.0.0 + - name: configure-and-build-driver + run: | + .\build_win_release32.ps1 + - name: build-installer + if: success() + run: | + $prefix_path = (pwd).path + cd cmake-build32 + cmake ..\\src -D 
CMAKE_INSTALL_PREFIX=$prefix_path\AWSSDK\bin -D BUILD_WITH_TESTS=OFF + msbuild .\PACKAGE.vcxproj -p:Configuration=Release + cd .. + #- name: test + # run: | + # cp .\\libraries\\VisualLeakDetector\\bin32\\*.* .\\bin32\\Release + # cp .\\libraries\\VisualLeakDetector\\lib32\\*.lib .\\lib32\\Release + # .\run_test_runner.bat + - name: create-output + if: always() + run: | + mkdir build + mkdir test-output + mkdir installer + cp .\\bin32\\Release\\*.dll build + cp .\\bin32\\Release\\*.exe build + cp .\\lib32\\Release\\*.lib build + cp .\\cmake-build32\\*.msi installer + # cp .\\bin32\\Release\\*.log test-output + # cp .\\bin32\\Release\\*.html test-output + - name: upload-build + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows32-build + path: sql-odbc/build + - name: upload-installer + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows32-installer + path: sql-odbc/installer + #- name: upload-test-results + # if: always() + # uses: actions/upload-artifact@v1 + # with: + # name: windows-test-results + # path: test-output + build-windows64: + runs-on: windows-latest + defaults: + run: + working-directory: sql-odbc + steps: + - uses: actions/checkout@v1 + - name: add-msbuild-to-path + uses: microsoft/setup-msbuild@v1.0.0 + - name: configure-and-build-driver + run: | + .\build_win_release64.ps1 + - name: build-installer + if: success() + run: | + $prefix_path = (pwd).path + cd cmake-build64 + cmake ..\\src -D CMAKE_INSTALL_PREFIX=$prefix_path\AWSSDK\bin -D BUILD_WITH_TESTS=OFF + msbuild .\PACKAGE.vcxproj -p:Configuration=Release + cd .. 
+ #- name: test + # run: | + # cp .\\libraries\\VisualLeakDetector\\bin64\\*.* .\\bin64\\Release + # cp .\\libraries\\VisualLeakDetector\\lib64\\*.lib .\\lib64\\Release + # .\run_test_runner.bat + - name: create-output + if: always() + run: | + mkdir build + mkdir test-output + mkdir installer + cp .\\bin64\\Release\\*.dll build + cp .\\bin64\\Release\\*.exe build + cp .\\lib64\\Release\\*.lib build + cp .\\cmake-build64\\*.msi installer + # cp .\\bin64\\Release\\*.log test-output + # cp .\\bin64\\Release\\*.html test-output + - name: upload-build + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows64-build + path: sql-odbc/build + - name: upload-installer + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows64-installer + path: sql-odbc/installer + #- name: upload-test-results + # if: always() + # uses: actions/upload-artifact@v1 + # with: + # name: windows-test-results + # path: test-output diff --git a/.github/workflows/sql-odbc-release-workflow.yml b/.github/workflows/sql-odbc-release-workflow.yml new file mode 100644 index 0000000000..d736c4b790 --- /dev/null +++ b/.github/workflows/sql-odbc-release-workflow.yml @@ -0,0 +1,197 @@ +name: Build and Release SQL-ODBC +# This workflow is triggered on creating tags to master or an opendistro release branch +on: + push: + tags: + - 'v*' + +jobs: + build-mac: + runs-on: macos-latest + defaults: + run: + working-directory: sql-odbc + steps: + - uses: actions/checkout@v1 + - name: run-cppcheck + run: | + brew install cppcheck + sh run_cppcheck.sh + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + - name: upload-cppcheck-results + if: failure() + uses: actions/upload-artifact@v1 + with: + name: cppcheck-results + path: cppcheck-results.log + - name: get-dependencies + run: | + brew unlink unixodbc + 
brew install curl + brew install cmake + brew install libiodbc + - name: aws-sdk-cpp-setup + run: | + sh aws_sdk_cpp_setup.sh + - name: configure + run: | + prefix_path=$(pwd) + mkdir cmake-build + cd cmake-build + cmake ../src -DCMAKE_INSTALL_PREFIX=$prefix_path/AWSSDK/ -DCMAKE_BUILD_TYPE=Release -DBUILD_ONLY="core" -DCUSTOM_MEMORY_MANAGEMENT="OFF" -DENABLE_RTTI="OFF" -DENABLE_TESTING="OFF" + cd .. + - name: build-driver + run: | + cmake --build cmake-build + - name: build-installer + if: success() + run: | + cd cmake-build + cmake ../src + make + cpack . + cd .. + - name: create-output + if: success() + run: | + mkdir build + mkdir test-output + mkdir installer + cp ./lib64/*.dylib build + cp ./lib64/*.a build + cp $(ls -d bin64/* | grep -v "\.") build + cp ./cmake-build/*.pkg installer + - name: upload-build + if: success() + uses: actions/upload-artifact@v1 + with: + name: mac-build + path: build + - name: upload-installer + if: success() + uses: actions/upload-artifact@v1 + with: + name: mac-installer + path: installer + - name: upload-artifacts-s3 + if: success() + run: | + cd installer + mac_installer=`ls -1t *.pkg | grep "Open Distro for Elasticsearch SQL ODBC Driver" | head -1` + echo $mac_installer + aws s3 cp "$mac_installer" s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/mac/ + build-windows32: + runs-on: windows-latest + defaults: + run: + working-directory: sql-odbc + steps: + - uses: actions/checkout@v1 + - name: add-msbuild-to-path + uses: microsoft/setup-msbuild@v1.0.0 + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + - name: configure-and-build-driver + run: | + .\build_win_release32.ps1 + - name: build-installer + if: success() + run: | + cd cmake-build32 + cmake ..\\src -D 
CMAKE_INSTALL_PREFIX=$prefix_path\AWSSDK\ -D BUILD_WITH_TESTS=OFF + msbuild .\PACKAGE.vcxproj -p:Configuration=Release + cd .. + - name: create-output + if: always() + run: | + mkdir build + mkdir test-output + mkdir installer + cp .\\bin32\\Release\\*.dll build + cp .\\bin32\\Release\\*.exe build + cp .\\lib32\\Release\\*.lib build + cp .\\cmake-build32\\*.msi installer + - name: upload-build + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows32-build + path: build + - name: upload-installer + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows32-installer + path: installer + - name: upload-artifacts-s3 + if: success() + shell: bash + run: | + cd installer + windows_installer=`ls -1t *.msi | grep "ODFE SQL ODBC Driver" | head -1` + echo $windows_installer + aws s3 cp "$windows_installer" s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/windows/ + build-windows64: + runs-on: windows-latest + defaults: + run: + working-directory: sql-odbc + steps: + - uses: actions/checkout@v1 + - name: add-msbuild-to-path + uses: microsoft/setup-msbuild@v1.0.0 + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + - name: configure-and-build-driver + run: | + .\build_win_release64.ps1 + - name: build-installer + if: success() + run: | + cd cmake-build64 + cmake ..\\src -D CMAKE_INSTALL_PREFIX=$prefix_path\AWSSDK\ -D BUILD_WITH_TESTS=OFF + msbuild .\PACKAGE.vcxproj -p:Configuration=Release + cd .. 
+ - name: create-output + if: always() + run: | + mkdir build + mkdir test-output + mkdir installer + cp .\\bin64\\Release\\*.dll build + cp .\\bin64\\Release\\*.exe build + cp .\\lib64\\Release\\*.lib build + cp .\\cmake-build64\\*.msi installer + - name: upload-build + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows64-build + path: build + - name: upload-installer + if: always() + uses: actions/upload-artifact@v1 + with: + name: windows64-installer + path: installer + - name: upload-artifacts-s3 + if: success() + shell: bash + run: | + cd installer + windows_installer=`ls -1t *.msi | grep "Open Distro for Elasticsearch SQL ODBC Driver" | head -1` + echo $windows_installer + aws s3 cp "$windows_installer" s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/windows/ diff --git a/.github/workflows/release-workflow.yml b/.github/workflows/sql-release-workflow.yml similarity index 100% rename from .github/workflows/release-workflow.yml rename to .github/workflows/sql-release-workflow.yml diff --git a/.github/workflows/test-and-build-workflow.yml b/.github/workflows/sql-test-and-build-workflow.yml similarity index 91% rename from .github/workflows/test-and-build-workflow.yml rename to .github/workflows/sql-test-and-build-workflow.yml index cddcc9a650..ab011418ce 100644 --- a/.github/workflows/test-and-build-workflow.yml +++ b/.github/workflows/sql-test-and-build-workflow.yml @@ -1,4 +1,4 @@ -name: Java CI +name: SQL Java CI on: [push, pull_request] @@ -27,4 +27,4 @@ jobs: uses: actions/upload-artifact@v1 with: name: opendistro-sql - path: opendistro-sql-builds \ No newline at end of file + path: opendistro-sql-builds diff --git a/.github/workflows/sql-workbench-release-workflow.yml b/.github/workflows/sql-workbench-release-workflow.yml new file mode 100644 index 0000000000..d31ad6e706 --- /dev/null +++ b/.github/workflows/sql-workbench-release-workflow.yml @@ -0,0 +1,58 @@ +name: Release 
SQL-Workbench Artifacts + +on: + push: + tags: + - 'v*' + +jobs: + + build: + + runs-on: ubuntu-latest + + steps: + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Checkout Plugin + uses: actions/checkout@v1 + + - name: Checkout Kibana + uses: actions/checkout@v1 + with: + repository: opendistro-for-elasticsearch/kibana-oss + ref: 7.8.0 + token: ${{secrets.OD_ACCESS}} + path: kibana + + - name: Setup Node + uses: actions/setup-node@v1 + with: + node-version: '10.21.0' + + - name: Move Workbench to Plugins Dir + run: | + mkdir kibana/plugins + mv sql-workbench kibana/plugins + + - name: Kibana Plugin Bootstrap + uses: nick-invision/retry@v1 + with: + timeout_minutes: 60 + max_attempts: 3 + command: cd kibana/plugins/sql-workbench; yarn kbn bootstrap + + - name: Build Artifact + run: | + cd kibana/plugins/sql-workbench + yarn build + artifact=`ls ./build/*.zip` + + aws s3 cp $artifact s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/kibana-plugins/opendistro-sql-workbench/ + aws cloudfront create-invalidation --distribution-id ${{ secrets.DISTRIBUTION_ID }} --paths "/downloads/*" diff --git a/.github/workflows/sql-workbench-test-and-build-workflow.yml b/.github/workflows/sql-workbench-test-and-build-workflow.yml new file mode 100644 index 0000000000..487dfc9e8f --- /dev/null +++ b/.github/workflows/sql-workbench-test-and-build-workflow.yml @@ -0,0 +1,46 @@ +name: SQL Workbench Test and Build + +on: [pull_request, push] + +jobs: + + build: + + runs-on: ubuntu-latest + + steps: + - name: Checkout Plugin + uses: actions/checkout@v1 + - name: Checkout Kibana + uses: actions/checkout@v1 + with: + repository: elastic/kibana + ref: v7.8.0 + path: sql/kibana + - name: Setup Node + uses: actions/setup-node@v1 + with: + node-version: '10.21.0' + - name: Move Workbench 
to Plugins Dir + run: | + mkdir kibana/plugins + mv sql-workbench kibana/plugins + - name: Kibana Plugin Bootstrap + uses: nick-invision/retry@v1 + with: + timeout_minutes: 60 + max_attempts: 3 + command: cd kibana/plugins/sql-workbench; yarn kbn bootstrap + - name: Test + run: | + cd kibana/plugins/sql-workbench + yarn test:jest + - name: Build Artifact + run: | + cd kibana/plugins/sql-workbench + yarn build + - name: Upload Artifact + uses: actions/upload-artifact@v1 + with: + name: sql-workbench + path: kibana/plugins/sql-workbench/build diff --git a/sql-cli/.gitignore b/sql-cli/.gitignore new file mode 100644 index 0000000000..34fb496618 --- /dev/null +++ b/sql-cli/.gitignore @@ -0,0 +1,73 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +pyvenv/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +.pytest_cache + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# PyCharm +.idea/ +*.iml + +# Vagrant +.vagrant/ + +# Generated Packages +*.deb +*.rpm + +.vscode/ +venv/ + +.DS_Store diff --git a/sql-cli/CODE_OF_CONDUCT.md b/sql-cli/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..8543edd1cc --- /dev/null +++ b/sql-cli/CODE_OF_CONDUCT.md @@ -0,0 +1,2 @@ +## Code of Conduct +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). 
diff --git a/sql-cli/CONTRIBUTING.md b/sql-cli/CONTRIBUTING.md new file mode 100644 index 0000000000..5cdffe3fd4 --- /dev/null +++ b/sql-cli/CONTRIBUTING.md @@ -0,0 +1,61 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check [existing open](https://github.com/opendistro-for-elasticsearch/sql-cli/issues), or [recently closed](https://github.com/opendistro-for-elasticsearch/sql-cli/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *master* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. 
If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/opendistro-for-elasticsearch/sql-cli/labels/help%20wanted) issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](https://github.com/opendistro-for-elasticsearch/sql-cli/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + +We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 
diff --git a/sql-cli/CONTRIBUTORS.md b/sql-cli/CONTRIBUTORS.md new file mode 100644 index 0000000000..d19fcc1234 --- /dev/null +++ b/sql-cli/CONTRIBUTORS.md @@ -0,0 +1,11 @@ +Contributors in order of last name: + +Abbas Hussain + +Zhongnan Su + +Chloe Zhang + +Anirudh Jadhav + +Alolita Sharma diff --git a/sql-cli/LICENSE.TXT b/sql-cli/LICENSE.TXT new file mode 100644 index 0000000000..7a4a3ea242 --- /dev/null +++ b/sql-cli/LICENSE.TXT @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/sql-cli/NOTICE b/sql-cli/NOTICE new file mode 100644 index 0000000000..713f227952 --- /dev/null +++ b/sql-cli/NOTICE @@ -0,0 +1,2 @@ +Open Distro for Elasticsearch SQL CLI +Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. \ No newline at end of file diff --git a/sql-cli/README.md b/sql-cli/README.md new file mode 100644 index 0000000000..6425739606 --- /dev/null +++ b/sql-cli/README.md @@ -0,0 +1,143 @@ +[![Test and Build Workflow](https://github.com/opendistro-for-elasticsearch/sql-cli/workflows/Test%20and%20Build/badge.svg)](https://github.com/opendistro-for-elasticsearch/sql-cli/actions) +[![Latest Version](https://img.shields.io/pypi/v/odfe-sql-cli.svg)](https://pypi.python.org/pypi/odfe-sql-cli/) +[![Documentation](https://img.shields.io/badge/documentation-blue.svg)](https://opendistro.github.io/for-elasticsearch-docs/docs/sql/cli/) +[![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://discuss.opendistrocommunity.dev/c/sql/) +![PyPi Downloads](https://img.shields.io/pypi/dm/odfe-sql-cli.svg) +![PRs welcome!](https://img.shields.io/badge/PRs-welcome!-success) + +# Open Distro for Elasticsearch SQL CLI + +The SQL CLI component in Open Distro for Elasticsearch (ODFE) is a stand-alone Python application and can be launched by a 'wake' word `odfesql`. 
+ +It only supports [Open Distro for Elasticsearch (ODFE) SQL Plugin](https://opendistro.github.io/for-elasticsearch-docs/docs/sql/). +You must have the ODFE SQL plugin installed to your Elasticsearch instance to connect. +Users can run this CLI from MacOS and Linux, and connect to any valid Elasticsearch end-point such as Amazon Elasticsearch Service (AES). + +![](./screenshots/usage.gif) + + + +## Features + +* Multi-line input +* Autocomplete for SQL syntax and index names +* Syntax highlighting +* Formatted output: +* Tabular format +* Field names with color +* Enabled horizontal display (by default) and vertical display when output is too wide for your terminal, for better visualization +* Pagination for large output +* Connect to Elasticsearch with/without security enabled on either **Elasticsearch OSS or Amazon Elasticsearch Service domains**. +* Supports loading configuration files +* Supports all SQL plugin queries + +## Install + +Launch your local Elasticsearch instance and make sure you have the Open Distro for Elasticsearch SQL plugin installed. + +To install the SQL CLI: + + +1. We suggest you install and activate a python3 virtual environment to avoid changing your local environment: + + ``` + pip install virtualenv + virtualenv venv + cd venv + source ./bin/activate + ``` + + +1. Install the CLI: + + ``` + pip3 install odfe-sql-cli + ``` + + The SQL CLI only works with Python 3, since Python 2 is no longer maintained since 01/01/2020. See https://pythonclock.org/ + + +1. To launch the CLI, run: + + ``` + odfesql https://localhost:9200 --username admin --password admin + ``` + By default, the `odfesql` command connects to [http://localhost:9200](http://localhost:9200/). + + + +## Configure + +When you first launch the SQL CLI, a configuration file is automatically created at `~/.config/odfesql-cli/config` (for MacOS and Linux); the configuration is auto-loaded thereafter.
+ +You can also configure the following connection properties: + + +* `endpoint`: You do not need to specify an option, anything that follows the launch command `odfesql` is considered the endpoint. If you do not provide an endpoint, by default, the SQL CLI connects to [http://localhost:9200](http://localhost:9200/). +* `-u/-w`: Supports username and password for HTTP basic authentication, such as: + * Elasticsearch OSS with [Open Distro for Elasticsearch Security Plugin](https://opendistro.github.io/for-elasticsearch-docs/docs/install/plugins/) installed + * Amazon Elasticsearch Service domain with [Fine Grained Access Control](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/fgac.html) enabled + * Elasticsearch with X-pack security enabled +* `--aws-auth`: Turns on AWS sigV4 authentication to connect to an Amazon Elasticsearch Service endpoint. Use with the AWS CLI (`aws configure`) to retrieve the local AWS configuration to authenticate and connect. + +For a list of all available configurations, see [clirc](https://github.com/opendistro-for-elasticsearch/sql-cli/blob/master/src/conf/clirc). + + + +## Using the CLI + +1. Save the sample [accounts test data](https://github.com/opendistro-for-elasticsearch/sql/blob/master/src/test/resources/doctest/testdata/accounts.json) file. +2. Index the sample data. + + ``` + curl -H "Content-Type: application/x-ndjson" -POST https://localhost:9200/data/_bulk -u admin:admin --insecure --data-binary "@accounts.json" + ``` + + +1. Run a simple SQL command in ODFE SQL CLI: + + ``` + SELECT * FROM accounts; + ``` + + By default, you see a maximum output of 200 rows. To show more results, add a `LIMIT` clause with the desired value. + +The CLI supports all types of queries that ODFE SQL supports.
Refer to [ODFE SQL basic usage documentation](https://github.com/opendistro-for-elasticsearch/sql#basic-usage). + + +## Query options + +Run a single query from the command line with options + + +* `--help`: help page for options +* `-q`: followed by a single query +* `-f`: supports *jdbc/raw* format output +* `-v`: display data vertically +* `-e`: translate SQL to DSL + +## CLI Options + +* `-p`: always use pager to display output +* `--clirc`: provide path of config file to load + +## Code of Conduct + +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). + + + +## Security issue notifications + +If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue for security bugs you report. + +## Licensing + +See the [LICENSE](https://github.com/opendistro-for-elasticsearch/sql-cli/blob/master/LICENSE.TXT) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + + + +## Copyright + +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + diff --git a/sql-cli/THIRD-PARTY b/sql-cli/THIRD-PARTY new file mode 100644 index 0000000000..689a668190 --- /dev/null +++ b/sql-cli/THIRD-PARTY @@ -0,0 +1,590 @@ +** Boto3; version 1.9.187 -- https://github.com/boto/boto3/ +Copyright 2013-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +** coverage; version 4.5.3 -- https://github.com/nedbat/coveragepy +Copyright 2001 Gareth Rees. All rights reserved. +Copyright 2004-2019 Ned Batchelder. All rights reserved. + +Except where noted otherwise, this software is licensed under the Apache +License, Version 2.0 (the "License"); you may not use this work except in +compliance with the License.
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +** elasticsearch 6.5.4; version 6.5.4 -- +https://github.com/elastic/elasticsearch/tree/v6.5.4 +Elasticsearch +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). +** twine; version 1.13.0 -- https://github.com/pypa/twine +none + +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND +DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, and + distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by the + copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all other + entities that control, are controlled by, or are under common control + with that entity. For the purposes of this definition, "control" means + (i) the power, direct or indirect, to cause the direction or management + of such entity, whether by contract or otherwise, or (ii) ownership of + fifty percent (50%) or more of the outstanding shares, or (iii) + beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity exercising + permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation source, + and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but not limited + to compiled object code, generated documentation, and conversions to + other media types. + + "Work" shall mean the work of authorship, whether in Source or Object + form, made available under the License, as indicated by a copyright + notice that is included in or attached to the work (an example is + provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object form, + that is based on (or derived from) the Work and for which the editorial + revisions, annotations, elaborations, or other modifications represent, + as a whole, an original work of authorship. For the purposes of this + License, Derivative Works shall not include works that remain separable + from, or merely link (or bind by name) to the interfaces of, the Work and + Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including the original + version of the Work and any modifications or additions to that Work or + Derivative Works thereof, that is intentionally submitted to Licensor for + inclusion in the Work by the copyright owner or by an individual or Legal + Entity authorized to submit on behalf of the copyright owner. For the + purposes of this definition, "submitted" means any form of electronic, + verbal, or written communication sent to the Licensor or its + representatives, including but not limited to communication on electronic + mailing lists, source code control systems, and issue tracking systems + that are managed by, or on behalf of, the Licensor for the purpose of + discussing and improving the Work, but excluding communication that is + conspicuously marked or otherwise designated in writing by the copyright + owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity on + behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of this + License, each Contributor hereby grants to You a perpetual, worldwide, + non-exclusive, no-charge, royalty-free, irrevocable copyright license to + reproduce, prepare Derivative Works of, publicly display, publicly perform, + sublicense, and distribute the Work and such Derivative Works in Source or + Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of this + License, each Contributor hereby grants to You a perpetual, worldwide, + non-exclusive, no-charge, royalty-free, irrevocable (except as stated in + this section) patent license to make, have made, use, offer to sell, sell, + import, and otherwise transfer the Work, where such license applies only to + those patent claims licensable by such Contributor that are necessarily + infringed by their Contribution(s) alone or by combination of their + Contribution(s) with the Work to which such Contribution(s) was submitted. + If You institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work or a + Contribution incorporated within the Work constitutes direct or contributory + patent infringement, then any patent licenses granted to You under this + License for that Work shall terminate as of the date such litigation is + filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the Work or + Derivative Works thereof in any medium, with or without modifications, and + in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a + copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating + that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You + distribute, all copyright, patent, trademark, and attribution notices + from the Source form of the Work, excluding those notices that do not + pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must include + a readable copy of the attribution notices contained within such NOTICE + file, excluding those notices that do not pertain to any part of the + Derivative Works, in at least one of the following places: within a + NOTICE text file distributed as part of the Derivative Works; within the + Source form or documentation, if provided along with the Derivative + Works; or, within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents of the + NOTICE file are for informational purposes only and do not modify the + License. You may add Your own attribution notices within Derivative Works + that You distribute, alongside or as an addendum to the NOTICE text from + the Work, provided that such additional attribution notices cannot be + construed as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and may + provide additional or different license terms and conditions for use, + reproduction, or distribution of Your modifications, or for any such + Derivative Works as a whole, provided Your use, reproduction, and + distribution of the Work otherwise complies with the conditions stated in + this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, any + Contribution intentionally submitted for inclusion in the Work by You to the + Licensor shall be under the terms and conditions of this License, without + any additional terms or conditions. Notwithstanding the above, nothing + herein shall supersede or modify the terms of any separate license agreement + you may have executed with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, except + as required for reasonable and customary use in describing the origin of the + Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in + writing, Licensor provides the Work (and each Contributor provides its + Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied, including, without limitation, any + warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or + FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining + the appropriateness of using or redistributing the Work and assume any risks + associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, whether + in tort (including negligence), contract, or otherwise, unless required by + applicable law (such as deliberate and grossly negligent acts) or agreed to + in writing, shall any Contributor be liable to You for damages, including + any direct, indirect, special, incidental, or consequential damages of any + character arising as a result of this License or out of the use or inability + to use the Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all other + commercial damages or losses), even if such Contributor has been advised of + the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing the Work + or Derivative Works thereof, You may choose to offer, and charge a fee for, + acceptance of support, warranty, indemnity, or other liability obligations + and/or rights consistent with this License. However, in accepting such + obligations, You may act only on Your own behalf and on Your sole + responsibility, not on behalf of any other Contributor, and only if You + agree to indemnify, defend, and hold each Contributor harmless for any + liability incurred by, or claims asserted against, such Contributor by + reason of your accepting any such warranty or additional liability. END OF + TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification +within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software + +distributed under the License is distributed on an "AS IS" BASIS, + +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +See the License for the specific language governing permissions and + +limitations under the License. + +* For Boto3 see also this required NOTICE: + Copyright 2013-2017 Amazon.com, Inc. or its affiliates. All Rights + Reserved. +* For coverage see also this required NOTICE: + Copyright 2001 Gareth Rees. All rights reserved. + Copyright 2004-2019 Ned Batchelder. All rights reserved. + + Except where noted otherwise, this software is licensed under the Apache + License, Version 2.0 (the "License"); you may not use this work except in + compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +* For elasticsearch 6.5.4 see also this required NOTICE: + Elasticsearch + Copyright 2009-2018 Elasticsearch + + This product includes software developed by The Apache Software + Foundation (http://www.apache.org/). +* For twine see also this required NOTICE: + none + +------ + +** mock; version 3.0.5 -- https://github.com/testing-cabal/mock +Copyright (c) 2003-2013, Michael Foord & the mock team +All rights reserved. + +Copyright (c) 2003-2013, Michael Foord & the mock team +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------ + +** cli-helpers; version 1.2.1 -- https://github.com/dbcli/cli_helpers +Copyright (c) 2017, dbcli +All rights reserved. + +Copyright (c) 2017, dbcli +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of dbcli nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------ + +** Pygments; version 2.4.2 -- +https://bitbucket.org/birkenfeld/pygments-main/src/default/ +Copyright (c) 2006-2019 by the respective authors (see AUTHORS file). +All rights reserved. + +Copyright (c) 2006-2019 by the respective authors (see AUTHORS file). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------ + +** prompt-toolkit; version 2 -- +https://github.com/prompt-toolkit/python-prompt-toolkit +Copyright (c) 2014, Jonathan Slenders +All rights reserved. + +Copyright (c) 2014, Jonathan Slenders +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, +this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------ + +** click; version 7.0 -- https://click.palletsprojects.com/en/7.x/ +Copyright © 2014 by the Pallets team. + +Copyright © 2014 by the Pallets team. + +Some rights reserved. + +Redistribution and use in source and binary forms of the software as well as +documentation, with or without modification, are permitted provided that the +following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. +Neither the name of the copyright holder nor the names of its contributors may +be used to endorse or promote products derived from this software without +specific prior written permission. +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +Click uses parts of optparse written by Gregory P. Ward and maintained by the +Python Software Foundation. This is limited to code in parser.py. + +Copyright © 2001-2006 Gregory P. Ward. All rights reserved. Copyright © +2002-2006 Python Software Foundation. All rights reserved. + +------ + +** pexpect; version 3.3 -- https://github.com/pexpect/pexpect +http://opensource.org/licenses/isc-license.txt + +Copyright (c) 2013-2016, Pexpect development team +Copyright (c) 2012, Noah Spurrier + +ISC LICENSE + + This license is approved by the OSI and FSF as GPL-compatible. + http://opensource.org/licenses/isc-license.txt + + Copyright (c) 2013-2014, Pexpect development team + Copyright (c) 2012, Noah Spurrier + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +------ + +** pyfiglet; version 0.8.post1 -- https://github.com/pwaller/pyfiglet +Copyright © 2007-2018 + Christopher Jones + Stefano Rivera + Peter Waller + And various contributors (see git history) + +PyFiglet: An implementation of figlet written in Python + +The MIT License (MIT) + +Copyright © 2007-2018 + Christopher Jones + Stefano Rivera + Peter Waller + And various contributors (see git history). + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+ +------ + +** pytest; version 4.6.3 -- https://docs.pytest.org/en/latest/ +Copyright (c) 2004-2017 Holger Krekel and others + +The MIT License (MIT) + +Copyright (c) 2004-2017 Holger Krekel and others + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------ + +** setuptools; version v40.0.0 -- https://github.com/pypa/setuptools +Copyright (C) 2016 Jason R Coombs + +Copyright (C) 2016 Jason R Coombs + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------ + +** requests-aws4auth; version 0.9 -- +https://github.com/sam-washington/requests-aws4auth +requests-aws4auth includes the six library. + +six License +=========== + +This is the MIT license: http://www.opensource.org/licenses/mit-license.php + +Copyright (c) 2010-2015 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +The MIT License (MIT) + +Copyright (c) 2015 Sam Washington + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/sql-cli/development_guide.md b/sql-cli/development_guide.md new file mode 100644 index 0000000000..b5dc61210c --- /dev/null +++ b/sql-cli/development_guide.md @@ -0,0 +1,61 @@ +## Development Guide +### Development Environment Set Up +- `pip install virtualenv` +- `virtualenv venv` to create virtual environment for **Python 3** +- `source ./venv/bin/activate` activate virtual env. +- `cd` into project root folder. +- `pip install --editable .` will install all dependencies from `setup.py`. + +### Run CLI +- Start an Elasticsearch instance from either local, Docker with Open Distro SQL plugin, or AWS Elasticsearch +- To launch the cli, use 'wake' word `odfesql` followed by endpoint of your running ES instance. If not specifying +any endpoint, it uses http://localhost:9200 by default. If not provided with port number, http endpoint uses 9200 and +https uses 443 by default. + +### Testing +- Prerequisites + - Build the application + - Start a local Elasticsearch instance (OSS) with + [Open Distro SQL plugin for Elasticsearch](https://opendistro.github.io/for-elasticsearch-docs/docs/sql/) installed + and listening at http://localhost:9200. +- Pytest + - `pip install -r requirements-dev.txt` Install test frameworks including Pytest and mock. + - `cd` into `tests` and run `pytest` +- Refer to [test_plan](./tests/test_plan.md) for manual test guidance. 
+ +### Style +- Use [black](https://github.com/psf/black) to format code, with option of `--line-length 120` + +## Release guide + +- Package Manager: pip +- Repository of software for Python: PyPI + +### Workflow + +1. Update version number + 1. Modify the version number in `__init__.py` under `src` package. It will be used by `setup.py` for release. +2. Create/Update `setup.py` (if needed) + 1. For more details refer to https://packaging.python.org/tutorials/packaging-projects/#creating-setup-py +3. Update README.md, Legal and copyright files (if needed) + 1. Update README.md when there is a critical feature added. + 2. Update `THIRD-PARTY` files if there is a new dependency added. +4. Generate distribution archives + 1. Make sure you have the latest versions of `setuptools` and `wheel` installed: `python3 -m pip install --user --upgrade setuptools wheel` + 2. Run this command from the same directory where `setup.py` is located: `python3 setup.py sdist bdist_wheel` + 3. Check artifacts under `sql-cli/dist/`, there should be a `.tar.gz` file and a `.whl` file with correct version. Remove other deprecated artifacts. +5. Upload the distribution archives to TestPyPI + 1. Register an account on [testPyPI](https://test.pypi.org/) + 2. `python3 -m pip install --user --upgrade twine` + 3. `python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/*` +6. Install your package from TestPyPI and do manual test + 1. `pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple odfe-sql-cli` +7. Upload to PyPI + 1. Register an account on [PyPI](https://pypi.org/), note that these are two separate servers and the credentials from the test server are not shared with the main server. + 2. Use `twine upload dist/*` to upload your package and enter your credentials for the account you registered on PyPI. You don’t need to specify --repository; the package will upload to https://pypi.org/ by default. +8.
Install your package from PyPI using `pip install [your-package-name]` + +### Reference +- https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 +- https://packaging.python.org/tutorials/packaging-projects/ +- https://packaging.python.org/guides/using-testpypi/ \ No newline at end of file diff --git a/sql-cli/release-notes/odfe-sql-cli.release-notes-1.7.0.0.md b/sql-cli/release-notes/odfe-sql-cli.release-notes-1.7.0.0.md new file mode 100644 index 0000000000..3fd095347b --- /dev/null +++ b/sql-cli/release-notes/odfe-sql-cli.release-notes-1.7.0.0.md @@ -0,0 +1,66 @@ +## 2020-05-04 Version 1.7.0.0 + +This is the first official release of Open Distro for Elasticsearch SQL CLI + +ODFE SQL CLI is a stand alone Python application and can be launched by a wake word `odfesql`. It serves as a support only for +[Open Distro SQL plugin for Elasticsearch](https://opendistro.github.io/for-elasticsearch-docs/docs/sql/). User must have ODFE SQL +plugin installed to the Elasticsearch instance for connection. User can run this CLI from MacOS and Linux, and connect to any valid Elasticsearch +endpoint such as AWS Elasticsearch. + +### Features +#### CLI Features +* Feature [#12](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/12): Initial development for SQL CLI + * prototype launch: app -> check connection -> take input -> query ES -> serve raw results(format=jdbc) + * enrich auto-completion corpus + * Convert to vertical output format if fields length is larger than terminal window + * Add style to output fields name. Add logic to confirm choice from user for vertical output + * Add single query without getting into console. Integrate "_explain" api + * Add config base logic. Add pagination for long output + * Add nice little welcome banner.
+ * Add params -f for format_output (jdbc/raw/csv), -v for vertical display + * Initial implementation of connection to OD cluster and AES with auth + * Create test module and write first test + * Add fake test data. Add test utils to set up connection + * [Test] Add pagination test and query test + * Add Test plan and dependency list + * [Test] Add test case for ConnectionFailExeption + * [Feature] initial implementation of index suggestion during auto-completion + * [Feature] display (data retrieved / total hits), and tell user to use "limit" to get more than 200 lines of data + * Added legal and copyright files, + * Added THIRD PARTY file + * Added setup.py for packaging and releasing +* Feature [#24](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/24): Provide user option to toggle to use AWS sigV4 authentication +(issue: [#23](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/23)) + +#### Testing +* Feature [#28](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/28) :Added tox scripts for testing automation + +#### Documentation +* Change [#22](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/22): Update documentation and CLI naming +(issues: [#21](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/21), [#7](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/17)) +* Change [#32](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/32): Update copyright to 2020 +* Change [#33](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/33): Updated package naming and created folder for release notes +* Change [#34](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/34): Added CONTRIBUTORS.md +* Change [#36](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/36): Polish README.md and test_plan.md + + +### Enhancements +* Enhancement [#31](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/31): Added github action workflow for CI/CD +(issue: 
[#20](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/21)) +* Enhancement [#35](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/35): Update github action test and build workflow to spin up ES instance + + +### BugFixes +* BugFix[#12](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/12): Initial development for SQL CLI + * Fix the logic of passing self-constructed settings + * [Fix] get rid of unicode warning. Fix meta info display + * [fix] Refactor executor code + * [Fix] Fix test cases corresponding to fraction display. + * [Fix] fix code style using Black, update documentation and comments +* BugFix[#18](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/18): Fix typos, remove unused dependencies, add .gitignore and legal file +(issue: [#15](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/15)) +* BugFix[#19](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/19): Fix test failures +(issue: [#16](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/16)) +* BugFix[#26](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/26): Update usage gif, fix http/https issue when connect to AWS Elasticsearch (issue: [#25](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/25)) + + diff --git a/sql-cli/release-notes/odfe-sql-cli.release-notes-1.8.0.0.md b/sql-cli/release-notes/odfe-sql-cli.release-notes-1.8.0.0.md new file mode 100644 index 0000000000..92d7184127 --- /dev/null +++ b/sql-cli/release-notes/odfe-sql-cli.release-notes-1.8.0.0.md @@ -0,0 +1,5 @@ +## 2020-05-18 Version 1.8.0.0 + +### Features +#### Elasticsearch and ODFE SQL Plugin Compatibility +* Feature [#41](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/41): Elasticsearch 7.7.0 and ODFE SQL Plugin 1.8.0 compatibility (issue: [#40](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/40)) diff --git a/sql-cli/release-notes/odfe-sql-cli.release-notes-1.9.0.0.md 
b/sql-cli/release-notes/odfe-sql-cli.release-notes-1.9.0.0.md new file mode 100644 index 0000000000..26855244d4 --- /dev/null +++ b/sql-cli/release-notes/odfe-sql-cli.release-notes-1.9.0.0.md @@ -0,0 +1,13 @@ +## 2020-06-24 Version 1.9.0.0 (Current) + +### Features +#### Elasticsearch and ODFE SQL Plugin Compatibility +* Feature [#55](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/55): Elasticsearch 7.8.0 and ODFE SQL Plugin 1.9.0 compatibility +(issue: [#54](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/54)) + +#### Documentation +* Feature [#48](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/48): Added README badges + +### Enhancements +* Enhancement [#45](https://github.com/opendistro-for-elasticsearch/sql-cli/pull/45): Update project layout for better module import +(issue: [#43](https://github.com/opendistro-for-elasticsearch/sql-cli/issues/43)) diff --git a/sql-cli/requirements-dev.txt b/sql-cli/requirements-dev.txt new file mode 100644 index 0000000000..e9a7532743 --- /dev/null +++ b/sql-cli/requirements-dev.txt @@ -0,0 +1,5 @@ +pytest==4.6.3 +mock==3.0.5 +pexpect==3.3 +twine==1.13.0 +tox>=1.9.2 \ No newline at end of file diff --git a/sql-cli/screenshots/usage.gif b/sql-cli/screenshots/usage.gif new file mode 100644 index 0000000000..e55c47ba18 Binary files /dev/null and b/sql-cli/screenshots/usage.gif differ diff --git a/sql-cli/setup.py b/sql-cli/setup.py new file mode 100644 index 0000000000..08a5f90b7e --- /dev/null +++ b/sql-cli/setup.py @@ -0,0 +1,77 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. 
This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +import re +import ast + +from setuptools import setup, find_packages + +install_requirements = [ + "click == 7.1.1", + "prompt_toolkit == 2.0.6", + "Pygments == 2.6.1", + "cli_helpers[styles] == 1.2.1", + "elasticsearch == 7.5.1", + "pyfiglet == 0.8.post1", + "boto3 == 1.9.181", + "requests-aws4auth == 0.9", +] + +_version_re = re.compile(r"__version__\s+=\s+(.*)") + +with open("src/odfe_sql_cli/__init__.py", "rb") as f: + version = str( + ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)) + ) + +description = "Open Distro for Elasticsearch SQL CLI with auto-completion and syntax highlighting" + +with open("README.md", "r") as fh: + long_description = fh.read() + +setup( + name="odfe-sql-cli", + author="Open Distro for Elasticsearch", + author_email="odfe-infra@amazon.com", + version=version, + license="Apache 2.0", + url="https://opendistro.github.io/for-elasticsearch-docs/docs/sql/cli/", + packages=find_packages('src'), + package_dir={'': 'src'}, + package_data={"odfe_sql_cli": ["conf/clirc", "esliterals/esliterals.json"]}, + description=description, + long_description=long_description, + long_description_content_type="text/markdown", + install_requires=install_requirements, + entry_points={"console_scripts": ["odfesql=odfe_sql_cli.main:cli"]}, + classifiers=[ + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: Unix", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + 
"Programming Language :: SQL", + "Topic :: Database", + "Topic :: Database :: Front-Ends", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries :: Python Modules", + ], + python_requires='>=3.0' +) diff --git a/sql-cli/src/odfe_sql_cli/__init__.py b/sql-cli/src/odfe_sql_cli/__init__.py new file mode 100644 index 0000000000..3851f216ed --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/__init__.py @@ -0,0 +1,15 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +__version__ = "1.9.0.0" diff --git a/sql-cli/src/odfe_sql_cli/conf/__init__.py b/sql-cli/src/odfe_sql_cli/conf/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sql-cli/src/odfe_sql_cli/conf/clirc b/sql-cli/src/odfe_sql_cli/conf/clirc new file mode 100644 index 0000000000..133dd2e19b --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/conf/clirc @@ -0,0 +1,94 @@ +# Copyright 2020, Amazon Web Services Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# vi: ft=dosini +[main] + +# Multi-line mode allows breaking up the sql statements into multiple lines. If +# this is set to True, then the end of the statements must have a semi-colon. +# If this is set to False then sql statements can't be split into multiple +# lines. End of line (return) is considered as the end of the statement. +multi_line = True + +# If multi_line_mode is set to "odfesql_cli", in multi-line mode, [Enter] will execute +# the current input if the input ends in a semicolon. +# If multi_line_mode is set to "safe", in multi-line mode, [Enter] will always +# insert a newline, and [Esc] [Enter] or [Alt]-[Enter] must be used to execute +# a command. +multi_line_mode = odfesql_cli + +# log_file location. +# In Unix/Linux: ~/.conf/odfesql-cli/log +# In Windows: %USERPROFILE%\AppData\Local\dbcli\odfesql-cli\log +# %USERPROFILE% is typically C:\Users\{username} +log_file = default + +# history_file location. +# In Unix/Linux: ~/.conf/odfesql-cli/history +# In Windows: %USERPROFILE%\AppData\Local\dbcli\odfesql-cli\history +# %USERPROFILE% is typically C:\Users\{username} +history_file = default + +# Default log level. Possible values: "CRITICAL", "ERROR", "WARNING", "INFO" +# and "DEBUG". "NONE" disables logging. +log_level = INFO + +# Table format. Possible values: psql, plain, simple, grid, fancy_grid, pipe, +# ascii, double, github, orgtbl, rst, mediawiki, html, latex, latex_booktabs, +# textile, moinmoin, jira, vertical, tsv, csv. +# Recommended: psql, fancy_grid and grid. +table_format = psql + +# Syntax Style. Possible values: manni, igor, xcode, vim, autumn, vs, rrt, +# native, perldoc, borland, tango, emacs, friendly, monokai, paraiso-dark, +# colorful, murphy, bw, pastie, paraiso-light, trac, default, fruity +syntax_style = default + +# Set threshold for row limit prompt. Use 0 to disable prompt. +# maybe not now, since elasticsearch opendisto sql plugin returns 200 rows of data by default if not +# using LIMIT. 
+row_limit = 1000 + +# Character used to left pad multi-line queries to match the prompt size. +multiline_continuation_char = '.' + +# The string used in place of a null value. +null_string = 'null' + +# Custom colors for the completion menu, toolbar, etc. +[colors] +completion-menu.completion.current = 'bg:#ffffff #000000' +completion-menu.completion = 'bg:#008888 #ffffff' +completion-menu.meta.completion.current = 'bg:#44aaaa #000000' +completion-menu.meta.completion = 'bg:#448888 #ffffff' +completion-menu.multi-column-meta = 'bg:#aaffff #000000' +scrollbar.arrow = 'bg:#003333' +scrollbar = 'bg:#00aaaa' +selected = '#ffffff bg:#6666aa' +search = '#ffffff bg:#4444aa' +search.current = '#ffffff bg:#44aa44' +bottom-toolbar = 'bg:#222222 #aaaaaa' +bottom-toolbar.off = 'bg:#222222 #888888' +bottom-toolbar.on = 'bg:#222222 #ffffff' +search-toolbar = 'noinherit bold' +search-toolbar.text = 'nobold' +system-toolbar = 'noinherit bold' +arg-toolbar = 'noinherit bold' +arg-toolbar.text = 'nobold' +bottom-toolbar.transaction.valid = 'bg:#222222 #00ff5f bold' +bottom-toolbar.transaction.failed = 'bg:#222222 #ff005f bold' + +# style classes for colored table output +output.header = "#00ff5f bold" +output.odd-row = "" +output.even-row = "" \ No newline at end of file diff --git a/sql-cli/src/odfe_sql_cli/config.py b/sql-cli/src/odfe_sql_cli/config.py new file mode 100644 index 0000000000..571c9a65a7 --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/config.py @@ -0,0 +1,85 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. 
See the License for the specific language governing +permissions and limitations under the License. +""" +import errno +import os +import platform +import shutil + +from os.path import expanduser, exists, dirname +from configobj import ConfigObj + + +def config_location(): + """Return absolute conf file path according to different OS.""" + if "XDG_CONFIG_HOME" in os.environ: + return "%s/odfesql-cli/" % expanduser(os.environ["XDG_CONFIG_HOME"]) + elif platform.system() == "Windows": + # USERPROFILE is typically C:\Users\{username} + return "%s\\AppData\\Local\\dbcli\\odfesql-cli\\" % os.getenv("USERPROFILE") + else: + return expanduser("~/.config/odfesql-cli/") + + +def _load_config(user_config, default_config=None): + config = ConfigObj() + config.merge(ConfigObj(default_config, interpolation=False)) + config.merge(ConfigObj(expanduser(user_config), interpolation=False, encoding="utf-8")) + config.filename = expanduser(user_config) + + return config + + +def ensure_dir_exists(path): + """ + Try to create config file in OS. + + Ignore existing destination. Raise error for other OSError, such as errno.EACCES (Permission denied), + errno.ENOSPC (No space left on device) + """ + parent_dir = expanduser(dirname(path)) + try: + os.makedirs(parent_dir) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def _write_default_config(source, destination, overwrite=False): + destination = expanduser(destination) + if not overwrite and exists(destination): + return + + ensure_dir_exists(destination) + shutil.copyfile(source, destination) + + +# https://stackoverflow.com/questions/40193112/python-setuptools-distribute-configuration-files-to-os-specific-directories +def get_config(clirc_file=None): + """ + Get config for odfesql cli. + + This config comes from either existing config in the OS, or create a config file in the OS, and write default config + including in the package to it. 
+ """ + from .conf import __file__ as package_root + + package_root = os.path.dirname(package_root) + + clirc_file = clirc_file or "%sconfig" % config_location() + default_config = os.path.join(package_root, "clirc") + + _write_default_config(default_config, clirc_file) + + return _load_config(clirc_file, default_config) diff --git a/sql-cli/src/odfe_sql_cli/esbuffer.py b/sql-cli/src/odfe_sql_cli/esbuffer.py new file mode 100644 index 0000000000..651df4ee8f --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/esbuffer.py @@ -0,0 +1,46 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. 
+""" +from __future__ import unicode_literals + +from prompt_toolkit.enums import DEFAULT_BUFFER +from prompt_toolkit.filters import Condition +from prompt_toolkit.application import get_app + + +def es_is_multiline(odfesql_cli): + """Return function that returns boolean to enable/unable multiline mode.""" + + @Condition + def cond(): + doc = get_app().layout.get_buffer_by_name(DEFAULT_BUFFER).document + + if not odfesql_cli.multi_line: + return False + if odfesql_cli.multiline_mode == "safe": + return True + else: + return not _multiline_exception(doc.text) + + return cond + + +def _is_complete(sql): + # A complete command is an sql statement that ends with a semicolon + return sql.endswith(";") + + +def _multiline_exception(text): + text = text.strip() + return _is_complete(text) diff --git a/sql-cli/src/odfe_sql_cli/esconnection.py b/sql-cli/src/odfe_sql_cli/esconnection.py new file mode 100644 index 0000000000..ec4559e0d9 --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/esconnection.py @@ -0,0 +1,171 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. 
+""" +import boto3 +import click +import logging +import ssl +import sys +import urllib3 + +from elasticsearch import Elasticsearch, RequestsHttpConnection +from elasticsearch.exceptions import ConnectionError, RequestError +from elasticsearch.connection import create_ssl_context +from requests_aws4auth import AWS4Auth + + +class ESConnection: + """ESConnection instances are used to set up and maintain client to Elasticsearch cluster, + as well as send user's SQL query to Elasticsearch. + """ + + def __init__(self, endpoint=None, http_auth=None, use_aws_authentication=False): + """Initialize an ESConnection instance. + + Set up client and get indices list. + + :param endpoint: an url in the format of "http:localhost:9200" + :param http_auth: a tuple in the format of (username, password) + """ + self.client = None + self.ssl_context = None + self.es_version = None + self.plugins = None + self.aws_auth = None + self.indices_list = [] + self.endpoint = endpoint + self.http_auth = http_auth + self.use_aws_authentication = use_aws_authentication + + def get_indices(self): + if self.client: + res = self.client.indices.get_alias().keys() + self.indices_list = list(res) + + def get_aes_client(self): + service = "es" + session = boto3.Session() + credentials = session.get_credentials() + region = session.region_name + + if credentials is not None: + self.aws_auth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service) + else: + click.secho(message="Can not retrieve your AWS credentials, check your AWS config", fg="red") + + aes_client = Elasticsearch( + hosts=[self.endpoint], + http_auth=self.aws_auth, + use_ssl=True, + verify_certs=True, + connection_class=RequestsHttpConnection, + ) + + return aes_client + + def get_open_distro_client(self): + ssl_context = self.ssl_context = create_ssl_context() + ssl_context.check_hostname = False + ssl_context.verify_mode = ssl.CERT_NONE + + open_distro_client = Elasticsearch( + [self.endpoint], 
http_auth=self.http_auth, verify_certs=False, ssl_context=ssl_context + ) + + return open_distro_client + + def is_sql_plugin_installed(self, es_client): + self.plugins = es_client.cat.plugins(params={"s": "component", "v": "true"}) + sql_plugin_name_list = ["opendistro-sql", "opendistro_sql"] + return any(x in self.plugins for x in sql_plugin_name_list) + + def set_connection(self, is_reconnect=False): + urllib3.disable_warnings() + logging.captureWarnings(True) + + if self.http_auth: + es_client = self.get_open_distro_client() + + elif self.use_aws_authentication: + es_client = self.get_aes_client() + else: + es_client = Elasticsearch([self.endpoint], verify_certs=True) + + # check connection. check Open Distro Elasticsearch SQL plugin availability. + try: + if not self.is_sql_plugin_installed(es_client): + click.secho( + message="Must have Open Distro SQL plugin installed in your Elasticsearch " + "instance!\nCheck this out: https://github.com/opendistro-for-elasticsearch/sql", + fg="red", + ) + click.echo(self.plugins) + sys.exit() + + # info() may throw ConnectionError, if connection fails to establish + info = es_client.info() + self.es_version = info["version"]["number"] + self.client = es_client + self.get_indices() + + except ConnectionError as error: + if is_reconnect: + # re-throw error + raise error + else: + click.secho(message="Can not connect to endpoint %s" % self.endpoint, fg="red") + click.echo(repr(error)) + sys.exit(0) + + def handle_server_close_connection(self): + """Used during CLI execution.""" + try: + click.secho(message="Reconnecting...", fg="green") + self.set_connection(is_reconnect=True) + click.secho(message="Reconnected! Please run query again", fg="green") + except ConnectionError as reconnection_err: + click.secho(message="Connection Failed. 
Check your ES is running and then come back", fg="red") + click.secho(repr(reconnection_err), err=True, fg="red") + + def execute_query(self, query, output_format="jdbc", explain=False, use_console=True): + """ + Handle user input, send SQL query and get response. + + :param use_console: use console to interact with user, otherwise it's single query + :param query: SQL query + :param output_format: jdbc/csv + :param explain: if True, use _explain API. + :return: raw http response + """ + + # TODO: consider add evaluator/handler to filter obviously-invalid input, + # to save cost of http client. + # deal with input + final_query = query.strip().strip(";") + + try: + data = self.client.transport.perform_request( + url="/_opendistro/_sql/_explain" if explain else "/_opendistro/_sql/", + method="POST", + params=None if explain else {"format": output_format}, + body={"query": final_query}, + ) + return data + + # handle client lost during execution + except ConnectionError: + if use_console: + self.handle_server_close_connection() + except RequestError as error: + click.secho(message=str(error.info["error"]), fg="red") diff --git a/sql-cli/src/odfe_sql_cli/esliterals/__init__.py b/sql-cli/src/odfe_sql_cli/esliterals/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sql-cli/src/odfe_sql_cli/esliterals/esliterals.json b/sql-cli/src/odfe_sql_cli/esliterals/esliterals.json new file mode 100644 index 0000000000..59f4f45fbf --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/esliterals/esliterals.json @@ -0,0 +1,58 @@ +{ + "keywords": [ + "ADD", + "ALIASES", + "ALL", + "AND", + "AS", + "ASC", + "BETWEEN", + "BY", + "DATE", + "DELETE", + "DESC", + "DESCRIBE", + "FROM", + "FULL", + "GROUP BY", + "HAVING", + "IN", + "INTO", + "IS", + "INNER", + "JOIN", + "KEY", + "LEFT", + "LIKE", + "LIMIT", + "MINUS", + "NOT", + "NULLS", + "ON", + "OR", + "ORDER BY", + "SELECT", + "SHOW", + "TABLES", + "UNION", + "WHEN", + "WHERE" + ], + "functions": [ + "AVG", + "CONCAT_WS", 
+ "COUNT", + "DISTINCT", + "FLOOR", + "ISNULL", + "LOG", + "LOG10", + "MAX", + "MID", + "MIN", + "ROUND", + "SUBSTRING", + "SUM", + "SQRT" + ] +} \ No newline at end of file diff --git a/sql-cli/src/odfe_sql_cli/esstyle.py b/sql-cli/src/odfe_sql_cli/esstyle.py new file mode 100644 index 0000000000..77806cacbd --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/esstyle.py @@ -0,0 +1,95 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +from __future__ import unicode_literals + +import logging + +import pygments.styles +from pygments.token import string_to_tokentype, Token +from pygments.style import Style as PygmentsStyle +from pygments.util import ClassNotFound +from prompt_toolkit.styles.pygments import style_from_pygments_cls +from prompt_toolkit.styles import merge_styles, Style + +logger = logging.getLogger(__name__) + +# map Pygments tokens (ptk 1.0) to class names (ptk 2.0). 
+TOKEN_TO_PROMPT_STYLE = { + Token.Menu.Completions.Completion.Current: "completion-menu.completion.current", + Token.Menu.Completions.Completion: "completion-menu.completion", + Token.Menu.Completions.Meta.Current: "completion-menu.meta.completion.current", + Token.Menu.Completions.Meta: "completion-menu.meta.completion", + Token.Menu.Completions.MultiColumnMeta: "completion-menu.multi-column-meta", + Token.Menu.Completions.ProgressButton: "scrollbar.arrow", # best guess + Token.Menu.Completions.ProgressBar: "scrollbar", # best guess + Token.SelectedText: "selected", + Token.SearchMatch: "search", + Token.SearchMatch.Current: "search.current", + Token.Toolbar: "bottom-toolbar", + Token.Toolbar.Off: "bottom-toolbar.off", + Token.Toolbar.On: "bottom-toolbar.on", + Token.Toolbar.Search: "search-toolbar", + Token.Toolbar.Search.Text: "search-toolbar.text", + Token.Toolbar.System: "system-toolbar", + Token.Toolbar.Arg: "arg-toolbar", + Token.Toolbar.Arg.Text: "arg-toolbar.text", + Token.Toolbar.Transaction.Valid: "bottom-toolbar.transaction.valid", + Token.Toolbar.Transaction.Failed: "bottom-toolbar.transaction.failed", + Token.Output.Header: "output.header", + Token.Output.OddRow: "output.odd-row", + Token.Output.EvenRow: "output.even-row", +} + +# reverse dict for cli_helpers, because they still expect Pygments tokens. +PROMPT_STYLE_TO_TOKEN = {v: k for k, v in TOKEN_TO_PROMPT_STYLE.items()} + + +def style_factory(name, cli_style): + try: + style = pygments.styles.get_style_by_name(name) + except ClassNotFound: + style = pygments.styles.get_style_by_name("native") + + prompt_styles = [] + + for token in cli_style: + # treat as prompt style name (2.0). 
See default style names here: + # https://github.com/jonathanslenders/python-prompt-toolkit/blob/master/prompt_toolkit/styles/defaults.py + prompt_styles.append((token, cli_style[token])) + + override_style = Style([("bottom-toolbar", "noreverse")]) + return merge_styles([style_from_pygments_cls(style), override_style, Style(prompt_styles)]) + + +def style_factory_output(name, cli_style): + try: + style = pygments.styles.get_style_by_name(name).styles + except ClassNotFound: + style = pygments.styles.get_style_by_name("native").styles + + for token in cli_style: + + if token in PROMPT_STYLE_TO_TOKEN: + token_type = PROMPT_STYLE_TO_TOKEN[token] + style.update({token_type: cli_style[token]}) + else: + # TODO: cli helpers will have to switch to ptk.Style + logger.error("Unhandled style / class name: %s", token) + + class OutputStyle(PygmentsStyle): + default_style = "" + styles = style + + return OutputStyle diff --git a/sql-cli/src/odfe_sql_cli/formatter.py b/sql-cli/src/odfe_sql_cli/formatter.py new file mode 100644 index 0000000000..acfb401fe2 --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/formatter.py @@ -0,0 +1,99 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. 
+""" +import click +import itertools + +from cli_helpers.tabular_output import TabularOutputFormatter +from cli_helpers.tabular_output.preprocessors import format_numbers + +click.disable_unicode_literals_warning = True + + +class Formatter: + """Formatter instance is used to format the data retrieved from Elasticsearch.""" + + def __init__(self, settings): + """A formatter can be customized by passing settings as a parameter.""" + self.settings = settings + self.table_format = "vertical" if self.settings.is_vertical else self.settings.table_format + self.max_width = self.settings.max_width + + def format_array(val): + if val is None: + return self.settings.missingval + if not isinstance(val, list): + return val + return "[" + ",".join(str(format_array(e)) for e in val) + "]" + + def format_arrays(field_data, headers, **_): + field_data = list(field_data) + for row in field_data: + row[:] = [format_array(val) if isinstance(val, list) else val for val in row] + + return field_data, headers + + self.output_kwargs = { + "sep_title": "RECORD {n}", + "sep_character": "-", + "sep_length": (1, 25), + "missing_value": self.settings.missingval, + "preprocessors": (format_numbers, format_arrays), + "disable_numparse": True, + "preserve_whitespace": True, + "style": self.settings.style_output, + } + + def format_output(self, data): + """Format data. 
+ + :param data: raw data get from ES + :return: formatted output, it's either table or vertical format + """ + formatter = TabularOutputFormatter(format_name=self.table_format) + + # parse response data + datarows = data["datarows"] + schema = data["schema"] + total_hits = data["total"] + cur_size = data["size"] + # unused data for now, + fields = [] + types = [] + + # get header and type as lists, for future usage + for i in schema: + fields.append(i["name"]) + types.append(i["type"]) + + output = formatter.format_output(datarows, fields, **self.output_kwargs) + output_message = "fetched rows / total rows = %d/%d" % (cur_size, total_hits) + + # Open Distro for ES sql has a restriction of retrieving 200 rows of data by default + if total_hits > 200 == cur_size: + output_message += "\n" + "Attention: Use LIMIT keyword when retrieving more than 200 rows of data" + + # check width overflow, change format_name for better visual effect + first_line = next(output) + output = itertools.chain([output_message], [first_line], output) + + if len(first_line) > self.max_width: + click.secho(message="Output longer than terminal width", fg="red") + if click.confirm("Do you want to display data vertically for better visual effect?"): + output = formatter.format_output(datarows, fields, format_name="vertical", **self.output_kwargs) + output = itertools.chain([output_message], output) + + # TODO: if decided to add row_limit. Refer to pgcli -> main -> line 866. + + return output diff --git a/sql-cli/src/odfe_sql_cli/main.py b/sql-cli/src/odfe_sql_cli/main.py new file mode 100644 index 0000000000..726e5c9406 --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/main.py @@ -0,0 +1,112 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. 
+A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +from __future__ import unicode_literals + +import click +import sys + +from .config import config_location +from .esconnection import ESConnection +from .utils import OutputSettings +from .odfesql_cli import OdfeSqlCli +from .formatter import Formatter + +click.disable_unicode_literals_warning = True + + +@click.command() +@click.argument("endpoint", default="http://localhost:9200") +@click.option("-q", "--query", "query", type=click.STRING, help="Run single query in non-interactive mode") +@click.option("-e", "--explain", "explain", is_flag=True, help="Explain SQL to ES DSL") +@click.option( + "--clirc", + default=config_location() + "config", + envvar="CLIRC", + help="Location of clirc file.", + type=click.Path(dir_okay=False), +) +@click.option( + "-f", + "--format", + "result_format", + type=click.STRING, + default="jdbc", + help="Specify format of output, jdbc/csv. By default, it's jdbc", +) +@click.option( + "-v", + "--vertical", + "is_vertical", + is_flag=True, + default=False, + help="Convert output from horizontal to vertical. Only used for non-interactive mode", +) +@click.option("-u", "--username", help="Username to connect to the Elasticsearch") +@click.option("-w", "--password", help="password corresponding to username") +@click.option( + "-p", + "--pager", + "always_use_pager", + is_flag=True, + default=False, + help="Always use pager to display output. 
If not specified, smart pager mode will be used according to the \ + length/width of output", +) +@click.option( + "--aws-auth", + "use_aws_authentication", + is_flag=True, + default=False, + help="Use AWS sigV4 to connect to AWS ELasticsearch domain", +) +def cli(endpoint, query, explain, clirc, result_format, is_vertical, username, password, always_use_pager, + use_aws_authentication): + """ + Provide endpoint for Elasticsearch client. + By default, it uses http://localhost:9200 to connect. + """ + + if username and password: + http_auth = (username, password) + else: + http_auth = None + + # TODO add validation for endpoint to avoid the cost of connecting to some obviously invalid endpoint + + # handle single query without more interaction with user + if query: + es_executor = ESConnection(endpoint, http_auth, use_aws_authentication) + es_executor.set_connection() + if explain: + output = es_executor.execute_query(query, explain=True, use_console=False) + else: + output = es_executor.execute_query(query, output_format=result_format, use_console=False) + if output and result_format == "jdbc": + settings = OutputSettings(table_format="psql", is_vertical=is_vertical) + formatter = Formatter(settings) + output = formatter.format_output(output) + output = "\n".join(output) + + click.echo(output) + sys.exit(0) + + # use console to interact with user + odfesql_cli = OdfeSqlCli(clirc_file=clirc, always_use_pager=always_use_pager, use_aws_authentication=use_aws_authentication) + odfesql_cli.connect(endpoint, http_auth) + odfesql_cli.run_cli() + + +if __name__ == "__main__": + cli() diff --git a/sql-cli/src/odfe_sql_cli/odfesql_cli.py b/sql-cli/src/odfe_sql_cli/odfesql_cli.py new file mode 100644 index 0000000000..184a1649c1 --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/odfesql_cli.py @@ -0,0 +1,184 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). 
+You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +from __future__ import unicode_literals + +import click +import re +import pyfiglet +import os +import json + +from prompt_toolkit.completion import WordCompleter +from prompt_toolkit.enums import DEFAULT_BUFFER +from prompt_toolkit.shortcuts import PromptSession +from prompt_toolkit.filters import HasFocus, IsDone +from prompt_toolkit.lexers import PygmentsLexer +from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightMatchingBracketProcessor +from prompt_toolkit.auto_suggest import AutoSuggestFromHistory +from pygments.lexers.sql import SqlLexer + +from .config import get_config +from .esconnection import ESConnection +from .esbuffer import es_is_multiline +from .esstyle import style_factory, style_factory_output +from .formatter import Formatter +from .utils import OutputSettings +from . 
import __version__ + + +# Ref: https://stackoverflow.com/questions/30425105/filter-special-chars-such-as-color-codes-from-shell-output +COLOR_CODE_REGEX = re.compile(r"\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))") + +click.disable_unicode_literals_warning = True + + +class OdfeSqlCli: + """OdfeSqlCli instance is used to build and run the ODFE SQL CLI.""" + + def __init__(self, clirc_file=None, always_use_pager=False, use_aws_authentication=False): + # Load conf file + config = self.config = get_config(clirc_file) + literal = self.literal = self._get_literals() + + self.prompt_app = None + self.es_executor = None + self.always_use_pager = always_use_pager + self.use_aws_authentication = use_aws_authentication + self.keywords_list = literal["keywords"] + self.functions_list = literal["functions"] + self.syntax_style = config["main"]["syntax_style"] + self.cli_style = config["colors"] + self.table_format = config["main"]["table_format"] + self.multiline_continuation_char = config["main"]["multiline_continuation_char"] + self.multi_line = config["main"].as_bool("multi_line") + self.multiline_mode = config["main"].get("multi_line_mode", "src") + self.null_string = config["main"].get("null_string", "null") + self.style_output = style_factory_output(self.syntax_style, self.cli_style) + + def build_cli(self): + # TODO: Optimize index suggestion to serve indices options only at the needed position, such as 'from' + indices_list = self.es_executor.indices_list + sql_completer = WordCompleter(self.keywords_list + self.functions_list + indices_list, ignore_case=True) + + # https://stackoverflow.com/a/13726418 denote multiple unused arguments of callback in Python + def get_continuation(width, *_): + continuation = self.multiline_continuation_char * (width - 1) + " " + return [("class:continuation", continuation)] + + prompt_app = PromptSession( + lexer=PygmentsLexer(SqlLexer), + completer=sql_completer, + complete_while_typing=True, + # TODO: add history, refer to pgcli approach + # 
history=history, + style=style_factory(self.syntax_style, self.cli_style), + prompt_continuation=get_continuation, + multiline=es_is_multiline(self), + auto_suggest=AutoSuggestFromHistory(), + input_processors=[ + ConditionalProcessor( + processor=HighlightMatchingBracketProcessor(chars="[](){}"), + filter=HasFocus(DEFAULT_BUFFER) & ~IsDone(), + ) + ], + tempfile_suffix=".sql", + ) + + return prompt_app + + def run_cli(self): + """ + Print welcome page, goodbye message. + + Run the CLI and keep listening to user's input. + """ + self.prompt_app = self.build_cli() + + settings = OutputSettings( + max_width=self.prompt_app.output.get_size().columns, + style_output=self.style_output, + table_format=self.table_format, + missingval=self.null_string, + ) + + # print Banner + banner = pyfiglet.figlet_format("Open Distro", font="slant") + print(banner) + + # print info on the welcome page + print("Server: Open Distro for ES %s" % self.es_executor.es_version) + print("CLI Version: %s" % __version__) + print("Endpoint: %s" % self.es_executor.endpoint) + + while True: + try: + text = self.prompt_app.prompt(message="odfesql> ") + except KeyboardInterrupt: + continue # Control-C pressed. Try again. + except EOFError: + break # Control-D pressed. 
+ + try: + output = self.es_executor.execute_query(text) + if output: + formatter = Formatter(settings) + formatted_output = formatter.format_output(output) + self.echo_via_pager("\n".join(formatted_output)) + + except Exception as e: + print(repr(e)) + + print("See you next search!") + + def is_too_wide(self, line): + """Will this line be too wide to fit into terminal?""" + if not self.prompt_app: + return False + return len(COLOR_CODE_REGEX.sub("", line)) > self.prompt_app.output.get_size().columns + + def is_too_tall(self, lines): + """Are there too many lines to fit into terminal?""" + if not self.prompt_app: + return False + return len(lines) >= (self.prompt_app.output.get_size().rows - 4) + + def echo_via_pager(self, text, color=None): + lines = text.split("\n") + if self.always_use_pager: + click.echo_via_pager(text, color=color) + + elif self.is_too_tall(lines) or any(self.is_too_wide(l) for l in lines): + click.echo_via_pager(text, color=color) + else: + click.echo(text, color=color) + + def connect(self, endpoint, http_auth=None): + self.es_executor = ESConnection(endpoint, http_auth, self.use_aws_authentication) + self.es_executor.set_connection() + + def _get_literals(self): + """Parse "esliterals.json" with literal type of SQL "keywords" and "functions", which + are SQL keywords and functions supported by Open Distro SQL Plugin. + + :return: a dict that is parsed from esliterals.json + """ + from .esliterals import __file__ as package_root + + package_root = os.path.dirname(package_root) + + literal_file = os.path.join(package_root, "esliterals.json") + with open(literal_file) as f: + literals = json.load(f) + return literals diff --git a/sql-cli/src/odfe_sql_cli/utils.py b/sql-cli/src/odfe_sql_cli/utils.py new file mode 100644 index 0000000000..e3fe2bd3f5 --- /dev/null +++ b/sql-cli/src/odfe_sql_cli/utils.py @@ -0,0 +1,21 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +import sys + +from collections import namedtuple + +OutputSettings = namedtuple("OutputSettings", "table_format is_vertical max_width style_output missingval") + +OutputSettings.__new__.__defaults__ = (None, False, sys.maxsize, None, "null") diff --git a/sql-cli/tests/__init__.py b/sql-cli/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sql-cli/tests/conftest.py b/sql-cli/tests/conftest.py new file mode 100644 index 0000000000..9d60f35ce1 --- /dev/null +++ b/sql-cli/tests/conftest.py @@ -0,0 +1,49 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" + +""" +We can define the fixture functions in this file to make them +accessible across multiple test modules. 
+""" +import os +import pytest + +from .utils import create_index, delete_index, get_connection + + +@pytest.fixture(scope="function") +def connection(): + test_connection = get_connection() + create_index(test_connection) + + yield test_connection + delete_index(test_connection) + + +@pytest.fixture(scope="function") +def default_config_location(): + from src.odfe_sql_cli.conf import __file__ as package_root + + package_root = os.path.dirname(package_root) + default_config = os.path.join(package_root, "clirc") + + yield default_config + + +@pytest.fixture(scope="session", autouse=True) +def temp_config(tmpdir_factory): + # this function runs on start of test session. + # use temporary directory for conf home so user conf will not be used + os.environ["XDG_CONFIG_HOME"] = str(tmpdir_factory.mktemp("data")) diff --git a/sql-cli/tests/pytest.ini b/sql-cli/tests/pytest.ini new file mode 100644 index 0000000000..f78774051e --- /dev/null +++ b/sql-cli/tests/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +addopts=--capture=sys --showlocals \ No newline at end of file diff --git a/sql-cli/tests/test_config.py b/sql-cli/tests/test_config.py new file mode 100644 index 0000000000..d94ec91eaf --- /dev/null +++ b/sql-cli/tests/test_config.py @@ -0,0 +1,42 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. 
+""" +import os +import stat +import pytest + +from src.odfe_sql_cli.config import ensure_dir_exists + + +class TestConfig: + def test_ensure_file_parent(self, tmpdir): + subdir = tmpdir.join("subdir") + rcfile = subdir.join("rcfile") + ensure_dir_exists(str(rcfile)) + + def test_ensure_existing_dir(self, tmpdir): + rcfile = str(tmpdir.mkdir("subdir").join("rcfile")) + + # should just not raise + ensure_dir_exists(rcfile) + + def test_ensure_other_create_error(self, tmpdir): + subdir = tmpdir.join("subdir") + rcfile = subdir.join("rcfile") + + # trigger an oserror that isn't "directory already exists" + os.chmod(str(tmpdir), stat.S_IREAD) + + with pytest.raises(OSError): + ensure_dir_exists(str(rcfile)) diff --git a/sql-cli/tests/test_data/accounts.json b/sql-cli/tests/test_data/accounts.json new file mode 100644 index 0000000000..22254e3cd6 --- /dev/null +++ b/sql-cli/tests/test_data/accounts.json @@ -0,0 +1,1000 @@ +{"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} +{"account_number":6,"balance":5686,"firstname":"Hattie","lastname":"Bond","age":36,"gender":"M","address":"671 Bristol Street","employer":"Netagy","email":"hattiebond@netagy.com","city":"Dante","state":"TN"} +{"account_number":13,"balance":32838,"firstname":"Nanette","lastname":"Bates","age":28,"gender":"F","address":"789 Madison Street","employer":"Quility","email":"nanettebates@quility.com","city":"Nogal","state":"VA"} +{"account_number":18,"balance":4180,"firstname":"Dale","lastname":"Adams","age":33,"gender":"M","address":"467 Hutchinson Court","employer":"Boink","email":"daleadams@boink.com","city":"Orick","state":"MD"} +{"account_number":20,"balance":16418,"firstname":"Elinor","lastname":"Ratliff","age":36,"gender":"M","address":"282 Kings Place","employer":"Scentric","email":"elinorratliff@scentric.com","city":"Ribera","state":"WA"} 
+{"account_number":25,"balance":40540,"firstname":"Virginia","lastname":"Ayala","age":39,"gender":"F","address":"171 Putnam Avenue","employer":"Filodyne","email":"virginiaayala@filodyne.com","city":"Nicholson","state":"PA"} +{"account_number":32,"balance":48086,"firstname":"Dillard","lastname":"Mcpherson","age":34,"gender":"F","address":"702 Quentin Street","employer":"Quailcom","email":"dillardmcpherson@quailcom.com","city":"Veguita","state":"IN"} +{"account_number":37,"balance":18612,"firstname":"Mcgee","lastname":"Mooney","age":39,"gender":"M","address":"826 Fillmore Place","employer":"Reversus","email":"mcgeemooney@reversus.com","city":"Tooleville","state":"OK"} +{"account_number":44,"balance":34487,"firstname":"Aurelia","lastname":"Harding","age":37,"gender":"M","address":"502 Baycliff Terrace","employer":"Orbalix","email":"aureliaharding@orbalix.com","city":"Yardville","state":"DE"} +{"account_number":49,"balance":29104,"firstname":"Fulton","lastname":"Holt","age":23,"gender":"F","address":"451 Humboldt Street","employer":"Anocha","email":"fultonholt@anocha.com","city":"Sunriver","state":"RI"} +{"account_number":51,"balance":14097,"firstname":"Burton","lastname":"Meyers","age":31,"gender":"F","address":"334 River Street","employer":"Bezal","email":"burtonmeyers@bezal.com","city":"Jacksonburg","state":"MO"} +{"account_number":56,"balance":14992,"firstname":"Josie","lastname":"Nelson","age":32,"gender":"M","address":"857 Tabor Court","employer":"Emtrac","email":"josienelson@emtrac.com","city":"Sunnyside","state":"UT"} +{"account_number":63,"balance":6077,"firstname":"Hughes","lastname":"Owens","age":30,"gender":"F","address":"510 Sedgwick Street","employer":"Valpreal","email":"hughesowens@valpreal.com","city":"Guilford","state":"KS"} +{"account_number":68,"balance":44214,"firstname":"Hall","lastname":"Key","age":25,"gender":"F","address":"927 Bay Parkway","employer":"Eventex","email":"hallkey@eventex.com","city":"Shawmut","state":"CA"} 
+{"account_number":70,"balance":38172,"firstname":"Deidre","lastname":"Thompson","age":33,"gender":"F","address":"685 School Lane","employer":"Netplode","email":"deidrethompson@netplode.com","city":"Chestnut","state":"GA"} +{"account_number":75,"balance":40500,"firstname":"Sandoval","lastname":"Kramer","age":22,"gender":"F","address":"166 Irvington Place","employer":"Overfork","email":"sandovalkramer@overfork.com","city":"Limestone","state":"NH"} +{"account_number":82,"balance":41412,"firstname":"Concetta","lastname":"Barnes","age":39,"gender":"F","address":"195 Bayview Place","employer":"Fitcore","email":"concettabarnes@fitcore.com","city":"Summerfield","state":"NC"} +{"account_number":87,"balance":1133,"firstname":"Hewitt","lastname":"Kidd","age":22,"gender":"M","address":"446 Halleck Street","employer":"Isologics","email":"hewittkidd@isologics.com","city":"Coalmont","state":"ME"} +{"account_number":94,"balance":41060,"firstname":"Brittany","lastname":"Cabrera","age":30,"gender":"F","address":"183 Kathleen Court","employer":"Mixers","email":"brittanycabrera@mixers.com","city":"Cornucopia","state":"AZ"} +{"account_number":99,"balance":47159,"firstname":"Ratliff","lastname":"Heath","age":39,"gender":"F","address":"806 Rockwell Place","employer":"Zappix","email":"ratliffheath@zappix.com","city":"Shaft","state":"ND"} +{"account_number":102,"balance":29712,"firstname":"Dena","lastname":"Olson","age":27,"gender":"F","address":"759 Newkirk Avenue","employer":"Hinway","email":"denaolson@hinway.com","city":"Choctaw","state":"NJ"} +{"account_number":107,"balance":48844,"firstname":"Randi","lastname":"Rich","age":28,"gender":"M","address":"694 Jefferson Street","employer":"Netplax","email":"randirich@netplax.com","city":"Bellfountain","state":"SC"} +{"account_number":114,"balance":43045,"firstname":"Josephine","lastname":"Joseph","age":31,"gender":"F","address":"451 Oriental Court","employer":"Turnabout","email":"josephinejoseph@turnabout.com","city":"Sedley","state":"AL"} 
+{"account_number":119,"balance":49222,"firstname":"Laverne","lastname":"Johnson","age":28,"gender":"F","address":"302 Howard Place","employer":"Senmei","email":"lavernejohnson@senmei.com","city":"Herlong","state":"DC"} +{"account_number":121,"balance":19594,"firstname":"Acevedo","lastname":"Dorsey","age":32,"gender":"M","address":"479 Nova Court","employer":"Netropic","email":"acevedodorsey@netropic.com","city":"Islandia","state":"CT"} +{"account_number":126,"balance":3607,"firstname":"Effie","lastname":"Gates","age":39,"gender":"F","address":"620 National Drive","employer":"Digitalus","email":"effiegates@digitalus.com","city":"Blodgett","state":"MD"} +{"account_number":133,"balance":26135,"firstname":"Deena","lastname":"Richmond","age":36,"gender":"F","address":"646 Underhill Avenue","employer":"Sunclipse","email":"deenarichmond@sunclipse.com","city":"Austinburg","state":"SC"} +{"account_number":138,"balance":9006,"firstname":"Daniel","lastname":"Arnold","age":39,"gender":"F","address":"422 Malbone Street","employer":"Ecstasia","email":"danielarnold@ecstasia.com","city":"Gardiner","state":"MO"} +{"account_number":140,"balance":26696,"firstname":"Cotton","lastname":"Christensen","age":32,"gender":"M","address":"878 Schermerhorn Street","employer":"Prowaste","email":"cottonchristensen@prowaste.com","city":"Mayfair","state":"LA"} +{"account_number":145,"balance":47406,"firstname":"Rowena","lastname":"Wilkinson","age":32,"gender":"M","address":"891 Elton Street","employer":"Asimiline","email":"rowenawilkinson@asimiline.com","city":"Ripley","state":"NH"} +{"account_number":152,"balance":8088,"firstname":"Wolfe","lastname":"Rocha","age":21,"gender":"M","address":"457 Guernsey Street","employer":"Hivedom","email":"wolferocha@hivedom.com","city":"Adelino","state":"MS"} +{"account_number":157,"balance":39868,"firstname":"Claudia","lastname":"Terry","age":20,"gender":"F","address":"132 Gunnison 
Court","employer":"Lumbrex","email":"claudiaterry@lumbrex.com","city":"Castleton","state":"MD"} +{"account_number":164,"balance":9101,"firstname":"Cummings","lastname":"Little","age":26,"gender":"F","address":"308 Schaefer Street","employer":"Comtrak","email":"cummingslittle@comtrak.com","city":"Chaparrito","state":"WI"} +{"account_number":169,"balance":45953,"firstname":"Hollie","lastname":"Osborn","age":34,"gender":"M","address":"671 Seaview Court","employer":"Musaphics","email":"hollieosborn@musaphics.com","city":"Hanover","state":"GA"} +{"account_number":171,"balance":7091,"firstname":"Nelda","lastname":"Hopper","age":39,"gender":"M","address":"742 Prospect Place","employer":"Equicom","email":"neldahopper@equicom.com","city":"Finderne","state":"SC"} +{"account_number":176,"balance":18607,"firstname":"Kemp","lastname":"Walters","age":28,"gender":"F","address":"906 Howard Avenue","employer":"Eyewax","email":"kempwalters@eyewax.com","city":"Why","state":"KY"} +{"account_number":183,"balance":14223,"firstname":"Hudson","lastname":"English","age":26,"gender":"F","address":"823 Herkimer Place","employer":"Xinware","email":"hudsonenglish@xinware.com","city":"Robbins","state":"ND"} +{"account_number":188,"balance":41504,"firstname":"Tia","lastname":"Miranda","age":24,"gender":"F","address":"583 Ainslie Street","employer":"Jasper","email":"tiamiranda@jasper.com","city":"Summerset","state":"UT"} +{"account_number":190,"balance":3150,"firstname":"Blake","lastname":"Davidson","age":30,"gender":"F","address":"636 Diamond Street","employer":"Quantasis","email":"blakedavidson@quantasis.com","city":"Crumpler","state":"KY"} +{"account_number":195,"balance":5025,"firstname":"Kaye","lastname":"Gibson","age":31,"gender":"M","address":"955 Hopkins Street","employer":"Zork","email":"kayegibson@zork.com","city":"Ola","state":"WY"} +{"account_number":203,"balance":21890,"firstname":"Eve","lastname":"Wyatt","age":33,"gender":"M","address":"435 Furman 
Street","employer":"Assitia","email":"evewyatt@assitia.com","city":"Jamestown","state":"MN"} +{"account_number":208,"balance":40760,"firstname":"Garcia","lastname":"Hess","age":26,"gender":"F","address":"810 Nostrand Avenue","employer":"Quiltigen","email":"garciahess@quiltigen.com","city":"Brooktrails","state":"GA"} +{"account_number":210,"balance":33946,"firstname":"Cherry","lastname":"Carey","age":24,"gender":"M","address":"539 Tiffany Place","employer":"Martgo","email":"cherrycarey@martgo.com","city":"Fairacres","state":"AK"} +{"account_number":215,"balance":37427,"firstname":"Copeland","lastname":"Solomon","age":20,"gender":"M","address":"741 McDonald Avenue","employer":"Recognia","email":"copelandsolomon@recognia.com","city":"Edmund","state":"ME"} +{"account_number":222,"balance":14764,"firstname":"Rachelle","lastname":"Rice","age":36,"gender":"M","address":"333 Narrows Avenue","employer":"Enaut","email":"rachellerice@enaut.com","city":"Wright","state":"AZ"} +{"account_number":227,"balance":19780,"firstname":"Coleman","lastname":"Berg","age":22,"gender":"M","address":"776 Little Street","employer":"Exoteric","email":"colemanberg@exoteric.com","city":"Eagleville","state":"WV"} +{"account_number":234,"balance":44207,"firstname":"Betty","lastname":"Hall","age":37,"gender":"F","address":"709 Garfield Place","employer":"Miraclis","email":"bettyhall@miraclis.com","city":"Bendon","state":"NY"} +{"account_number":239,"balance":25719,"firstname":"Chang","lastname":"Boyer","age":36,"gender":"M","address":"895 Brigham Street","employer":"Qaboos","email":"changboyer@qaboos.com","city":"Belgreen","state":"NH"} +{"account_number":241,"balance":25379,"firstname":"Schroeder","lastname":"Harrington","age":26,"gender":"M","address":"610 Tapscott Avenue","employer":"Otherway","email":"schroederharrington@otherway.com","city":"Ebro","state":"TX"} +{"account_number":246,"balance":28405,"firstname":"Katheryn","lastname":"Foster","age":21,"gender":"F","address":"259 Kane 
Street","employer":"Quantalia","email":"katherynfoster@quantalia.com","city":"Bath","state":"TX"} +{"account_number":253,"balance":20240,"firstname":"Melissa","lastname":"Gould","age":31,"gender":"M","address":"440 Fuller Place","employer":"Buzzopia","email":"melissagould@buzzopia.com","city":"Lumberton","state":"MD"} +{"account_number":258,"balance":5712,"firstname":"Lindsey","lastname":"Hawkins","age":37,"gender":"M","address":"706 Frost Street","employer":"Enormo","email":"lindseyhawkins@enormo.com","city":"Gardners","state":"AK"} +{"account_number":260,"balance":2726,"firstname":"Kari","lastname":"Skinner","age":30,"gender":"F","address":"735 Losee Terrace","employer":"Singavera","email":"kariskinner@singavera.com","city":"Rushford","state":"WV"} +{"account_number":265,"balance":46910,"firstname":"Marion","lastname":"Schneider","age":26,"gender":"F","address":"574 Everett Avenue","employer":"Evidends","email":"marionschneider@evidends.com","city":"Maplewood","state":"WY"} +{"account_number":272,"balance":19253,"firstname":"Lilly","lastname":"Morgan","age":25,"gender":"F","address":"689 Fleet Street","employer":"Biolive","email":"lillymorgan@biolive.com","city":"Sunbury","state":"OH"} +{"account_number":277,"balance":29564,"firstname":"Romero","lastname":"Lott","age":31,"gender":"M","address":"456 Danforth Street","employer":"Plasto","email":"romerolott@plasto.com","city":"Vincent","state":"VT"} +{"account_number":284,"balance":22806,"firstname":"Randolph","lastname":"Banks","age":29,"gender":"M","address":"875 Hamilton Avenue","employer":"Caxt","email":"randolphbanks@caxt.com","city":"Crawfordsville","state":"WA"} +{"account_number":289,"balance":7798,"firstname":"Blair","lastname":"Church","age":29,"gender":"M","address":"370 Sutton Street","employer":"Cubix","email":"blairchurch@cubix.com","city":"Nile","state":"NH"} +{"account_number":291,"balance":19955,"firstname":"Lynn","lastname":"Pollard","age":40,"gender":"F","address":"685 Pierrepont 
Street","employer":"Slambda","email":"lynnpollard@slambda.com","city":"Mappsville","state":"ID"} +{"account_number":296,"balance":24606,"firstname":"Rosa","lastname":"Oliver","age":34,"gender":"M","address":"168 Woodbine Street","employer":"Idetica","email":"rosaoliver@idetica.com","city":"Robinson","state":"WY"} +{"account_number":304,"balance":28647,"firstname":"Palmer","lastname":"Clark","age":35,"gender":"M","address":"866 Boulevard Court","employer":"Maximind","email":"palmerclark@maximind.com","city":"Avalon","state":"NH"} +{"account_number":309,"balance":3830,"firstname":"Rosemarie","lastname":"Nieves","age":30,"gender":"M","address":"206 Alice Court","employer":"Zounds","email":"rosemarienieves@zounds.com","city":"Ferney","state":"AR"} +{"account_number":311,"balance":13388,"firstname":"Vinson","lastname":"Ballard","age":23,"gender":"F","address":"960 Glendale Court","employer":"Gynk","email":"vinsonballard@gynk.com","city":"Fairforest","state":"WY"} +{"account_number":316,"balance":8214,"firstname":"Anita","lastname":"Ewing","age":32,"gender":"M","address":"396 Lombardy Street","employer":"Panzent","email":"anitaewing@panzent.com","city":"Neahkahnie","state":"WY"} +{"account_number":323,"balance":42230,"firstname":"Chelsea","lastname":"Gamble","age":34,"gender":"F","address":"356 Dare Court","employer":"Isosphere","email":"chelseagamble@isosphere.com","city":"Dundee","state":"MD"} +{"account_number":328,"balance":12523,"firstname":"Good","lastname":"Campbell","age":27,"gender":"F","address":"438 Hicks Street","employer":"Gracker","email":"goodcampbell@gracker.com","city":"Marion","state":"CA"} +{"account_number":330,"balance":41620,"firstname":"Yvette","lastname":"Browning","age":34,"gender":"F","address":"431 Beekman Place","employer":"Marketoid","email":"yvettebrowning@marketoid.com","city":"Talpa","state":"CO"} +{"account_number":335,"balance":35433,"firstname":"Vera","lastname":"Hansen","age":24,"gender":"M","address":"252 Bushwick 
Avenue","employer":"Zanilla","email":"verahansen@zanilla.com","city":"Manila","state":"TN"} +{"account_number":342,"balance":33670,"firstname":"Vivian","lastname":"Wells","age":36,"gender":"M","address":"570 Cobek Court","employer":"Nutralab","email":"vivianwells@nutralab.com","city":"Fontanelle","state":"OK"} +{"account_number":347,"balance":36038,"firstname":"Gould","lastname":"Carson","age":24,"gender":"F","address":"784 Pulaski Street","employer":"Mobildata","email":"gouldcarson@mobildata.com","city":"Goochland","state":"MI"} +{"account_number":354,"balance":21294,"firstname":"Kidd","lastname":"Mclean","age":22,"gender":"M","address":"691 Saratoga Avenue","employer":"Ronbert","email":"kiddmclean@ronbert.com","city":"Tioga","state":"ME"} +{"account_number":359,"balance":29927,"firstname":"Vanessa","lastname":"Harvey","age":28,"gender":"F","address":"679 Rutledge Street","employer":"Zentime","email":"vanessaharvey@zentime.com","city":"Williston","state":"IL"} +{"account_number":361,"balance":23659,"firstname":"Noreen","lastname":"Shelton","age":36,"gender":"M","address":"702 Tillary Street","employer":"Medmex","email":"noreenshelton@medmex.com","city":"Derwood","state":"NH"} +{"account_number":366,"balance":42368,"firstname":"Lydia","lastname":"Cooke","age":31,"gender":"M","address":"470 Coleman Street","employer":"Comstar","email":"lydiacooke@comstar.com","city":"Datil","state":"TN"} +{"account_number":373,"balance":9671,"firstname":"Simpson","lastname":"Carpenter","age":21,"gender":"M","address":"837 Horace Court","employer":"Snips","email":"simpsoncarpenter@snips.com","city":"Tolu","state":"MA"} +{"account_number":378,"balance":27100,"firstname":"Watson","lastname":"Simpson","age":36,"gender":"F","address":"644 Thomas Street","employer":"Wrapture","email":"watsonsimpson@wrapture.com","city":"Keller","state":"TX"} +{"account_number":380,"balance":35628,"firstname":"Fernandez","lastname":"Reid","age":33,"gender":"F","address":"154 Melba 
Court","employer":"Cosmosis","email":"fernandezreid@cosmosis.com","city":"Boyd","state":"NE"} +{"account_number":385,"balance":11022,"firstname":"Rosalinda","lastname":"Valencia","age":22,"gender":"M","address":"933 Lloyd Street","employer":"Zoarere","email":"rosalindavalencia@zoarere.com","city":"Waverly","state":"GA"} +{"account_number":392,"balance":31613,"firstname":"Dotson","lastname":"Dean","age":35,"gender":"M","address":"136 Ford Street","employer":"Petigems","email":"dotsondean@petigems.com","city":"Chical","state":"SD"} +{"account_number":397,"balance":37418,"firstname":"Leonard","lastname":"Gray","age":36,"gender":"F","address":"840 Morgan Avenue","employer":"Recritube","email":"leonardgray@recritube.com","city":"Edenburg","state":"AL"} +{"account_number":400,"balance":20685,"firstname":"Kane","lastname":"King","age":21,"gender":"F","address":"405 Cornelia Street","employer":"Tri@Tribalog","email":"kaneking@tri@tribalog.com","city":"Gulf","state":"VT"} +{"account_number":405,"balance":5679,"firstname":"Strickland","lastname":"Fuller","age":26,"gender":"M","address":"990 Concord Street","employer":"Digique","email":"stricklandfuller@digique.com","city":"Southmont","state":"NV"} +{"account_number":412,"balance":27436,"firstname":"Ilene","lastname":"Abbott","age":26,"gender":"M","address":"846 Vine Street","employer":"Typhonica","email":"ileneabbott@typhonica.com","city":"Cedarville","state":"VT"} +{"account_number":417,"balance":1788,"firstname":"Wheeler","lastname":"Ayers","age":35,"gender":"F","address":"677 Hope Street","employer":"Fortean","email":"wheelerayers@fortean.com","city":"Ironton","state":"PA"} +{"account_number":424,"balance":36818,"firstname":"Tracie","lastname":"Gregory","age":34,"gender":"M","address":"112 Hunterfly Place","employer":"Comstruct","email":"traciegregory@comstruct.com","city":"Onton","state":"TN"} +{"account_number":429,"balance":46970,"firstname":"Cantu","lastname":"Lindsey","age":31,"gender":"M","address":"404 Willoughby 
Avenue","employer":"Inquala","email":"cantulindsey@inquala.com","city":"Cowiche","state":"IA"} +{"account_number":431,"balance":13136,"firstname":"Laurie","lastname":"Shaw","age":26,"gender":"F","address":"263 Aviation Road","employer":"Zillanet","email":"laurieshaw@zillanet.com","city":"Harmon","state":"WV"} +{"account_number":436,"balance":27585,"firstname":"Alexander","lastname":"Sargent","age":23,"gender":"M","address":"363 Albemarle Road","employer":"Fangold","email":"alexandersargent@fangold.com","city":"Calpine","state":"OR"} +{"account_number":443,"balance":7588,"firstname":"Huff","lastname":"Thomas","age":23,"gender":"M","address":"538 Erskine Loop","employer":"Accufarm","email":"huffthomas@accufarm.com","city":"Corinne","state":"AL"} +{"account_number":448,"balance":22776,"firstname":"Adriana","lastname":"Mcfadden","age":35,"gender":"F","address":"984 Woodside Avenue","employer":"Telequiet","email":"adrianamcfadden@telequiet.com","city":"Darrtown","state":"WI"} +{"account_number":450,"balance":2643,"firstname":"Bradford","lastname":"Nielsen","age":25,"gender":"M","address":"487 Keen Court","employer":"Exovent","email":"bradfordnielsen@exovent.com","city":"Hamilton","state":"DE"} +{"account_number":455,"balance":39556,"firstname":"Lynn","lastname":"Tran","age":36,"gender":"M","address":"741 Richmond Street","employer":"Optyk","email":"lynntran@optyk.com","city":"Clinton","state":"WV"} +{"account_number":462,"balance":10871,"firstname":"Calderon","lastname":"Day","age":27,"gender":"M","address":"810 Milford Street","employer":"Cofine","email":"calderonday@cofine.com","city":"Kula","state":"OK"} +{"account_number":467,"balance":6312,"firstname":"Angelica","lastname":"May","age":32,"gender":"F","address":"384 Karweg Place","employer":"Keeg","email":"angelicamay@keeg.com","city":"Tetherow","state":"IA"} +{"account_number":474,"balance":35896,"firstname":"Obrien","lastname":"Walton","age":40,"gender":"F","address":"192 Ide 
Court","employer":"Suremax","email":"obrienwalton@suremax.com","city":"Crucible","state":"UT"} +{"account_number":479,"balance":31865,"firstname":"Cameron","lastname":"Ross","age":40,"gender":"M","address":"904 Bouck Court","employer":"Telpod","email":"cameronross@telpod.com","city":"Nord","state":"MO"} +{"account_number":481,"balance":20024,"firstname":"Lina","lastname":"Stanley","age":33,"gender":"M","address":"361 Hanover Place","employer":"Strozen","email":"linastanley@strozen.com","city":"Wyoming","state":"NC"} +{"account_number":486,"balance":35902,"firstname":"Dixie","lastname":"Fuentes","age":22,"gender":"F","address":"991 Applegate Court","employer":"Portico","email":"dixiefuentes@portico.com","city":"Salix","state":"VA"} +{"account_number":493,"balance":5871,"firstname":"Campbell","lastname":"Best","age":24,"gender":"M","address":"297 Friel Place","employer":"Fanfare","email":"campbellbest@fanfare.com","city":"Kidder","state":"GA"} +{"account_number":498,"balance":10516,"firstname":"Stella","lastname":"Hinton","age":39,"gender":"F","address":"649 Columbia Place","employer":"Flyboyz","email":"stellahinton@flyboyz.com","city":"Crenshaw","state":"SC"} +{"account_number":501,"balance":16572,"firstname":"Kelley","lastname":"Ochoa","age":36,"gender":"M","address":"451 Clifton Place","employer":"Bluplanet","email":"kelleyochoa@bluplanet.com","city":"Gouglersville","state":"CT"} +{"account_number":506,"balance":43440,"firstname":"Davidson","lastname":"Salas","age":28,"gender":"M","address":"731 Cleveland Street","employer":"Sequitur","email":"davidsonsalas@sequitur.com","city":"Lloyd","state":"ME"} +{"account_number":513,"balance":30040,"firstname":"Maryellen","lastname":"Rose","age":37,"gender":"F","address":"428 Durland Place","employer":"Waterbaby","email":"maryellenrose@waterbaby.com","city":"Kiskimere","state":"RI"} +{"account_number":518,"balance":48954,"firstname":"Finch","lastname":"Curtis","age":29,"gender":"F","address":"137 Ryder 
Street","employer":"Viagrand","email":"finchcurtis@viagrand.com","city":"Riverton","state":"MO"} +{"account_number":520,"balance":27987,"firstname":"Brandy","lastname":"Calhoun","age":32,"gender":"M","address":"818 Harden Street","employer":"Maxemia","email":"brandycalhoun@maxemia.com","city":"Sidman","state":"OR"} +{"account_number":525,"balance":23545,"firstname":"Holly","lastname":"Miles","age":25,"gender":"M","address":"746 Ludlam Place","employer":"Xurban","email":"hollymiles@xurban.com","city":"Harold","state":"AR"} +{"account_number":532,"balance":17207,"firstname":"Hardin","lastname":"Kirk","age":26,"gender":"M","address":"268 Canarsie Road","employer":"Exposa","email":"hardinkirk@exposa.com","city":"Stouchsburg","state":"IL"} +{"account_number":537,"balance":31069,"firstname":"Morin","lastname":"Frost","age":29,"gender":"M","address":"910 Lake Street","employer":"Primordia","email":"morinfrost@primordia.com","city":"Rivera","state":"DE"} +{"account_number":544,"balance":41735,"firstname":"Short","lastname":"Dennis","age":21,"gender":"F","address":"908 Glen Street","employer":"Minga","email":"shortdennis@minga.com","city":"Dale","state":"KY"} +{"account_number":549,"balance":1932,"firstname":"Jacqueline","lastname":"Maxwell","age":40,"gender":"M","address":"444 Schenck Place","employer":"Fuelworks","email":"jacquelinemaxwell@fuelworks.com","city":"Oretta","state":"OR"} +{"account_number":551,"balance":21732,"firstname":"Milagros","lastname":"Travis","age":27,"gender":"F","address":"380 Murdock Court","employer":"Sloganaut","email":"milagrostravis@sloganaut.com","city":"Homeland","state":"AR"} +{"account_number":556,"balance":36420,"firstname":"Collier","lastname":"Odonnell","age":35,"gender":"M","address":"591 Nolans Lane","employer":"Sultraxin","email":"collierodonnell@sultraxin.com","city":"Fulford","state":"MD"} +{"account_number":563,"balance":43403,"firstname":"Morgan","lastname":"Torres","age":30,"gender":"F","address":"672 Belvidere 
Street","employer":"Quonata","email":"morgantorres@quonata.com","city":"Hollymead","state":"KY"} +{"account_number":568,"balance":36628,"firstname":"Lesa","lastname":"Maynard","age":29,"gender":"F","address":"295 Whitty Lane","employer":"Coash","email":"lesamaynard@coash.com","city":"Broadlands","state":"VT"} +{"account_number":570,"balance":26751,"firstname":"Church","lastname":"Mercado","age":24,"gender":"F","address":"892 Wyckoff Street","employer":"Xymonk","email":"churchmercado@xymonk.com","city":"Gloucester","state":"KY"} +{"account_number":575,"balance":12588,"firstname":"Buchanan","lastname":"Pope","age":39,"gender":"M","address":"581 Sumner Place","employer":"Stucco","email":"buchananpope@stucco.com","city":"Ellerslie","state":"MD"} +{"account_number":582,"balance":33371,"firstname":"Manning","lastname":"Guthrie","age":24,"gender":"F","address":"271 Jodie Court","employer":"Xerex","email":"manningguthrie@xerex.com","city":"Breinigsville","state":"NM"} +{"account_number":587,"balance":3468,"firstname":"Carly","lastname":"Johns","age":33,"gender":"M","address":"390 Noll Street","employer":"Gallaxia","email":"carlyjohns@gallaxia.com","city":"Emison","state":"DC"} +{"account_number":594,"balance":28194,"firstname":"Golden","lastname":"Donovan","age":26,"gender":"M","address":"199 Jewel Street","employer":"Organica","email":"goldendonovan@organica.com","city":"Macdona","state":"RI"} +{"account_number":599,"balance":11944,"firstname":"Joanna","lastname":"Jennings","age":36,"gender":"F","address":"318 Irving Street","employer":"Extremo","email":"joannajennings@extremo.com","city":"Bartley","state":"MI"} +{"account_number":602,"balance":38699,"firstname":"Mcgowan","lastname":"Mcclain","age":33,"gender":"M","address":"361 Stoddard Place","employer":"Oatfarm","email":"mcgowanmcclain@oatfarm.com","city":"Kapowsin","state":"MI"} +{"account_number":607,"balance":38350,"firstname":"White","lastname":"Small","age":38,"gender":"F","address":"736 Judge 
Street","employer":"Immunics","email":"whitesmall@immunics.com","city":"Fairfield","state":"HI"} +{"account_number":614,"balance":13157,"firstname":"Salazar","lastname":"Howard","age":35,"gender":"F","address":"847 Imlay Street","employer":"Retrack","email":"salazarhoward@retrack.com","city":"Grill","state":"FL"} +{"account_number":619,"balance":48755,"firstname":"Grimes","lastname":"Reynolds","age":36,"gender":"M","address":"378 Denton Place","employer":"Frenex","email":"grimesreynolds@frenex.com","city":"Murillo","state":"LA"} +{"account_number":621,"balance":35480,"firstname":"Leslie","lastname":"Sloan","age":26,"gender":"F","address":"336 Kansas Place","employer":"Dancity","email":"lesliesloan@dancity.com","city":"Corriganville","state":"AR"} +{"account_number":626,"balance":19498,"firstname":"Ava","lastname":"Richardson","age":31,"gender":"F","address":"666 Nautilus Avenue","employer":"Cinaster","email":"avarichardson@cinaster.com","city":"Sutton","state":"AL"} +{"account_number":633,"balance":35874,"firstname":"Conner","lastname":"Ramos","age":34,"gender":"M","address":"575 Agate Court","employer":"Insource","email":"connerramos@insource.com","city":"Madaket","state":"OK"} +{"account_number":638,"balance":2658,"firstname":"Bridget","lastname":"Gallegos","age":31,"gender":"M","address":"383 Wogan Terrace","employer":"Songlines","email":"bridgetgallegos@songlines.com","city":"Linganore","state":"WA"} +{"account_number":640,"balance":35596,"firstname":"Candace","lastname":"Hancock","age":25,"gender":"M","address":"574 Riverdale Avenue","employer":"Animalia","email":"candacehancock@animalia.com","city":"Blandburg","state":"KY"} +{"account_number":645,"balance":29362,"firstname":"Edwina","lastname":"Hutchinson","age":26,"gender":"F","address":"892 Pacific Street","employer":"Essensia","email":"edwinahutchinson@essensia.com","city":"Dowling","state":"NE"} 
+{"account_number":652,"balance":17363,"firstname":"Bonner","lastname":"Garner","age":26,"gender":"M","address":"219 Grafton Street","employer":"Utarian","email":"bonnergarner@utarian.com","city":"Vandiver","state":"PA"} +{"account_number":657,"balance":40475,"firstname":"Kathleen","lastname":"Wilder","age":34,"gender":"F","address":"286 Sutter Avenue","employer":"Solgan","email":"kathleenwilder@solgan.com","city":"Graniteville","state":"MI"} +{"account_number":664,"balance":16163,"firstname":"Hart","lastname":"Mccormick","age":40,"gender":"M","address":"144 Guider Avenue","employer":"Dyno","email":"hartmccormick@dyno.com","city":"Carbonville","state":"ID"} +{"account_number":669,"balance":16934,"firstname":"Jewel","lastname":"Estrada","age":28,"gender":"M","address":"896 Meeker Avenue","employer":"Zilla","email":"jewelestrada@zilla.com","city":"Goodville","state":"PA"} +{"account_number":671,"balance":29029,"firstname":"Antoinette","lastname":"Cook","age":34,"gender":"M","address":"375 Cumberland Street","employer":"Harmoney","email":"antoinettecook@harmoney.com","city":"Bergoo","state":"VT"} +{"account_number":676,"balance":23842,"firstname":"Lisa","lastname":"Dudley","age":34,"gender":"M","address":"506 Vanderveer Street","employer":"Tropoli","email":"lisadudley@tropoli.com","city":"Konterra","state":"NY"} +{"account_number":683,"balance":4381,"firstname":"Matilda","lastname":"Berger","age":39,"gender":"M","address":"884 Noble Street","employer":"Fibrodyne","email":"matildaberger@fibrodyne.com","city":"Shepardsville","state":"TN"} +{"account_number":688,"balance":17931,"firstname":"Freeman","lastname":"Zamora","age":22,"gender":"F","address":"114 Herzl Street","employer":"Elemantra","email":"freemanzamora@elemantra.com","city":"Libertytown","state":"NM"} +{"account_number":690,"balance":18127,"firstname":"Russo","lastname":"Swanson","age":35,"gender":"F","address":"256 Roebling 
Street","employer":"Zaj","email":"russoswanson@zaj.com","city":"Hoagland","state":"MI"} +{"account_number":695,"balance":36800,"firstname":"Gonzales","lastname":"Mcfarland","age":26,"gender":"F","address":"647 Louisa Street","employer":"Songbird","email":"gonzalesmcfarland@songbird.com","city":"Crisman","state":"ID"} +{"account_number":703,"balance":27443,"firstname":"Dona","lastname":"Burton","age":29,"gender":"M","address":"489 Flatlands Avenue","employer":"Cytrex","email":"donaburton@cytrex.com","city":"Reno","state":"VA"} +{"account_number":708,"balance":34002,"firstname":"May","lastname":"Ortiz","age":28,"gender":"F","address":"244 Chauncey Street","employer":"Syntac","email":"mayortiz@syntac.com","city":"Munjor","state":"ID"} +{"account_number":710,"balance":33650,"firstname":"Shelton","lastname":"Stark","age":37,"gender":"M","address":"404 Ovington Avenue","employer":"Kraggle","email":"sheltonstark@kraggle.com","city":"Ogema","state":"TN"} +{"account_number":715,"balance":23734,"firstname":"Tammi","lastname":"Hodge","age":24,"gender":"M","address":"865 Church Lane","employer":"Netur","email":"tammihodge@netur.com","city":"Lacomb","state":"KS"} +{"account_number":722,"balance":27256,"firstname":"Roberts","lastname":"Beasley","age":34,"gender":"F","address":"305 Kings Hwy","employer":"Quintity","email":"robertsbeasley@quintity.com","city":"Hayden","state":"PA"} +{"account_number":727,"balance":27263,"firstname":"Natasha","lastname":"Knapp","age":36,"gender":"M","address":"723 Hubbard Street","employer":"Exostream","email":"natashaknapp@exostream.com","city":"Trexlertown","state":"LA"} +{"account_number":734,"balance":20325,"firstname":"Keri","lastname":"Kinney","age":23,"gender":"M","address":"490 Balfour Place","employer":"Retrotex","email":"kerikinney@retrotex.com","city":"Salunga","state":"PA"} +{"account_number":739,"balance":39063,"firstname":"Gwen","lastname":"Hardy","age":33,"gender":"F","address":"733 Stuart 
Street","employer":"Exozent","email":"gwenhardy@exozent.com","city":"Drytown","state":"NY"} +{"account_number":741,"balance":33074,"firstname":"Nielsen","lastname":"Good","age":22,"gender":"M","address":"404 Norfolk Street","employer":"Kiggle","email":"nielsengood@kiggle.com","city":"Cumberland","state":"WA"} +{"account_number":746,"balance":15970,"firstname":"Marguerite","lastname":"Wall","age":28,"gender":"F","address":"364 Crosby Avenue","employer":"Aquoavo","email":"margueritewall@aquoavo.com","city":"Jeff","state":"MI"} +{"account_number":753,"balance":33340,"firstname":"Katina","lastname":"Alford","age":21,"gender":"F","address":"690 Ross Street","employer":"Intrawear","email":"katinaalford@intrawear.com","city":"Grimsley","state":"OK"} +{"account_number":758,"balance":15739,"firstname":"Berta","lastname":"Short","age":28,"gender":"M","address":"149 Surf Avenue","employer":"Ozean","email":"bertashort@ozean.com","city":"Odessa","state":"UT"} +{"account_number":760,"balance":40996,"firstname":"Rhea","lastname":"Blair","age":37,"gender":"F","address":"440 Hubbard Place","employer":"Bicol","email":"rheablair@bicol.com","city":"Stockwell","state":"LA"} +{"account_number":765,"balance":31278,"firstname":"Knowles","lastname":"Cunningham","age":23,"gender":"M","address":"753 Macdougal Street","employer":"Thredz","email":"knowlescunningham@thredz.com","city":"Thomasville","state":"WA"} +{"account_number":772,"balance":37849,"firstname":"Eloise","lastname":"Sparks","age":21,"gender":"M","address":"608 Willow Street","employer":"Satiance","email":"eloisesparks@satiance.com","city":"Richford","state":"NY"} +{"account_number":777,"balance":48294,"firstname":"Adkins","lastname":"Mejia","age":32,"gender":"M","address":"186 Oxford Walk","employer":"Datagen","email":"adkinsmejia@datagen.com","city":"Faywood","state":"OK"} +{"account_number":784,"balance":25291,"firstname":"Mabel","lastname":"Thornton","age":21,"gender":"M","address":"124 Louisiana 
Avenue","employer":"Zolavo","email":"mabelthornton@zolavo.com","city":"Lynn","state":"AL"} +{"account_number":789,"balance":8760,"firstname":"Cunningham","lastname":"Kerr","age":27,"gender":"F","address":"154 Sharon Street","employer":"Polarium","email":"cunninghamkerr@polarium.com","city":"Tuskahoma","state":"MS"} +{"account_number":791,"balance":48249,"firstname":"Janine","lastname":"Huber","age":38,"gender":"F","address":"348 Porter Avenue","employer":"Viocular","email":"janinehuber@viocular.com","city":"Fivepointville","state":"MA"} +{"account_number":796,"balance":23503,"firstname":"Mona","lastname":"Craft","age":35,"gender":"F","address":"511 Henry Street","employer":"Opticom","email":"monacraft@opticom.com","city":"Websterville","state":"IN"} +{"account_number":804,"balance":23610,"firstname":"Rojas","lastname":"Oneal","age":27,"gender":"M","address":"669 Sandford Street","employer":"Glukgluk","email":"rojasoneal@glukgluk.com","city":"Wheaton","state":"ME"} +{"account_number":809,"balance":47812,"firstname":"Christie","lastname":"Strickland","age":30,"gender":"M","address":"346 Bancroft Place","employer":"Anarco","email":"christiestrickland@anarco.com","city":"Baden","state":"NV"} +{"account_number":811,"balance":26007,"firstname":"Walls","lastname":"Rogers","age":28,"gender":"F","address":"352 Freeman Street","employer":"Geekmosis","email":"wallsrogers@geekmosis.com","city":"Caroleen","state":"NV"} +{"account_number":816,"balance":9567,"firstname":"Cornelia","lastname":"Lane","age":20,"gender":"F","address":"384 Bainbridge Street","employer":"Sulfax","email":"cornelialane@sulfax.com","city":"Elizaville","state":"MS"} +{"account_number":823,"balance":48726,"firstname":"Celia","lastname":"Bernard","age":33,"gender":"F","address":"466 Amboy Street","employer":"Mitroc","email":"celiabernard@mitroc.com","city":"Skyland","state":"GA"} +{"account_number":828,"balance":44890,"firstname":"Blanche","lastname":"Holmes","age":33,"gender":"F","address":"605 Stryker 
Court","employer":"Motovate","email":"blancheholmes@motovate.com","city":"Loomis","state":"KS"} +{"account_number":830,"balance":45210,"firstname":"Louella","lastname":"Chan","age":23,"gender":"M","address":"511 Heath Place","employer":"Conferia","email":"louellachan@conferia.com","city":"Brookfield","state":"OK"} +{"account_number":835,"balance":46558,"firstname":"Glover","lastname":"Rutledge","age":25,"gender":"F","address":"641 Royce Street","employer":"Ginkogene","email":"gloverrutledge@ginkogene.com","city":"Dixonville","state":"VA"} +{"account_number":842,"balance":49587,"firstname":"Meagan","lastname":"Buckner","age":23,"gender":"F","address":"833 Bushwick Court","employer":"Biospan","email":"meaganbuckner@biospan.com","city":"Craig","state":"TX"} +{"account_number":847,"balance":8652,"firstname":"Antonia","lastname":"Duncan","age":23,"gender":"M","address":"644 Stryker Street","employer":"Talae","email":"antoniaduncan@talae.com","city":"Dawn","state":"MO"} +{"account_number":854,"balance":49795,"firstname":"Jimenez","lastname":"Barry","age":25,"gender":"F","address":"603 Cooper Street","employer":"Verton","email":"jimenezbarry@verton.com","city":"Moscow","state":"AL"} +{"account_number":859,"balance":20734,"firstname":"Beulah","lastname":"Stuart","age":24,"gender":"F","address":"651 Albemarle Terrace","employer":"Hatology","email":"beulahstuart@hatology.com","city":"Waiohinu","state":"RI"} +{"account_number":861,"balance":44173,"firstname":"Jaime","lastname":"Wilson","age":35,"gender":"M","address":"680 Richardson Street","employer":"Temorak","email":"jaimewilson@temorak.com","city":"Fidelis","state":"FL"} +{"account_number":866,"balance":45565,"firstname":"Araceli","lastname":"Woodward","age":28,"gender":"M","address":"326 Meadow Street","employer":"Olympix","email":"araceliwoodward@olympix.com","city":"Dana","state":"KS"} +{"account_number":873,"balance":43931,"firstname":"Tisha","lastname":"Cotton","age":39,"gender":"F","address":"432 Lincoln 
Road","employer":"Buzzmaker","email":"tishacotton@buzzmaker.com","city":"Bluetown","state":"GA"} +{"account_number":878,"balance":49159,"firstname":"Battle","lastname":"Blackburn","age":40,"gender":"F","address":"234 Hendrix Street","employer":"Zilphur","email":"battleblackburn@zilphur.com","city":"Wanamie","state":"PA"} +{"account_number":880,"balance":22575,"firstname":"Christian","lastname":"Myers","age":35,"gender":"M","address":"737 Crown Street","employer":"Combogen","email":"christianmyers@combogen.com","city":"Abrams","state":"OK"} +{"account_number":885,"balance":31661,"firstname":"Valdez","lastname":"Roberson","age":40,"gender":"F","address":"227 Scholes Street","employer":"Delphide","email":"valdezroberson@delphide.com","city":"Chilton","state":"MT"} +{"account_number":892,"balance":44974,"firstname":"Hill","lastname":"Hayes","age":29,"gender":"M","address":"721 Dooley Street","employer":"Fuelton","email":"hillhayes@fuelton.com","city":"Orason","state":"MT"} +{"account_number":897,"balance":45973,"firstname":"Alyson","lastname":"Irwin","age":25,"gender":"M","address":"731 Poplar Street","employer":"Quizka","email":"alysonirwin@quizka.com","city":"Singer","state":"VA"} +{"account_number":900,"balance":6124,"firstname":"Gonzalez","lastname":"Watson","age":23,"gender":"M","address":"624 Sullivan Street","employer":"Marvane","email":"gonzalezwatson@marvane.com","city":"Wikieup","state":"IL"} +{"account_number":905,"balance":29438,"firstname":"Schultz","lastname":"Moreno","age":20,"gender":"F","address":"761 Cedar Street","employer":"Paragonia","email":"schultzmoreno@paragonia.com","city":"Glenshaw","state":"SC"} +{"account_number":912,"balance":13675,"firstname":"Flora","lastname":"Alvarado","age":26,"gender":"M","address":"771 Vandervoort Avenue","employer":"Boilicon","email":"floraalvarado@boilicon.com","city":"Vivian","state":"ID"} +{"account_number":917,"balance":47782,"firstname":"Parks","lastname":"Hurst","age":24,"gender":"M","address":"933 Cozine 
Avenue","employer":"Pyramis","email":"parkshurst@pyramis.com","city":"Lindcove","state":"GA"} +{"account_number":924,"balance":3811,"firstname":"Hilary","lastname":"Leonard","age":24,"gender":"M","address":"235 Hegeman Avenue","employer":"Metroz","email":"hilaryleonard@metroz.com","city":"Roosevelt","state":"ME"} +{"account_number":929,"balance":34708,"firstname":"Willie","lastname":"Hickman","age":35,"gender":"M","address":"430 Devoe Street","employer":"Apextri","email":"williehickman@apextri.com","city":"Clay","state":"MS"} +{"account_number":931,"balance":8244,"firstname":"Ingrid","lastname":"Garcia","age":23,"gender":"F","address":"674 Indiana Place","employer":"Balooba","email":"ingridgarcia@balooba.com","city":"Interlochen","state":"AZ"} +{"account_number":936,"balance":22430,"firstname":"Beth","lastname":"Frye","age":36,"gender":"M","address":"462 Thatford Avenue","employer":"Puria","email":"bethfrye@puria.com","city":"Hiseville","state":"LA"} +{"account_number":943,"balance":24187,"firstname":"Wagner","lastname":"Griffin","age":23,"gender":"M","address":"489 Ellery Street","employer":"Gazak","email":"wagnergriffin@gazak.com","city":"Lorraine","state":"HI"} +{"account_number":948,"balance":37074,"firstname":"Sargent","lastname":"Powers","age":40,"gender":"M","address":"532 Fiske Place","employer":"Accuprint","email":"sargentpowers@accuprint.com","city":"Umapine","state":"AK"} +{"account_number":950,"balance":30916,"firstname":"Sherrie","lastname":"Patel","age":32,"gender":"F","address":"658 Langham Street","employer":"Futurize","email":"sherriepatel@futurize.com","city":"Garfield","state":"OR"} +{"account_number":955,"balance":41621,"firstname":"Klein","lastname":"Kemp","age":33,"gender":"M","address":"370 Vanderbilt Avenue","employer":"Synkgen","email":"kleinkemp@synkgen.com","city":"Bonanza","state":"FL"} +{"account_number":962,"balance":32096,"firstname":"Trujillo","lastname":"Wilcox","age":21,"gender":"F","address":"914 Duffield 
Street","employer":"Extragene","email":"trujillowilcox@extragene.com","city":"Golconda","state":"MA"} +{"account_number":967,"balance":19161,"firstname":"Carrie","lastname":"Huffman","age":36,"gender":"F","address":"240 Sands Street","employer":"Injoy","email":"carriehuffman@injoy.com","city":"Leroy","state":"CA"} +{"account_number":974,"balance":38082,"firstname":"Deborah","lastname":"Yang","age":26,"gender":"F","address":"463 Goodwin Place","employer":"Entogrok","email":"deborahyang@entogrok.com","city":"Herald","state":"KY"} +{"account_number":979,"balance":43130,"firstname":"Vaughn","lastname":"Pittman","age":29,"gender":"M","address":"446 Tompkins Place","employer":"Phormula","email":"vaughnpittman@phormula.com","city":"Fingerville","state":"WI"} +{"account_number":981,"balance":20278,"firstname":"Nolan","lastname":"Warner","age":29,"gender":"F","address":"753 Channel Avenue","employer":"Interodeo","email":"nolanwarner@interodeo.com","city":"Layhill","state":"MT"} +{"account_number":986,"balance":35086,"firstname":"Norris","lastname":"Hubbard","age":31,"gender":"M","address":"600 Celeste Court","employer":"Printspan","email":"norrishubbard@printspan.com","city":"Cassel","state":"MI"} +{"account_number":993,"balance":26487,"firstname":"Campos","lastname":"Olsen","age":37,"gender":"M","address":"873 Covert Street","employer":"Isbol","email":"camposolsen@isbol.com","city":"Glendale","state":"AK"} +{"account_number":998,"balance":16869,"firstname":"Letha","lastname":"Baker","age":40,"gender":"F","address":"206 Llama Court","employer":"Dognosis","email":"lethabaker@dognosis.com","city":"Dunlo","state":"WV"} +{"account_number":2,"balance":28838,"firstname":"Roberta","lastname":"Bender","age":22,"gender":"F","address":"560 Kingsway Place","employer":"Chillium","email":"robertabender@chillium.com","city":"Bennett","state":"LA"} +{"account_number":7,"balance":39121,"firstname":"Levy","lastname":"Richard","age":22,"gender":"M","address":"820 Logan 
Street","employer":"Teraprene","email":"levyrichard@teraprene.com","city":"Shrewsbury","state":"MO"} +{"account_number":14,"balance":20480,"firstname":"Erma","lastname":"Kane","age":39,"gender":"F","address":"661 Vista Place","employer":"Stockpost","email":"ermakane@stockpost.com","city":"Chamizal","state":"NY"} +{"account_number":19,"balance":27894,"firstname":"Schwartz","lastname":"Buchanan","age":28,"gender":"F","address":"449 Mersereau Court","employer":"Sybixtex","email":"schwartzbuchanan@sybixtex.com","city":"Greenwich","state":"KS"} +{"account_number":21,"balance":7004,"firstname":"Estella","lastname":"Paul","age":38,"gender":"M","address":"859 Portal Street","employer":"Zillatide","email":"estellapaul@zillatide.com","city":"Churchill","state":"WV"} +{"account_number":26,"balance":14127,"firstname":"Lorraine","lastname":"Mccullough","age":39,"gender":"F","address":"157 Dupont Street","employer":"Zosis","email":"lorrainemccullough@zosis.com","city":"Dennard","state":"NH"} +{"account_number":33,"balance":35439,"firstname":"Savannah","lastname":"Kirby","age":30,"gender":"F","address":"372 Malta Street","employer":"Musanpoly","email":"savannahkirby@musanpoly.com","city":"Muse","state":"AK"} +{"account_number":38,"balance":10511,"firstname":"Erna","lastname":"Fields","age":32,"gender":"M","address":"357 Maple Street","employer":"Eweville","email":"ernafields@eweville.com","city":"Twilight","state":"MS"} +{"account_number":40,"balance":33882,"firstname":"Pace","lastname":"Molina","age":40,"gender":"M","address":"263 Ovington Court","employer":"Cytrak","email":"pacemolina@cytrak.com","city":"Silkworth","state":"OR"} +{"account_number":45,"balance":44478,"firstname":"Geneva","lastname":"Morin","age":21,"gender":"F","address":"357 Herkimer Street","employer":"Ezent","email":"genevamorin@ezent.com","city":"Blanco","state":"AZ"} +{"account_number":52,"balance":46425,"firstname":"Kayla","lastname":"Bradshaw","age":31,"gender":"M","address":"449 Barlow 
Drive","employer":"Magnemo","email":"kaylabradshaw@magnemo.com","city":"Wawona","state":"AZ"} +{"account_number":57,"balance":8705,"firstname":"Powell","lastname":"Herring","age":21,"gender":"M","address":"263 Merit Court","employer":"Digiprint","email":"powellherring@digiprint.com","city":"Coral","state":"MT"} +{"account_number":64,"balance":44036,"firstname":"Miles","lastname":"Battle","age":35,"gender":"F","address":"988 Homecrest Avenue","employer":"Koffee","email":"milesbattle@koffee.com","city":"Motley","state":"ID"} +{"account_number":69,"balance":14253,"firstname":"Desiree","lastname":"Harrison","age":24,"gender":"M","address":"694 Garland Court","employer":"Barkarama","email":"desireeharrison@barkarama.com","city":"Hackneyville","state":"GA"} +{"account_number":71,"balance":38201,"firstname":"Sharpe","lastname":"Hoffman","age":39,"gender":"F","address":"450 Conklin Avenue","employer":"Centree","email":"sharpehoffman@centree.com","city":"Urbana","state":"WY"} +{"account_number":76,"balance":38345,"firstname":"Claudette","lastname":"Beard","age":24,"gender":"F","address":"748 Dorset Street","employer":"Repetwire","email":"claudettebeard@repetwire.com","city":"Caln","state":"TX"} +{"account_number":83,"balance":35928,"firstname":"Mayo","lastname":"Cleveland","age":28,"gender":"M","address":"720 Brooklyn Road","employer":"Indexia","email":"mayocleveland@indexia.com","city":"Roberts","state":"ND"} +{"account_number":88,"balance":26418,"firstname":"Adela","lastname":"Tyler","age":21,"gender":"F","address":"737 Clove Road","employer":"Surelogic","email":"adelatyler@surelogic.com","city":"Boling","state":"SD"} +{"account_number":90,"balance":25332,"firstname":"Herman","lastname":"Snyder","age":22,"gender":"F","address":"737 College Place","employer":"Lunchpod","email":"hermansnyder@lunchpod.com","city":"Flintville","state":"IA"} +{"account_number":95,"balance":1650,"firstname":"Dominguez","lastname":"Le","age":20,"gender":"M","address":"539 Grace 
Court","employer":"Portica","email":"dominguezle@portica.com","city":"Wollochet","state":"KS"} +{"account_number":103,"balance":11253,"firstname":"Calhoun","lastname":"Bruce","age":33,"gender":"F","address":"731 Clarkson Avenue","employer":"Automon","email":"calhounbruce@automon.com","city":"Marienthal","state":"IL"} +{"account_number":108,"balance":19015,"firstname":"Christensen","lastname":"Weaver","age":21,"gender":"M","address":"398 Dearborn Court","employer":"Quilk","email":"christensenweaver@quilk.com","city":"Belvoir","state":"TX"} +{"account_number":110,"balance":4850,"firstname":"Daphne","lastname":"Byrd","age":23,"gender":"F","address":"239 Conover Street","employer":"Freakin","email":"daphnebyrd@freakin.com","city":"Taft","state":"MN"} +{"account_number":115,"balance":18750,"firstname":"Nikki","lastname":"Doyle","age":31,"gender":"F","address":"537 Clara Street","employer":"Fossiel","email":"nikkidoyle@fossiel.com","city":"Caron","state":"MS"} +{"account_number":122,"balance":17128,"firstname":"Aurora","lastname":"Fry","age":31,"gender":"F","address":"227 Knapp Street","employer":"Makingway","email":"aurorafry@makingway.com","city":"Maybell","state":"NE"} +{"account_number":127,"balance":48734,"firstname":"Diann","lastname":"Mclaughlin","age":33,"gender":"F","address":"340 Clermont Avenue","employer":"Enomen","email":"diannmclaughlin@enomen.com","city":"Rutherford","state":"ND"} +{"account_number":134,"balance":33829,"firstname":"Madelyn","lastname":"Norris","age":30,"gender":"F","address":"176 Noel Avenue","employer":"Endicil","email":"madelynnorris@endicil.com","city":"Walker","state":"NE"} +{"account_number":139,"balance":18444,"firstname":"Rios","lastname":"Todd","age":35,"gender":"F","address":"281 Georgia Avenue","employer":"Uberlux","email":"riostodd@uberlux.com","city":"Hannasville","state":"PA"} +{"account_number":141,"balance":20790,"firstname":"Liliana","lastname":"Caldwell","age":29,"gender":"M","address":"414 Huron 
Street","employer":"Rubadub","email":"lilianacaldwell@rubadub.com","city":"Hiwasse","state":"OK"} +{"account_number":146,"balance":39078,"firstname":"Lang","lastname":"Kaufman","age":32,"gender":"F","address":"626 Beverley Road","employer":"Rodeomad","email":"langkaufman@rodeomad.com","city":"Mahtowa","state":"RI"} +{"account_number":153,"balance":32074,"firstname":"Bird","lastname":"Cochran","age":31,"gender":"F","address":"691 Bokee Court","employer":"Supremia","email":"birdcochran@supremia.com","city":"Barrelville","state":"NE"} +{"account_number":158,"balance":9380,"firstname":"Natalie","lastname":"Mcdowell","age":27,"gender":"M","address":"953 Roder Avenue","employer":"Myopium","email":"nataliemcdowell@myopium.com","city":"Savage","state":"ND"} +{"account_number":160,"balance":48974,"firstname":"Hull","lastname":"Cherry","age":23,"gender":"F","address":"275 Beaumont Street","employer":"Noralex","email":"hullcherry@noralex.com","city":"Whipholt","state":"WA"} +{"account_number":165,"balance":18956,"firstname":"Sims","lastname":"Mckay","age":40,"gender":"F","address":"205 Jackson Street","employer":"Comtour","email":"simsmckay@comtour.com","city":"Tilden","state":"DC"} +{"account_number":172,"balance":18356,"firstname":"Marie","lastname":"Whitehead","age":20,"gender":"M","address":"704 Monaco Place","employer":"Sultrax","email":"mariewhitehead@sultrax.com","city":"Dragoon","state":"IL"} +{"account_number":177,"balance":48972,"firstname":"Harris","lastname":"Gross","age":40,"gender":"F","address":"468 Suydam Street","employer":"Kidstock","email":"harrisgross@kidstock.com","city":"Yettem","state":"KY"} +{"account_number":184,"balance":9157,"firstname":"Cathy","lastname":"Morrison","age":27,"gender":"M","address":"882 Pine Street","employer":"Zytrek","email":"cathymorrison@zytrek.com","city":"Fedora","state":"FL"} +{"account_number":189,"balance":20167,"firstname":"Ada","lastname":"Cortez","age":38,"gender":"F","address":"700 Forest 
Place","employer":"Micronaut","email":"adacortez@micronaut.com","city":"Eagletown","state":"TX"} +{"account_number":191,"balance":26172,"firstname":"Barr","lastname":"Sharpe","age":28,"gender":"M","address":"428 Auburn Place","employer":"Ziggles","email":"barrsharpe@ziggles.com","city":"Springdale","state":"KS"} +{"account_number":196,"balance":29931,"firstname":"Caldwell","lastname":"Daniel","age":28,"gender":"F","address":"405 Oliver Street","employer":"Furnigeer","email":"caldwelldaniel@furnigeer.com","city":"Zortman","state":"NE"} +{"account_number":204,"balance":27714,"firstname":"Mavis","lastname":"Deleon","age":39,"gender":"F","address":"400 Waldane Court","employer":"Lotron","email":"mavisdeleon@lotron.com","city":"Stollings","state":"LA"} +{"account_number":209,"balance":31052,"firstname":"Myers","lastname":"Noel","age":30,"gender":"F","address":"691 Alton Place","employer":"Greeker","email":"myersnoel@greeker.com","city":"Hinsdale","state":"KY"} +{"account_number":211,"balance":21539,"firstname":"Graciela","lastname":"Vaughan","age":22,"gender":"M","address":"558 Montauk Court","employer":"Fishland","email":"gracielavaughan@fishland.com","city":"Madrid","state":"PA"} +{"account_number":216,"balance":11422,"firstname":"Price","lastname":"Haley","age":35,"gender":"M","address":"233 Portland Avenue","employer":"Zeam","email":"pricehaley@zeam.com","city":"Titanic","state":"UT"} +{"account_number":223,"balance":9528,"firstname":"Newton","lastname":"Fletcher","age":26,"gender":"F","address":"654 Dewitt Avenue","employer":"Assistia","email":"newtonfletcher@assistia.com","city":"Nipinnawasee","state":"AK"} +{"account_number":228,"balance":10543,"firstname":"Rosella","lastname":"Albert","age":20,"gender":"M","address":"185 Gotham Avenue","employer":"Isoplex","email":"rosellaalbert@isoplex.com","city":"Finzel","state":"NY"} +{"account_number":230,"balance":10829,"firstname":"Chris","lastname":"Raymond","age":28,"gender":"F","address":"464 Remsen 
Street","employer":"Cogentry","email":"chrisraymond@cogentry.com","city":"Bowmansville","state":"SD"} +{"account_number":235,"balance":17729,"firstname":"Mcpherson","lastname":"Mueller","age":31,"gender":"M","address":"541 Strong Place","employer":"Tingles","email":"mcphersonmueller@tingles.com","city":"Brantleyville","state":"AR"} +{"account_number":242,"balance":42318,"firstname":"Berger","lastname":"Roach","age":21,"gender":"M","address":"125 Wakeman Place","employer":"Ovium","email":"bergerroach@ovium.com","city":"Hessville","state":"WI"} +{"account_number":247,"balance":45123,"firstname":"Mccormick","lastname":"Moon","age":37,"gender":"M","address":"582 Brighton Avenue","employer":"Norsup","email":"mccormickmoon@norsup.com","city":"Forestburg","state":"DE"} +{"account_number":254,"balance":35104,"firstname":"Yang","lastname":"Dodson","age":21,"gender":"M","address":"531 Lott Street","employer":"Mondicil","email":"yangdodson@mondicil.com","city":"Enoree","state":"UT"} +{"account_number":259,"balance":41877,"firstname":"Eleanor","lastname":"Gonzalez","age":30,"gender":"M","address":"800 Sumpter Street","employer":"Futuris","email":"eleanorgonzalez@futuris.com","city":"Jenkinsville","state":"ID"} +{"account_number":261,"balance":39998,"firstname":"Millicent","lastname":"Pickett","age":34,"gender":"F","address":"722 Montieth Street","employer":"Gushkool","email":"millicentpickett@gushkool.com","city":"Norwood","state":"MS"} +{"account_number":266,"balance":2777,"firstname":"Monique","lastname":"Conner","age":35,"gender":"F","address":"489 Metrotech Courtr","employer":"Flotonic","email":"moniqueconner@flotonic.com","city":"Retsof","state":"MD"} +{"account_number":273,"balance":11181,"firstname":"Murphy","lastname":"Chandler","age":20,"gender":"F","address":"569 Bradford Street","employer":"Zilch","email":"murphychandler@zilch.com","city":"Vicksburg","state":"FL"} 
+{"account_number":278,"balance":22530,"firstname":"Tamra","lastname":"Navarro","age":27,"gender":"F","address":"175 Woodruff Avenue","employer":"Norsul","email":"tamranavarro@norsul.com","city":"Glasgow","state":"VT"} +{"account_number":280,"balance":3380,"firstname":"Vilma","lastname":"Shields","age":26,"gender":"F","address":"133 Berriman Street","employer":"Applidec","email":"vilmashields@applidec.com","city":"Adamstown","state":"ME"} +{"account_number":285,"balance":47369,"firstname":"Hilda","lastname":"Phillips","age":28,"gender":"F","address":"618 Nixon Court","employer":"Comcur","email":"hildaphillips@comcur.com","city":"Siglerville","state":"NC"} +{"account_number":292,"balance":26679,"firstname":"Morrow","lastname":"Greene","age":20,"gender":"F","address":"691 Nassau Street","employer":"Columella","email":"morrowgreene@columella.com","city":"Sanborn","state":"FL"} +{"account_number":297,"balance":20508,"firstname":"Tucker","lastname":"Patrick","age":35,"gender":"F","address":"978 Whitwell Place","employer":"Valreda","email":"tuckerpatrick@valreda.com","city":"Deseret","state":"CO"} +{"account_number":300,"balance":25654,"firstname":"Lane","lastname":"Tate","age":26,"gender":"F","address":"632 Kay Court","employer":"Genesynk","email":"lanetate@genesynk.com","city":"Lowell","state":"MO"} +{"account_number":305,"balance":11655,"firstname":"Augusta","lastname":"Winters","age":29,"gender":"F","address":"377 Paerdegat Avenue","employer":"Vendblend","email":"augustawinters@vendblend.com","city":"Gwynn","state":"MA"} +{"account_number":312,"balance":8511,"firstname":"Burgess","lastname":"Gentry","age":25,"gender":"F","address":"382 Bergen Court","employer":"Orbixtar","email":"burgessgentry@orbixtar.com","city":"Conestoga","state":"WI"} +{"account_number":317,"balance":31968,"firstname":"Ruiz","lastname":"Morris","age":31,"gender":"F","address":"972 Dean Street","employer":"Apex","email":"ruizmorris@apex.com","city":"Jacksonwald","state":"WV"} 
+{"account_number":324,"balance":44976,"firstname":"Gladys","lastname":"Erickson","age":22,"gender":"M","address":"250 Battery Avenue","employer":"Eternis","email":"gladyserickson@eternis.com","city":"Marne","state":"IA"} +{"account_number":329,"balance":31138,"firstname":"Nellie","lastname":"Mercer","age":25,"gender":"M","address":"967 Ebony Court","employer":"Scenty","email":"nelliemercer@scenty.com","city":"Jardine","state":"AK"} +{"account_number":331,"balance":46004,"firstname":"Gibson","lastname":"Potts","age":34,"gender":"F","address":"994 Dahill Road","employer":"Zensus","email":"gibsonpotts@zensus.com","city":"Frizzleburg","state":"CO"} +{"account_number":336,"balance":40891,"firstname":"Dudley","lastname":"Avery","age":25,"gender":"M","address":"405 Powers Street","employer":"Genmom","email":"dudleyavery@genmom.com","city":"Clarksburg","state":"CO"} +{"account_number":343,"balance":37684,"firstname":"Robbie","lastname":"Logan","age":29,"gender":"M","address":"488 Linden Boulevard","employer":"Hydrocom","email":"robbielogan@hydrocom.com","city":"Stockdale","state":"TN"} +{"account_number":348,"balance":1360,"firstname":"Karina","lastname":"Russell","age":37,"gender":"M","address":"797 Moffat Street","employer":"Limozen","email":"karinarussell@limozen.com","city":"Riegelwood","state":"RI"} +{"account_number":350,"balance":4267,"firstname":"Wyatt","lastname":"Wise","age":22,"gender":"F","address":"896 Bleecker Street","employer":"Rockyard","email":"wyattwise@rockyard.com","city":"Joes","state":"MS"} +{"account_number":355,"balance":40961,"firstname":"Gregory","lastname":"Delacruz","age":38,"gender":"M","address":"876 Cortelyou Road","employer":"Oulu","email":"gregorydelacruz@oulu.com","city":"Waterloo","state":"WV"} +{"account_number":362,"balance":14938,"firstname":"Jimmie","lastname":"Dejesus","age":26,"gender":"M","address":"351 Navy Walk","employer":"Ecolight","email":"jimmiedejesus@ecolight.com","city":"Berlin","state":"ME"} 
+{"account_number":367,"balance":40458,"firstname":"Elaine","lastname":"Workman","age":20,"gender":"M","address":"188 Ridge Boulevard","employer":"Colaire","email":"elaineworkman@colaire.com","city":"Herbster","state":"AK"} +{"account_number":374,"balance":19521,"firstname":"Blanchard","lastname":"Stein","age":30,"gender":"M","address":"313 Bartlett Street","employer":"Cujo","email":"blanchardstein@cujo.com","city":"Cascades","state":"OR"} +{"account_number":379,"balance":12962,"firstname":"Ruthie","lastname":"Lamb","age":21,"gender":"M","address":"796 Rockaway Avenue","employer":"Incubus","email":"ruthielamb@incubus.com","city":"Hickory","state":"TX"} +{"account_number":381,"balance":40978,"firstname":"Sophie","lastname":"Mays","age":31,"gender":"M","address":"261 Varanda Place","employer":"Uneeq","email":"sophiemays@uneeq.com","city":"Cressey","state":"AR"} +{"account_number":386,"balance":42588,"firstname":"Wallace","lastname":"Barr","age":39,"gender":"F","address":"246 Beverly Road","employer":"Concility","email":"wallacebarr@concility.com","city":"Durham","state":"IN"} +{"account_number":393,"balance":43936,"firstname":"William","lastname":"Kelly","age":24,"gender":"M","address":"178 Lawrence Avenue","employer":"Techtrix","email":"williamkelly@techtrix.com","city":"Orin","state":"PA"} +{"account_number":398,"balance":8543,"firstname":"Leticia","lastname":"Duran","age":35,"gender":"F","address":"305 Senator Street","employer":"Xleen","email":"leticiaduran@xleen.com","city":"Cavalero","state":"PA"} +{"account_number":401,"balance":29408,"firstname":"Contreras","lastname":"Randolph","age":38,"gender":"M","address":"104 Lewis Avenue","employer":"Inrt","email":"contrerasrandolph@inrt.com","city":"Chesapeake","state":"CT"} +{"account_number":406,"balance":28127,"firstname":"Mccarthy","lastname":"Dunlap","age":28,"gender":"F","address":"684 Seacoast Terrace","employer":"Canopoly","email":"mccarthydunlap@canopoly.com","city":"Elliott","state":"NC"} 
+{"account_number":413,"balance":15631,"firstname":"Pugh","lastname":"Hamilton","age":39,"gender":"F","address":"124 Euclid Avenue","employer":"Techade","email":"pughhamilton@techade.com","city":"Beaulieu","state":"CA"} +{"account_number":418,"balance":10207,"firstname":"Reed","lastname":"Goff","age":32,"gender":"M","address":"959 Everit Street","employer":"Zillan","email":"reedgoff@zillan.com","city":"Hiko","state":"WV"} +{"account_number":420,"balance":44699,"firstname":"Brandie","lastname":"Hayden","age":22,"gender":"M","address":"291 Ash Street","employer":"Digifad","email":"brandiehayden@digifad.com","city":"Spelter","state":"NM"} +{"account_number":425,"balance":41308,"firstname":"Queen","lastname":"Leach","age":30,"gender":"M","address":"105 Fair Street","employer":"Magneato","email":"queenleach@magneato.com","city":"Barronett","state":"NH"} +{"account_number":432,"balance":28969,"firstname":"Preston","lastname":"Ferguson","age":40,"gender":"F","address":"239 Greenwood Avenue","employer":"Bitendrex","email":"prestonferguson@bitendrex.com","city":"Idledale","state":"ND"} +{"account_number":437,"balance":41225,"firstname":"Rosales","lastname":"Marquez","age":29,"gender":"M","address":"873 Ryerson Street","employer":"Ronelon","email":"rosalesmarquez@ronelon.com","city":"Allendale","state":"CA"} +{"account_number":444,"balance":44219,"firstname":"Dolly","lastname":"Finch","age":24,"gender":"F","address":"974 Interborough Parkway","employer":"Zytrac","email":"dollyfinch@zytrac.com","city":"Vowinckel","state":"WY"} +{"account_number":449,"balance":41950,"firstname":"Barnett","lastname":"Cantrell","age":39,"gender":"F","address":"945 Bedell Lane","employer":"Zentility","email":"barnettcantrell@zentility.com","city":"Swartzville","state":"ND"} +{"account_number":451,"balance":31950,"firstname":"Mason","lastname":"Mcleod","age":31,"gender":"F","address":"438 Havemeyer Street","employer":"Omatom","email":"masonmcleod@omatom.com","city":"Ryderwood","state":"NE"} 
+{"account_number":456,"balance":21419,"firstname":"Solis","lastname":"Kline","age":33,"gender":"M","address":"818 Ashford Street","employer":"Vetron","email":"soliskline@vetron.com","city":"Ruffin","state":"NY"} +{"account_number":463,"balance":36672,"firstname":"Heidi","lastname":"Acosta","age":20,"gender":"F","address":"692 Kenmore Terrace","employer":"Elpro","email":"heidiacosta@elpro.com","city":"Ezel","state":"SD"} +{"account_number":468,"balance":18400,"firstname":"Foreman","lastname":"Fowler","age":40,"gender":"M","address":"443 Jackson Court","employer":"Zillactic","email":"foremanfowler@zillactic.com","city":"Wakarusa","state":"WA"} +{"account_number":470,"balance":20455,"firstname":"Schneider","lastname":"Hull","age":35,"gender":"M","address":"724 Apollo Street","employer":"Exospeed","email":"schneiderhull@exospeed.com","city":"Watchtower","state":"ID"} +{"account_number":475,"balance":24427,"firstname":"Morales","lastname":"Jacobs","age":22,"gender":"F","address":"225 Desmond Court","employer":"Oronoko","email":"moralesjacobs@oronoko.com","city":"Clayville","state":"CT"} +{"account_number":482,"balance":14834,"firstname":"Janie","lastname":"Bass","age":39,"gender":"M","address":"781 Grattan Street","employer":"Manglo","email":"janiebass@manglo.com","city":"Kenwood","state":"IA"} +{"account_number":487,"balance":30718,"firstname":"Sawyer","lastname":"Vincent","age":26,"gender":"F","address":"238 Lancaster Avenue","employer":"Brainquil","email":"sawyervincent@brainquil.com","city":"Galesville","state":"MS"} +{"account_number":494,"balance":3592,"firstname":"Holden","lastname":"Bowen","age":30,"gender":"M","address":"374 Elmwood Avenue","employer":"Endipine","email":"holdenbowen@endipine.com","city":"Rosine","state":"ID"} +{"account_number":499,"balance":26060,"firstname":"Lara","lastname":"Perkins","age":26,"gender":"M","address":"703 Monroe Street","employer":"Paprikut","email":"laraperkins@paprikut.com","city":"Barstow","state":"NY"} 
+{"account_number":502,"balance":31898,"firstname":"Woodard","lastname":"Bailey","age":31,"gender":"F","address":"585 Albee Square","employer":"Imperium","email":"woodardbailey@imperium.com","city":"Matheny","state":"MT"} +{"account_number":507,"balance":27675,"firstname":"Blankenship","lastname":"Ramirez","age":31,"gender":"M","address":"630 Graham Avenue","employer":"Bytrex","email":"blankenshipramirez@bytrex.com","city":"Bancroft","state":"CT"} +{"account_number":514,"balance":30125,"firstname":"Solomon","lastname":"Bush","age":34,"gender":"M","address":"409 Harkness Avenue","employer":"Snacktion","email":"solomonbush@snacktion.com","city":"Grayhawk","state":"TX"} +{"account_number":519,"balance":3282,"firstname":"Lorna","lastname":"Franco","age":31,"gender":"F","address":"722 Schenck Court","employer":"Zentia","email":"lornafranco@zentia.com","city":"National","state":"FL"} +{"account_number":521,"balance":16348,"firstname":"Josefa","lastname":"Buckley","age":34,"gender":"F","address":"848 Taylor Street","employer":"Mazuda","email":"josefabuckley@mazuda.com","city":"Saranap","state":"NM"} +{"account_number":526,"balance":35375,"firstname":"Sweeney","lastname":"Fulton","age":33,"gender":"F","address":"550 Martense Street","employer":"Cormoran","email":"sweeneyfulton@cormoran.com","city":"Chalfant","state":"IA"} +{"account_number":533,"balance":13761,"firstname":"Margarita","lastname":"Diaz","age":23,"gender":"M","address":"295 Tapscott Street","employer":"Zilodyne","email":"margaritadiaz@zilodyne.com","city":"Hondah","state":"ID"} +{"account_number":538,"balance":16416,"firstname":"Koch","lastname":"Barker","age":21,"gender":"M","address":"919 Gerry Street","employer":"Xplor","email":"kochbarker@xplor.com","city":"Dixie","state":"WY"} +{"account_number":540,"balance":40235,"firstname":"Tammy","lastname":"Wiggins","age":32,"gender":"F","address":"186 Schenectady Avenue","employer":"Speedbolt","email":"tammywiggins@speedbolt.com","city":"Salvo","state":"LA"} 
+{"account_number":545,"balance":27011,"firstname":"Lena","lastname":"Lucas","age":20,"gender":"M","address":"110 Lamont Court","employer":"Kindaloo","email":"lenalucas@kindaloo.com","city":"Harleigh","state":"KY"} +{"account_number":552,"balance":14727,"firstname":"Kate","lastname":"Estes","age":39,"gender":"M","address":"785 Willmohr Street","employer":"Rodeocean","email":"kateestes@rodeocean.com","city":"Elfrida","state":"HI"} +{"account_number":557,"balance":3119,"firstname":"Landry","lastname":"Buck","age":20,"gender":"M","address":"558 Schweikerts Walk","employer":"Protodyne","email":"landrybuck@protodyne.com","city":"Edneyville","state":"AL"} +{"account_number":564,"balance":43631,"firstname":"Owens","lastname":"Bowers","age":22,"gender":"M","address":"842 Congress Street","employer":"Nspire","email":"owensbowers@nspire.com","city":"Machias","state":"VA"} +{"account_number":569,"balance":40019,"firstname":"Sherri","lastname":"Rowe","age":39,"gender":"F","address":"591 Arlington Place","employer":"Netility","email":"sherrirowe@netility.com","city":"Bridgetown","state":"SC"} +{"account_number":571,"balance":3014,"firstname":"Ayers","lastname":"Duffy","age":28,"gender":"F","address":"721 Wortman Avenue","employer":"Aquasseur","email":"ayersduffy@aquasseur.com","city":"Tilleda","state":"MS"} +{"account_number":576,"balance":29682,"firstname":"Helena","lastname":"Robertson","age":33,"gender":"F","address":"774 Devon Avenue","employer":"Vicon","email":"helenarobertson@vicon.com","city":"Dyckesville","state":"NV"} +{"account_number":583,"balance":26558,"firstname":"Castro","lastname":"West","age":34,"gender":"F","address":"814 Williams Avenue","employer":"Cipromox","email":"castrowest@cipromox.com","city":"Nescatunga","state":"IL"} +{"account_number":588,"balance":43531,"firstname":"Martina","lastname":"Collins","age":31,"gender":"M","address":"301 Anna Court","employer":"Geekwagon","email":"martinacollins@geekwagon.com","city":"Oneida","state":"VA"} 
+{"account_number":590,"balance":4652,"firstname":"Ladonna","lastname":"Tucker","age":31,"gender":"F","address":"162 Kane Place","employer":"Infotrips","email":"ladonnatucker@infotrips.com","city":"Utting","state":"IA"} +{"account_number":595,"balance":12478,"firstname":"Mccall","lastname":"Britt","age":36,"gender":"F","address":"823 Hill Street","employer":"Cablam","email":"mccallbritt@cablam.com","city":"Vernon","state":"CA"} +{"account_number":603,"balance":28145,"firstname":"Janette","lastname":"Guzman","age":31,"gender":"F","address":"976 Kingston Avenue","employer":"Splinx","email":"janetteguzman@splinx.com","city":"Boomer","state":"NC"} +{"account_number":608,"balance":47091,"firstname":"Carey","lastname":"Whitley","age":32,"gender":"F","address":"976 Lawrence Street","employer":"Poshome","email":"careywhitley@poshome.com","city":"Weogufka","state":"NE"} +{"account_number":610,"balance":40571,"firstname":"Foster","lastname":"Weber","age":24,"gender":"F","address":"323 Rochester Avenue","employer":"Firewax","email":"fosterweber@firewax.com","city":"Winston","state":"NY"} +{"account_number":615,"balance":28726,"firstname":"Delgado","lastname":"Curry","age":28,"gender":"F","address":"706 Butler Street","employer":"Zoxy","email":"delgadocurry@zoxy.com","city":"Gracey","state":"SD"} +{"account_number":622,"balance":9661,"firstname":"Paulette","lastname":"Hartman","age":38,"gender":"M","address":"375 Emerald Street","employer":"Locazone","email":"paulettehartman@locazone.com","city":"Canterwood","state":"OH"} +{"account_number":627,"balance":47546,"firstname":"Crawford","lastname":"Sears","age":37,"gender":"F","address":"686 Eastern Parkway","employer":"Updat","email":"crawfordsears@updat.com","city":"Bison","state":"VT"} +{"account_number":634,"balance":29805,"firstname":"Deloris","lastname":"Levy","age":38,"gender":"M","address":"838 Foster Avenue","employer":"Homelux","email":"delorislevy@homelux.com","city":"Kempton","state":"PA"} 
+{"account_number":639,"balance":28875,"firstname":"Caitlin","lastname":"Clements","age":32,"gender":"F","address":"627 Aster Court","employer":"Bunga","email":"caitlinclements@bunga.com","city":"Cetronia","state":"SC"} +{"account_number":641,"balance":18345,"firstname":"Sheppard","lastname":"Everett","age":39,"gender":"F","address":"791 Norwood Avenue","employer":"Roboid","email":"sheppardeverett@roboid.com","city":"Selma","state":"AK"} +{"account_number":646,"balance":15559,"firstname":"Lavonne","lastname":"Reyes","age":31,"gender":"F","address":"983 Newport Street","employer":"Parcoe","email":"lavonnereyes@parcoe.com","city":"Monument","state":"LA"} +{"account_number":653,"balance":7606,"firstname":"Marcia","lastname":"Bennett","age":33,"gender":"F","address":"455 Bragg Street","employer":"Opticall","email":"marciabennett@opticall.com","city":"Magnolia","state":"NC"} +{"account_number":658,"balance":10210,"firstname":"Bass","lastname":"Mcconnell","age":32,"gender":"F","address":"274 Ocean Avenue","employer":"Combot","email":"bassmcconnell@combot.com","city":"Beyerville","state":"OH"} +{"account_number":660,"balance":46427,"firstname":"Moon","lastname":"Wood","age":33,"gender":"F","address":"916 Amersfort Place","employer":"Olucore","email":"moonwood@olucore.com","city":"Como","state":"VA"} +{"account_number":665,"balance":15215,"firstname":"Britney","lastname":"Young","age":36,"gender":"M","address":"766 Sackman Street","employer":"Geoforma","email":"britneyyoung@geoforma.com","city":"Tuttle","state":"WI"} +{"account_number":672,"balance":12621,"firstname":"Camille","lastname":"Munoz","age":36,"gender":"F","address":"959 Lewis Place","employer":"Vantage","email":"camillemunoz@vantage.com","city":"Whitmer","state":"IN"} +{"account_number":677,"balance":8491,"firstname":"Snider","lastname":"Benton","age":26,"gender":"M","address":"827 Evans Street","employer":"Medicroix","email":"sniderbenton@medicroix.com","city":"Kaka","state":"UT"} 
+{"account_number":684,"balance":46091,"firstname":"Warren","lastname":"Snow","age":25,"gender":"M","address":"756 Oakland Place","employer":"Bizmatic","email":"warrensnow@bizmatic.com","city":"Hatteras","state":"NE"} +{"account_number":689,"balance":14985,"firstname":"Ines","lastname":"Chaney","age":28,"gender":"M","address":"137 Dikeman Street","employer":"Zidant","email":"ineschaney@zidant.com","city":"Nettie","state":"DC"} +{"account_number":691,"balance":10792,"firstname":"Mclean","lastname":"Colon","age":22,"gender":"M","address":"876 Classon Avenue","employer":"Elentrix","email":"mcleancolon@elentrix.com","city":"Unionville","state":"OK"} +{"account_number":696,"balance":17568,"firstname":"Crane","lastname":"Matthews","age":32,"gender":"F","address":"721 Gerritsen Avenue","employer":"Intradisk","email":"cranematthews@intradisk.com","city":"Brewster","state":"WV"} +{"account_number":704,"balance":45347,"firstname":"Peters","lastname":"Kent","age":22,"gender":"F","address":"871 Independence Avenue","employer":"Extragen","email":"peterskent@extragen.com","city":"Morriston","state":"CA"} +{"account_number":709,"balance":11015,"firstname":"Abbott","lastname":"Odom","age":29,"gender":"M","address":"893 Union Street","employer":"Jimbies","email":"abbottodom@jimbies.com","city":"Leeper","state":"NJ"} +{"account_number":711,"balance":26939,"firstname":"Villarreal","lastname":"Horton","age":35,"gender":"F","address":"861 Creamer Street","employer":"Lexicondo","email":"villarrealhorton@lexicondo.com","city":"Lydia","state":"MS"} +{"account_number":716,"balance":19789,"firstname":"Paul","lastname":"Mason","age":34,"gender":"F","address":"618 Nichols Avenue","employer":"Slax","email":"paulmason@slax.com","city":"Snowville","state":"OK"} +{"account_number":723,"balance":16421,"firstname":"Nixon","lastname":"Moran","age":27,"gender":"M","address":"569 Campus Place","employer":"Cuizine","email":"nixonmoran@cuizine.com","city":"Buxton","state":"DC"} 
+{"account_number":728,"balance":44818,"firstname":"Conley","lastname":"Preston","age":28,"gender":"M","address":"450 Coventry Road","employer":"Obones","email":"conleypreston@obones.com","city":"Alden","state":"CO"} +{"account_number":730,"balance":41299,"firstname":"Moore","lastname":"Lee","age":30,"gender":"M","address":"797 Turner Place","employer":"Orbean","email":"moorelee@orbean.com","city":"Highland","state":"DE"} +{"account_number":735,"balance":3984,"firstname":"Loraine","lastname":"Willis","age":32,"gender":"F","address":"928 Grove Street","employer":"Gadtron","email":"lorainewillis@gadtron.com","city":"Lowgap","state":"NY"} +{"account_number":742,"balance":24765,"firstname":"Merle","lastname":"Wooten","age":26,"gender":"M","address":"317 Pooles Lane","employer":"Tropolis","email":"merlewooten@tropolis.com","city":"Bentley","state":"ND"} +{"account_number":747,"balance":16617,"firstname":"Diaz","lastname":"Austin","age":38,"gender":"M","address":"676 Harway Avenue","employer":"Irack","email":"diazaustin@irack.com","city":"Cliff","state":"HI"} +{"account_number":754,"balance":10779,"firstname":"Jones","lastname":"Vega","age":25,"gender":"F","address":"795 India Street","employer":"Gluid","email":"jonesvega@gluid.com","city":"Tyhee","state":"FL"} +{"account_number":759,"balance":38007,"firstname":"Rose","lastname":"Carlson","age":27,"gender":"M","address":"987 Navy Street","employer":"Aquasure","email":"rosecarlson@aquasure.com","city":"Carlton","state":"CT"} +{"account_number":761,"balance":7663,"firstname":"Rae","lastname":"Juarez","age":34,"gender":"F","address":"560 Gilmore Court","employer":"Entropix","email":"raejuarez@entropix.com","city":"Northchase","state":"ID"} +{"account_number":766,"balance":21957,"firstname":"Thomas","lastname":"Gillespie","age":38,"gender":"M","address":"993 Williams Place","employer":"Octocore","email":"thomasgillespie@octocore.com","city":"Defiance","state":"MS"} 
+{"account_number":773,"balance":31126,"firstname":"Liza","lastname":"Coffey","age":36,"gender":"F","address":"540 Bulwer Place","employer":"Assurity","email":"lizacoffey@assurity.com","city":"Gilgo","state":"WV"} +{"account_number":778,"balance":46007,"firstname":"Underwood","lastname":"Wheeler","age":28,"gender":"M","address":"477 Provost Street","employer":"Decratex","email":"underwoodwheeler@decratex.com","city":"Sardis","state":"ID"} +{"account_number":780,"balance":4682,"firstname":"Maryanne","lastname":"Hendricks","age":26,"gender":"F","address":"709 Wolcott Street","employer":"Sarasonic","email":"maryannehendricks@sarasonic.com","city":"Santel","state":"NH"} +{"account_number":785,"balance":25078,"firstname":"Fields","lastname":"Lester","age":29,"gender":"M","address":"808 Chestnut Avenue","employer":"Visualix","email":"fieldslester@visualix.com","city":"Rowe","state":"PA"} +{"account_number":792,"balance":13109,"firstname":"Becky","lastname":"Jimenez","age":40,"gender":"F","address":"539 Front Street","employer":"Isologia","email":"beckyjimenez@isologia.com","city":"Summertown","state":"MI"} +{"account_number":797,"balance":6854,"firstname":"Lindsay","lastname":"Mills","age":26,"gender":"F","address":"919 Quay Street","employer":"Zoinage","email":"lindsaymills@zoinage.com","city":"Elliston","state":"VA"} +{"account_number":800,"balance":26217,"firstname":"Candy","lastname":"Oconnor","age":28,"gender":"M","address":"200 Newel Street","employer":"Radiantix","email":"candyoconnor@radiantix.com","city":"Sandston","state":"OH"} +{"account_number":805,"balance":18426,"firstname":"Jackson","lastname":"Sampson","age":27,"gender":"F","address":"722 Kenmore Court","employer":"Daido","email":"jacksonsampson@daido.com","city":"Bellamy","state":"ME"} +{"account_number":812,"balance":42593,"firstname":"Graves","lastname":"Newman","age":32,"gender":"F","address":"916 Joralemon Street","employer":"Ecrater","email":"gravesnewman@ecrater.com","city":"Crown","state":"PA"} 
+{"account_number":817,"balance":36582,"firstname":"Padilla","lastname":"Bauer","age":36,"gender":"F","address":"310 Cadman Plaza","employer":"Exoblue","email":"padillabauer@exoblue.com","city":"Ahwahnee","state":"MN"} +{"account_number":824,"balance":6053,"firstname":"Dyer","lastname":"Henson","age":33,"gender":"M","address":"650 Seaview Avenue","employer":"Nitracyr","email":"dyerhenson@nitracyr.com","city":"Gibsonia","state":"KS"} +{"account_number":829,"balance":20263,"firstname":"Althea","lastname":"Bell","age":37,"gender":"M","address":"319 Cook Street","employer":"Hyplex","email":"altheabell@hyplex.com","city":"Wadsworth","state":"DC"} +{"account_number":831,"balance":25375,"firstname":"Wendy","lastname":"Savage","age":37,"gender":"M","address":"421 Veranda Place","employer":"Neurocell","email":"wendysavage@neurocell.com","city":"Fresno","state":"MS"} +{"account_number":836,"balance":20797,"firstname":"Lloyd","lastname":"Lindsay","age":25,"gender":"F","address":"953 Dinsmore Place","employer":"Suretech","email":"lloydlindsay@suretech.com","city":"Conway","state":"VA"} +{"account_number":843,"balance":15555,"firstname":"Patricia","lastname":"Barton","age":34,"gender":"F","address":"406 Seabring Street","employer":"Providco","email":"patriciabarton@providco.com","city":"Avoca","state":"RI"} +{"account_number":848,"balance":15443,"firstname":"Carmella","lastname":"Cash","age":38,"gender":"M","address":"988 Exeter Street","employer":"Bristo","email":"carmellacash@bristo.com","city":"Northridge","state":"ID"} +{"account_number":850,"balance":6531,"firstname":"Carlene","lastname":"Gaines","age":37,"gender":"F","address":"753 Monroe Place","employer":"Naxdis","email":"carlenegaines@naxdis.com","city":"Genoa","state":"OR"} +{"account_number":855,"balance":40170,"firstname":"Mia","lastname":"Stevens","age":31,"gender":"F","address":"326 Driggs Avenue","employer":"Aeora","email":"miastevens@aeora.com","city":"Delwood","state":"IL"} 
+{"account_number":862,"balance":38792,"firstname":"Clayton","lastname":"Golden","age":38,"gender":"F","address":"620 Regent Place","employer":"Accusage","email":"claytongolden@accusage.com","city":"Ona","state":"NC"} +{"account_number":867,"balance":45453,"firstname":"Blanca","lastname":"Ellison","age":23,"gender":"F","address":"593 McKibben Street","employer":"Koogle","email":"blancaellison@koogle.com","city":"Frystown","state":"WY"} +{"account_number":874,"balance":23079,"firstname":"Lynette","lastname":"Higgins","age":22,"gender":"M","address":"377 McKinley Avenue","employer":"Menbrain","email":"lynettehiggins@menbrain.com","city":"Manitou","state":"TX"} +{"account_number":879,"balance":48332,"firstname":"Sabrina","lastname":"Lancaster","age":31,"gender":"F","address":"382 Oak Street","employer":"Webiotic","email":"sabrinalancaster@webiotic.com","city":"Lindisfarne","state":"AZ"} +{"account_number":881,"balance":26684,"firstname":"Barnes","lastname":"Ware","age":38,"gender":"F","address":"666 Hooper Street","employer":"Norali","email":"barnesware@norali.com","city":"Cazadero","state":"GA"} +{"account_number":886,"balance":14867,"firstname":"Willa","lastname":"Leblanc","age":38,"gender":"F","address":"773 Bergen Street","employer":"Nurali","email":"willaleblanc@nurali.com","city":"Hilltop","state":"NC"} +{"account_number":893,"balance":42584,"firstname":"Moses","lastname":"Campos","age":38,"gender":"F","address":"991 Bevy Court","employer":"Trollery","email":"mosescampos@trollery.com","city":"Freetown","state":"AK"} +{"account_number":898,"balance":12019,"firstname":"Lori","lastname":"Stevenson","age":29,"gender":"M","address":"910 Coles Street","employer":"Honotron","email":"loristevenson@honotron.com","city":"Shindler","state":"VT"} +{"account_number":901,"balance":35038,"firstname":"Irma","lastname":"Dotson","age":23,"gender":"F","address":"245 Mayfair Drive","employer":"Bleeko","email":"irmadotson@bleeko.com","city":"Lodoga","state":"UT"} 
+{"account_number":906,"balance":24073,"firstname":"Vicki","lastname":"Suarez","age":36,"gender":"M","address":"829 Roosevelt Place","employer":"Utara","email":"vickisuarez@utara.com","city":"Albrightsville","state":"AR"} +{"account_number":913,"balance":47657,"firstname":"Margery","lastname":"Monroe","age":25,"gender":"M","address":"941 Fanchon Place","employer":"Exerta","email":"margerymonroe@exerta.com","city":"Bannock","state":"MD"} +{"account_number":918,"balance":36776,"firstname":"Dianna","lastname":"Hernandez","age":25,"gender":"M","address":"499 Moultrie Street","employer":"Isologica","email":"diannahernandez@isologica.com","city":"Falconaire","state":"ID"} +{"account_number":920,"balance":41513,"firstname":"Jerri","lastname":"Mitchell","age":26,"gender":"M","address":"831 Kent Street","employer":"Tasmania","email":"jerrimitchell@tasmania.com","city":"Cotopaxi","state":"IA"} +{"account_number":925,"balance":18295,"firstname":"Rosario","lastname":"Jackson","age":24,"gender":"M","address":"178 Leonora Court","employer":"Progenex","email":"rosariojackson@progenex.com","city":"Rivereno","state":"DE"} +{"account_number":932,"balance":3111,"firstname":"Summer","lastname":"Porter","age":33,"gender":"F","address":"949 Grand Avenue","employer":"Multiflex","email":"summerporter@multiflex.com","city":"Spokane","state":"OK"} +{"account_number":937,"balance":43491,"firstname":"Selma","lastname":"Anderson","age":24,"gender":"M","address":"205 Reed Street","employer":"Dadabase","email":"selmaanderson@dadabase.com","city":"Malo","state":"AL"} +{"account_number":944,"balance":46478,"firstname":"Donaldson","lastname":"Woodard","age":38,"gender":"F","address":"498 Laurel Avenue","employer":"Zogak","email":"donaldsonwoodard@zogak.com","city":"Hasty","state":"ID"} +{"account_number":949,"balance":48703,"firstname":"Latasha","lastname":"Mullins","age":29,"gender":"F","address":"272 Lefferts 
Place","employer":"Zenolux","email":"latashamullins@zenolux.com","city":"Kieler","state":"MN"} +{"account_number":951,"balance":36337,"firstname":"Tran","lastname":"Burris","age":25,"gender":"F","address":"561 Rutland Road","employer":"Geoform","email":"tranburris@geoform.com","city":"Longbranch","state":"IL"} +{"account_number":956,"balance":19477,"firstname":"Randall","lastname":"Lynch","age":22,"gender":"F","address":"490 Madison Place","employer":"Cosmetex","email":"randalllynch@cosmetex.com","city":"Wells","state":"SD"} +{"account_number":963,"balance":30461,"firstname":"Griffin","lastname":"Sheppard","age":20,"gender":"M","address":"682 Linden Street","employer":"Zanymax","email":"griffinsheppard@zanymax.com","city":"Fannett","state":"NM"} +{"account_number":968,"balance":32371,"firstname":"Luella","lastname":"Burch","age":39,"gender":"M","address":"684 Arkansas Drive","employer":"Krag","email":"luellaburch@krag.com","city":"Brambleton","state":"SD"} +{"account_number":970,"balance":19648,"firstname":"Forbes","lastname":"Wallace","age":28,"gender":"M","address":"990 Mill Road","employer":"Pheast","email":"forbeswallace@pheast.com","city":"Lopezo","state":"AK"} +{"account_number":975,"balance":5239,"firstname":"Delores","lastname":"Booker","age":27,"gender":"F","address":"328 Conselyea Street","employer":"Centice","email":"deloresbooker@centice.com","city":"Williams","state":"HI"} +{"account_number":982,"balance":16511,"firstname":"Buck","lastname":"Robinson","age":24,"gender":"M","address":"301 Melrose Street","employer":"Calcu","email":"buckrobinson@calcu.com","city":"Welch","state":"PA"} +{"account_number":987,"balance":4072,"firstname":"Brock","lastname":"Sandoval","age":20,"gender":"F","address":"977 Gem Street","employer":"Fiberox","email":"brocksandoval@fiberox.com","city":"Celeryville","state":"NY"} +{"account_number":994,"balance":33298,"firstname":"Madge","lastname":"Holcomb","age":31,"gender":"M","address":"612 Hawthorne 
Street","employer":"Escenta","email":"madgeholcomb@escenta.com","city":"Alafaya","state":"OR"} +{"account_number":999,"balance":6087,"firstname":"Dorothy","lastname":"Barron","age":22,"gender":"F","address":"499 Laurel Avenue","employer":"Xurban","email":"dorothybarron@xurban.com","city":"Belvoir","state":"CA"} +{"account_number":4,"balance":27658,"firstname":"Rodriquez","lastname":"Flores","age":31,"gender":"F","address":"986 Wyckoff Avenue","employer":"Tourmania","email":"rodriquezflores@tourmania.com","city":"Eastvale","state":"HI"} +{"account_number":9,"balance":24776,"firstname":"Opal","lastname":"Meadows","age":39,"gender":"M","address":"963 Neptune Avenue","employer":"Cedward","email":"opalmeadows@cedward.com","city":"Olney","state":"OH"} +{"account_number":11,"balance":20203,"firstname":"Jenkins","lastname":"Haney","age":20,"gender":"M","address":"740 Ferry Place","employer":"Qimonk","email":"jenkinshaney@qimonk.com","city":"Steinhatchee","state":"GA"} +{"account_number":16,"balance":35883,"firstname":"Adrian","lastname":"Pitts","age":34,"gender":"F","address":"963 Fay Court","employer":"Combogene","email":"adrianpitts@combogene.com","city":"Remington","state":"SD"} +{"account_number":23,"balance":42374,"firstname":"Kirsten","lastname":"Fox","age":20,"gender":"M","address":"330 Dumont Avenue","employer":"Codax","email":"kirstenfox@codax.com","city":"Walton","state":"AK"} +{"account_number":28,"balance":42112,"firstname":"Vega","lastname":"Flynn","age":20,"gender":"M","address":"647 Hyman Court","employer":"Accupharm","email":"vegaflynn@accupharm.com","city":"Masthope","state":"OH"} +{"account_number":30,"balance":19087,"firstname":"Lamb","lastname":"Townsend","age":26,"gender":"M","address":"169 Lyme Avenue","employer":"Geeknet","email":"lambtownsend@geeknet.com","city":"Epworth","state":"AL"} +{"account_number":35,"balance":42039,"firstname":"Darla","lastname":"Bridges","age":27,"gender":"F","address":"315 Central 
Avenue","employer":"Xeronk","email":"darlabridges@xeronk.com","city":"Woodlake","state":"RI"} +{"account_number":42,"balance":21137,"firstname":"Harding","lastname":"Hobbs","age":26,"gender":"F","address":"474 Ridgewood Place","employer":"Xth","email":"hardinghobbs@xth.com","city":"Heil","state":"ND"} +{"account_number":47,"balance":33044,"firstname":"Georgia","lastname":"Wilkerson","age":23,"gender":"M","address":"369 Herbert Street","employer":"Endipin","email":"georgiawilkerson@endipin.com","city":"Dellview","state":"WI"} +{"account_number":54,"balance":23406,"firstname":"Angel","lastname":"Mann","age":22,"gender":"F","address":"229 Ferris Street","employer":"Amtas","email":"angelmann@amtas.com","city":"Calverton","state":"WA"} +{"account_number":59,"balance":37728,"firstname":"Malone","lastname":"Justice","age":37,"gender":"F","address":"721 Russell Street","employer":"Emoltra","email":"malonejustice@emoltra.com","city":"Trucksville","state":"HI"} +{"account_number":61,"balance":6856,"firstname":"Shawn","lastname":"Baird","age":20,"gender":"M","address":"605 Monument Walk","employer":"Moltonic","email":"shawnbaird@moltonic.com","city":"Darlington","state":"MN"} +{"account_number":66,"balance":25939,"firstname":"Franks","lastname":"Salinas","age":28,"gender":"M","address":"437 Hamilton Walk","employer":"Cowtown","email":"frankssalinas@cowtown.com","city":"Chase","state":"VT"} +{"account_number":73,"balance":33457,"firstname":"Irene","lastname":"Stephenson","age":32,"gender":"M","address":"684 Miller Avenue","employer":"Hawkster","email":"irenestephenson@hawkster.com","city":"Levant","state":"AR"} +{"account_number":78,"balance":48656,"firstname":"Elvira","lastname":"Patterson","age":23,"gender":"F","address":"834 Amber Street","employer":"Assistix","email":"elvirapatterson@assistix.com","city":"Dunbar","state":"TN"} +{"account_number":80,"balance":13445,"firstname":"Lacey","lastname":"Blanchard","age":30,"gender":"F","address":"823 Himrod 
Street","employer":"Comdom","email":"laceyblanchard@comdom.com","city":"Matthews","state":"MO"} +{"account_number":85,"balance":48735,"firstname":"Wilcox","lastname":"Sellers","age":20,"gender":"M","address":"212 Irving Avenue","employer":"Confrenzy","email":"wilcoxsellers@confrenzy.com","city":"Kipp","state":"MT"} +{"account_number":92,"balance":26753,"firstname":"Gay","lastname":"Brewer","age":34,"gender":"M","address":"369 Ditmars Street","employer":"Savvy","email":"gaybrewer@savvy.com","city":"Moquino","state":"HI"} +{"account_number":97,"balance":49671,"firstname":"Karen","lastname":"Trujillo","age":40,"gender":"F","address":"512 Cumberland Walk","employer":"Tsunamia","email":"karentrujillo@tsunamia.com","city":"Fredericktown","state":"MO"} +{"account_number":100,"balance":29869,"firstname":"Madden","lastname":"Woods","age":32,"gender":"F","address":"696 Ryder Avenue","employer":"Slumberia","email":"maddenwoods@slumberia.com","city":"Deercroft","state":"ME"} +{"account_number":105,"balance":29654,"firstname":"Castillo","lastname":"Dickerson","age":33,"gender":"F","address":"673 Oxford Street","employer":"Tellifly","email":"castillodickerson@tellifly.com","city":"Succasunna","state":"NY"} +{"account_number":112,"balance":38395,"firstname":"Frederick","lastname":"Case","age":30,"gender":"F","address":"580 Lexington Avenue","employer":"Talkalot","email":"frederickcase@talkalot.com","city":"Orovada","state":"MA"} +{"account_number":117,"balance":48831,"firstname":"Robin","lastname":"Hays","age":38,"gender":"F","address":"347 Hornell Loop","employer":"Pasturia","email":"robinhays@pasturia.com","city":"Sims","state":"WY"} +{"account_number":124,"balance":16425,"firstname":"Fern","lastname":"Lambert","age":20,"gender":"M","address":"511 Jay Street","employer":"Furnitech","email":"fernlambert@furnitech.com","city":"Cloverdale","state":"FL"} +{"account_number":129,"balance":42409,"firstname":"Alexandria","lastname":"Sanford","age":33,"gender":"F","address":"934 
Ridgecrest Terrace","employer":"Kyagoro","email":"alexandriasanford@kyagoro.com","city":"Concho","state":"UT"} +{"account_number":131,"balance":28030,"firstname":"Dollie","lastname":"Koch","age":22,"gender":"F","address":"287 Manhattan Avenue","employer":"Skinserve","email":"dolliekoch@skinserve.com","city":"Shasta","state":"PA"} +{"account_number":136,"balance":45801,"firstname":"Winnie","lastname":"Holland","age":38,"gender":"M","address":"198 Mill Lane","employer":"Neteria","email":"winnieholland@neteria.com","city":"Urie","state":"IL"} +{"account_number":143,"balance":43093,"firstname":"Cohen","lastname":"Noble","age":39,"gender":"M","address":"454 Nelson Street","employer":"Buzzworks","email":"cohennoble@buzzworks.com","city":"Norvelt","state":"CO"} +{"account_number":148,"balance":3662,"firstname":"Annmarie","lastname":"Snider","age":34,"gender":"F","address":"857 Lafayette Walk","employer":"Edecine","email":"annmariesnider@edecine.com","city":"Hollins","state":"OH"} +{"account_number":150,"balance":15306,"firstname":"Ortega","lastname":"Dalton","age":20,"gender":"M","address":"237 Mermaid Avenue","employer":"Rameon","email":"ortegadalton@rameon.com","city":"Maxville","state":"NH"} +{"account_number":155,"balance":27878,"firstname":"Atkinson","lastname":"Hudson","age":39,"gender":"F","address":"434 Colin Place","employer":"Qualitern","email":"atkinsonhudson@qualitern.com","city":"Hoehne","state":"OH"} +{"account_number":162,"balance":6302,"firstname":"Griffith","lastname":"Calderon","age":35,"gender":"M","address":"871 Vandervoort Place","employer":"Quotezart","email":"griffithcalderon@quotezart.com","city":"Barclay","state":"FL"} +{"account_number":167,"balance":42051,"firstname":"Hampton","lastname":"Ryan","age":20,"gender":"M","address":"618 Fleet Place","employer":"Zipak","email":"hamptonryan@zipak.com","city":"Irwin","state":"KS"} +{"account_number":174,"balance":1464,"firstname":"Gamble","lastname":"Pierce","age":23,"gender":"F","address":"650 Eagle 
Street","employer":"Matrixity","email":"gamblepierce@matrixity.com","city":"Abiquiu","state":"OR"} +{"account_number":179,"balance":13265,"firstname":"Elise","lastname":"Drake","age":25,"gender":"M","address":"305 Christopher Avenue","employer":"Turnling","email":"elisedrake@turnling.com","city":"Loretto","state":"LA"} +{"account_number":181,"balance":27983,"firstname":"Bennett","lastname":"Hampton","age":22,"gender":"F","address":"435 Billings Place","employer":"Voipa","email":"bennetthampton@voipa.com","city":"Rodman","state":"WY"} +{"account_number":186,"balance":18373,"firstname":"Kline","lastname":"Joyce","age":32,"gender":"M","address":"285 Falmouth Street","employer":"Tetratrex","email":"klinejoyce@tetratrex.com","city":"Klondike","state":"SD"} +{"account_number":193,"balance":13412,"firstname":"Patty","lastname":"Petty","age":34,"gender":"F","address":"251 Vermont Street","employer":"Kinetica","email":"pattypetty@kinetica.com","city":"Grantville","state":"MS"} +{"account_number":198,"balance":19686,"firstname":"Rachael","lastname":"Sharp","age":38,"gender":"F","address":"443 Vernon Avenue","employer":"Powernet","email":"rachaelsharp@powernet.com","city":"Canoochee","state":"UT"} +{"account_number":201,"balance":14586,"firstname":"Ronda","lastname":"Perry","age":25,"gender":"F","address":"856 Downing Street","employer":"Artiq","email":"rondaperry@artiq.com","city":"Colton","state":"WV"} +{"account_number":206,"balance":47423,"firstname":"Kelli","lastname":"Francis","age":20,"gender":"M","address":"671 George Street","employer":"Exoswitch","email":"kellifrancis@exoswitch.com","city":"Babb","state":"NJ"} +{"account_number":213,"balance":34172,"firstname":"Bauer","lastname":"Summers","age":27,"gender":"M","address":"257 Boynton Place","employer":"Voratak","email":"bauersummers@voratak.com","city":"Oceola","state":"NC"} +{"account_number":218,"balance":26702,"firstname":"Garrison","lastname":"Bryan","age":24,"gender":"F","address":"478 Greenpoint 
Avenue","employer":"Uniworld","email":"garrisonbryan@uniworld.com","city":"Comptche","state":"WI"} +{"account_number":220,"balance":3086,"firstname":"Tania","lastname":"Middleton","age":22,"gender":"F","address":"541 Gunther Place","employer":"Zerology","email":"taniamiddleton@zerology.com","city":"Linwood","state":"IN"} +{"account_number":225,"balance":21949,"firstname":"Maryann","lastname":"Murphy","age":24,"gender":"F","address":"894 Bridgewater Street","employer":"Cinesanct","email":"maryannmurphy@cinesanct.com","city":"Cartwright","state":"RI"} +{"account_number":232,"balance":11984,"firstname":"Carr","lastname":"Jensen","age":34,"gender":"F","address":"995 Micieli Place","employer":"Biohab","email":"carrjensen@biohab.com","city":"Waikele","state":"OH"} +{"account_number":237,"balance":5603,"firstname":"Kirby","lastname":"Watkins","age":27,"gender":"F","address":"348 Blake Court","employer":"Sonique","email":"kirbywatkins@sonique.com","city":"Freelandville","state":"PA"} +{"account_number":244,"balance":8048,"firstname":"Judith","lastname":"Riggs","age":27,"gender":"F","address":"590 Kosciusko Street","employer":"Arctiq","email":"judithriggs@arctiq.com","city":"Gorham","state":"DC"} +{"account_number":249,"balance":16822,"firstname":"Mckinney","lastname":"Gallagher","age":38,"gender":"F","address":"939 Seigel Court","employer":"Premiant","email":"mckinneygallagher@premiant.com","city":"Catharine","state":"NH"} +{"account_number":251,"balance":13475,"firstname":"Marks","lastname":"Graves","age":39,"gender":"F","address":"427 Lawn Court","employer":"Dentrex","email":"marksgraves@dentrex.com","city":"Waukeenah","state":"IL"} +{"account_number":256,"balance":48318,"firstname":"Simon","lastname":"Hogan","age":31,"gender":"M","address":"789 Suydam Place","employer":"Dancerity","email":"simonhogan@dancerity.com","city":"Dargan","state":"GA"} +{"account_number":263,"balance":12837,"firstname":"Thornton","lastname":"Meyer","age":29,"gender":"M","address":"575 Elliott 
Place","employer":"Peticular","email":"thorntonmeyer@peticular.com","city":"Dotsero","state":"NH"} +{"account_number":268,"balance":20925,"firstname":"Avis","lastname":"Blackwell","age":36,"gender":"M","address":"569 Jerome Avenue","employer":"Magnina","email":"avisblackwell@magnina.com","city":"Bethany","state":"MD"} +{"account_number":270,"balance":43951,"firstname":"Moody","lastname":"Harmon","age":39,"gender":"F","address":"233 Vanderbilt Street","employer":"Otherside","email":"moodyharmon@otherside.com","city":"Elwood","state":"MT"} +{"account_number":275,"balance":2384,"firstname":"Reynolds","lastname":"Barnett","age":31,"gender":"M","address":"394 Stockton Street","employer":"Austex","email":"reynoldsbarnett@austex.com","city":"Grandview","state":"MS"} +{"account_number":282,"balance":38540,"firstname":"Gay","lastname":"Schultz","age":25,"gender":"F","address":"805 Claver Place","employer":"Handshake","email":"gayschultz@handshake.com","city":"Tampico","state":"MA"} +{"account_number":287,"balance":10845,"firstname":"Valerie","lastname":"Lang","age":35,"gender":"F","address":"423 Midwood Street","employer":"Quarx","email":"valerielang@quarx.com","city":"Cannondale","state":"VT"} +{"account_number":294,"balance":29582,"firstname":"Pitts","lastname":"Haynes","age":26,"gender":"M","address":"901 Broome Street","employer":"Aquazure","email":"pittshaynes@aquazure.com","city":"Turah","state":"SD"} +{"account_number":299,"balance":40825,"firstname":"Angela","lastname":"Talley","age":36,"gender":"F","address":"822 Bills Place","employer":"Remold","email":"angelatalley@remold.com","city":"Bethpage","state":"DC"} +{"account_number":302,"balance":11298,"firstname":"Isabella","lastname":"Hewitt","age":40,"gender":"M","address":"455 Bedford Avenue","employer":"Cincyr","email":"isabellahewitt@cincyr.com","city":"Blanford","state":"IN"} +{"account_number":307,"balance":43355,"firstname":"Enid","lastname":"Ashley","age":23,"gender":"M","address":"412 Emerson 
Place","employer":"Avenetro","email":"enidashley@avenetro.com","city":"Catherine","state":"WI"} +{"account_number":314,"balance":5848,"firstname":"Norton","lastname":"Norton","age":35,"gender":"M","address":"252 Ditmas Avenue","employer":"Talkola","email":"nortonnorton@talkola.com","city":"Veyo","state":"SC"} +{"account_number":319,"balance":15430,"firstname":"Ferrell","lastname":"Mckinney","age":36,"gender":"M","address":"874 Cranberry Street","employer":"Portaline","email":"ferrellmckinney@portaline.com","city":"Rose","state":"WV"} +{"account_number":321,"balance":43370,"firstname":"Marta","lastname":"Larsen","age":35,"gender":"M","address":"617 Williams Court","employer":"Manufact","email":"martalarsen@manufact.com","city":"Sisquoc","state":"MA"} +{"account_number":326,"balance":9692,"firstname":"Pearl","lastname":"Reese","age":30,"gender":"F","address":"451 Colonial Court","employer":"Accruex","email":"pearlreese@accruex.com","city":"Westmoreland","state":"MD"} +{"account_number":333,"balance":22778,"firstname":"Trudy","lastname":"Sweet","age":27,"gender":"F","address":"881 Kiely Place","employer":"Acumentor","email":"trudysweet@acumentor.com","city":"Kent","state":"IA"} +{"account_number":338,"balance":6969,"firstname":"Pierce","lastname":"Lawrence","age":35,"gender":"M","address":"318 Gallatin Place","employer":"Lunchpad","email":"piercelawrence@lunchpad.com","city":"Iola","state":"MD"} +{"account_number":340,"balance":42072,"firstname":"Juarez","lastname":"Gutierrez","age":40,"gender":"F","address":"802 Seba Avenue","employer":"Billmed","email":"juarezgutierrez@billmed.com","city":"Malott","state":"OH"} +{"account_number":345,"balance":9812,"firstname":"Parker","lastname":"Hines","age":38,"gender":"M","address":"715 Mill Avenue","employer":"Baluba","email":"parkerhines@baluba.com","city":"Blackgum","state":"KY"} +{"account_number":352,"balance":20290,"firstname":"Kendra","lastname":"Mcintosh","age":31,"gender":"F","address":"963 Wolf 
Place","employer":"Orboid","email":"kendramcintosh@orboid.com","city":"Bladensburg","state":"AK"} +{"account_number":357,"balance":15102,"firstname":"Adele","lastname":"Carroll","age":39,"gender":"F","address":"381 Arion Place","employer":"Aquafire","email":"adelecarroll@aquafire.com","city":"Springville","state":"RI"} +{"account_number":364,"balance":35247,"firstname":"Felicia","lastname":"Merrill","age":40,"gender":"F","address":"229 Branton Street","employer":"Prosely","email":"feliciamerrill@prosely.com","city":"Dola","state":"MA"} +{"account_number":369,"balance":17047,"firstname":"Mcfadden","lastname":"Guy","age":28,"gender":"F","address":"445 Lott Avenue","employer":"Kangle","email":"mcfaddenguy@kangle.com","city":"Greenbackville","state":"DE"} +{"account_number":371,"balance":19751,"firstname":"Barker","lastname":"Allen","age":32,"gender":"F","address":"295 Wallabout Street","employer":"Nexgene","email":"barkerallen@nexgene.com","city":"Nanafalia","state":"NE"} +{"account_number":376,"balance":44407,"firstname":"Mcmillan","lastname":"Dunn","age":21,"gender":"F","address":"771 Dorchester Road","employer":"Eargo","email":"mcmillandunn@eargo.com","city":"Yogaville","state":"RI"} +{"account_number":383,"balance":48889,"firstname":"Knox","lastname":"Larson","age":28,"gender":"F","address":"962 Bartlett Place","employer":"Bostonic","email":"knoxlarson@bostonic.com","city":"Smeltertown","state":"TX"} +{"account_number":388,"balance":9606,"firstname":"Julianne","lastname":"Nicholson","age":26,"gender":"F","address":"338 Crescent Street","employer":"Viasia","email":"juliannenicholson@viasia.com","city":"Alleghenyville","state":"MO"} +{"account_number":390,"balance":7464,"firstname":"Ramona","lastname":"Roy","age":32,"gender":"M","address":"135 Banner Avenue","employer":"Deminimum","email":"ramonaroy@deminimum.com","city":"Dodge","state":"ID"} +{"account_number":395,"balance":18679,"firstname":"Juliet","lastname":"Whitaker","age":31,"gender":"M","address":"128 Remsen 
Avenue","employer":"Toyletry","email":"julietwhitaker@toyletry.com","city":"Yonah","state":"LA"} +{"account_number":403,"balance":18833,"firstname":"Williamson","lastname":"Horn","age":32,"gender":"M","address":"223 Strickland Avenue","employer":"Nimon","email":"williamsonhorn@nimon.com","city":"Bawcomville","state":"NJ"} +{"account_number":408,"balance":34666,"firstname":"Lidia","lastname":"Guerrero","age":30,"gender":"M","address":"254 Stratford Road","employer":"Snowpoke","email":"lidiaguerrero@snowpoke.com","city":"Fairlee","state":"LA"} +{"account_number":410,"balance":31200,"firstname":"Fox","lastname":"Cardenas","age":39,"gender":"M","address":"987 Monitor Street","employer":"Corpulse","email":"foxcardenas@corpulse.com","city":"Southview","state":"NE"} +{"account_number":415,"balance":19449,"firstname":"Martinez","lastname":"Benson","age":36,"gender":"M","address":"172 Berkeley Place","employer":"Enersol","email":"martinezbenson@enersol.com","city":"Chumuckla","state":"AL"} +{"account_number":422,"balance":40162,"firstname":"Brigitte","lastname":"Scott","age":26,"gender":"M","address":"662 Vermont Court","employer":"Waretel","email":"brigittescott@waretel.com","city":"Elrama","state":"VA"} +{"account_number":427,"balance":1463,"firstname":"Rebekah","lastname":"Garrison","age":36,"gender":"F","address":"837 Hampton Avenue","employer":"Niquent","email":"rebekahgarrison@niquent.com","city":"Zarephath","state":"NY"} +{"account_number":434,"balance":11329,"firstname":"Christa","lastname":"Huff","age":25,"gender":"M","address":"454 Oriental Boulevard","employer":"Earthpure","email":"christahuff@earthpure.com","city":"Stevens","state":"DC"} +{"account_number":439,"balance":22752,"firstname":"Lula","lastname":"Williams","age":35,"gender":"M","address":"630 Furman Avenue","employer":"Vinch","email":"lulawilliams@vinch.com","city":"Newcastle","state":"ME"} 
+{"account_number":441,"balance":47947,"firstname":"Dickson","lastname":"Mcgee","age":29,"gender":"M","address":"478 Knight Court","employer":"Gogol","email":"dicksonmcgee@gogol.com","city":"Laurelton","state":"AR"} +{"account_number":446,"balance":23071,"firstname":"Lolita","lastname":"Fleming","age":32,"gender":"F","address":"918 Bridge Street","employer":"Vidto","email":"lolitafleming@vidto.com","city":"Brownlee","state":"HI"} +{"account_number":453,"balance":21520,"firstname":"Hood","lastname":"Powell","age":24,"gender":"F","address":"479 Brevoort Place","employer":"Vortexaco","email":"hoodpowell@vortexaco.com","city":"Alderpoint","state":"CT"} +{"account_number":458,"balance":8865,"firstname":"Aida","lastname":"Wolf","age":21,"gender":"F","address":"403 Thames Street","employer":"Isis","email":"aidawolf@isis.com","city":"Bordelonville","state":"ME"} +{"account_number":460,"balance":37734,"firstname":"Aguirre","lastname":"White","age":21,"gender":"F","address":"190 Crooke Avenue","employer":"Unq","email":"aguirrewhite@unq.com","city":"Albany","state":"NJ"} +{"account_number":465,"balance":10681,"firstname":"Pearlie","lastname":"Holman","age":29,"gender":"M","address":"916 Evergreen Avenue","employer":"Hometown","email":"pearlieholman@hometown.com","city":"Needmore","state":"UT"} +{"account_number":472,"balance":25571,"firstname":"Lee","lastname":"Long","age":32,"gender":"F","address":"288 Mill Street","employer":"Comverges","email":"leelong@comverges.com","city":"Movico","state":"MT"} +{"account_number":477,"balance":25892,"firstname":"Holcomb","lastname":"Cobb","age":40,"gender":"M","address":"369 Marconi Place","employer":"Steeltab","email":"holcombcobb@steeltab.com","city":"Byrnedale","state":"CA"} +{"account_number":484,"balance":3274,"firstname":"Staci","lastname":"Melendez","age":35,"gender":"F","address":"751 Otsego Street","employer":"Namebox","email":"stacimelendez@namebox.com","city":"Harborton","state":"NV"} 
+{"account_number":489,"balance":7879,"firstname":"Garrett","lastname":"Langley","age":36,"gender":"M","address":"331 Bowne Street","employer":"Zillidium","email":"garrettlangley@zillidium.com","city":"Riviera","state":"LA"} +{"account_number":491,"balance":42942,"firstname":"Teresa","lastname":"Owen","age":24,"gender":"F","address":"713 Canton Court","employer":"Plasmos","email":"teresaowen@plasmos.com","city":"Bartonsville","state":"NH"} +{"account_number":496,"balance":14869,"firstname":"Alison","lastname":"Conrad","age":35,"gender":"F","address":"347 Varet Street","employer":"Perkle","email":"alisonconrad@perkle.com","city":"Cliffside","state":"OH"} +{"account_number":504,"balance":49205,"firstname":"Shanna","lastname":"Chambers","age":23,"gender":"M","address":"220 Beard Street","employer":"Corporana","email":"shannachambers@corporana.com","city":"Cashtown","state":"AZ"} +{"account_number":509,"balance":34754,"firstname":"Durham","lastname":"Pacheco","age":40,"gender":"M","address":"129 Plymouth Street","employer":"Datacator","email":"durhampacheco@datacator.com","city":"Loveland","state":"NC"} +{"account_number":511,"balance":40908,"firstname":"Elba","lastname":"Grant","age":24,"gender":"F","address":"157 Bijou Avenue","employer":"Dognost","email":"elbagrant@dognost.com","city":"Coyote","state":"MT"} +{"account_number":516,"balance":44940,"firstname":"Roy","lastname":"Smith","age":37,"gender":"M","address":"770 Cherry Street","employer":"Parleynet","email":"roysmith@parleynet.com","city":"Carrsville","state":"RI"} +{"account_number":523,"balance":28729,"firstname":"Amalia","lastname":"Benjamin","age":40,"gender":"F","address":"173 Bushwick Place","employer":"Sentia","email":"amaliabenjamin@sentia.com","city":"Jacumba","state":"OK"} +{"account_number":528,"balance":4071,"firstname":"Thompson","lastname":"Hoover","age":27,"gender":"F","address":"580 Garden Street","employer":"Portalis","email":"thompsonhoover@portalis.com","city":"Knowlton","state":"AL"} 
+{"account_number":530,"balance":8840,"firstname":"Kathrine","lastname":"Evans","age":37,"gender":"M","address":"422 Division Place","employer":"Spherix","email":"kathrineevans@spherix.com","city":"Biddle","state":"CO"} +{"account_number":535,"balance":8715,"firstname":"Fry","lastname":"George","age":34,"gender":"M","address":"722 Green Street","employer":"Ewaves","email":"frygeorge@ewaves.com","city":"Kenmar","state":"DE"} +{"account_number":542,"balance":23285,"firstname":"Michelle","lastname":"Mayo","age":35,"gender":"M","address":"657 Caton Place","employer":"Biflex","email":"michellemayo@biflex.com","city":"Beaverdale","state":"WY"} +{"account_number":547,"balance":12870,"firstname":"Eaton","lastname":"Rios","age":32,"gender":"M","address":"744 Withers Street","employer":"Podunk","email":"eatonrios@podunk.com","city":"Chelsea","state":"IA"} +{"account_number":554,"balance":33163,"firstname":"Townsend","lastname":"Atkins","age":39,"gender":"M","address":"566 Ira Court","employer":"Acruex","email":"townsendatkins@acruex.com","city":"Valle","state":"IA"} +{"account_number":559,"balance":11450,"firstname":"Tonia","lastname":"Schmidt","age":38,"gender":"F","address":"508 Sheffield Avenue","employer":"Extro","email":"toniaschmidt@extro.com","city":"Newry","state":"CT"} +{"account_number":561,"balance":12370,"firstname":"Sellers","lastname":"Davis","age":30,"gender":"M","address":"860 Madoc Avenue","employer":"Isodrive","email":"sellersdavis@isodrive.com","city":"Trail","state":"KS"} +{"account_number":566,"balance":6183,"firstname":"Cox","lastname":"Roman","age":37,"gender":"M","address":"349 Winthrop Street","employer":"Medcom","email":"coxroman@medcom.com","city":"Rosewood","state":"WY"} +{"account_number":573,"balance":32171,"firstname":"Callie","lastname":"Castaneda","age":36,"gender":"M","address":"799 Scott Avenue","employer":"Earthwax","email":"calliecastaneda@earthwax.com","city":"Marshall","state":"NH"} 
+{"account_number":578,"balance":34259,"firstname":"Holmes","lastname":"Mcknight","age":37,"gender":"M","address":"969 Metropolitan Avenue","employer":"Cubicide","email":"holmesmcknight@cubicide.com","city":"Aguila","state":"PA"} +{"account_number":580,"balance":13716,"firstname":"Mcmahon","lastname":"York","age":34,"gender":"M","address":"475 Beacon Court","employer":"Zillar","email":"mcmahonyork@zillar.com","city":"Farmington","state":"MO"} +{"account_number":585,"balance":26745,"firstname":"Nieves","lastname":"Nolan","age":32,"gender":"M","address":"115 Seagate Terrace","employer":"Jumpstack","email":"nievesnolan@jumpstack.com","city":"Eastmont","state":"UT"} +{"account_number":592,"balance":32968,"firstname":"Head","lastname":"Webster","age":36,"gender":"F","address":"987 Lefferts Avenue","employer":"Empirica","email":"headwebster@empirica.com","city":"Rockingham","state":"TN"} +{"account_number":597,"balance":11246,"firstname":"Penny","lastname":"Knowles","age":33,"gender":"M","address":"139 Forbell Street","employer":"Ersum","email":"pennyknowles@ersum.com","city":"Vallonia","state":"IA"} +{"account_number":600,"balance":10336,"firstname":"Simmons","lastname":"Byers","age":37,"gender":"M","address":"250 Dictum Court","employer":"Qualitex","email":"simmonsbyers@qualitex.com","city":"Wanship","state":"OH"} +{"account_number":605,"balance":38427,"firstname":"Mcclain","lastname":"Manning","age":24,"gender":"M","address":"832 Leonard Street","employer":"Qiao","email":"mcclainmanning@qiao.com","city":"Calvary","state":"TX"} +{"account_number":612,"balance":11868,"firstname":"Dunn","lastname":"Cameron","age":32,"gender":"F","address":"156 Lorimer Street","employer":"Isonus","email":"dunncameron@isonus.com","city":"Virgie","state":"ND"} +{"account_number":617,"balance":35445,"firstname":"Kitty","lastname":"Cooley","age":22,"gender":"M","address":"788 Seagate Avenue","employer":"Ultrimax","email":"kittycooley@ultrimax.com","city":"Clarktown","state":"MD"} 
+{"account_number":624,"balance":27538,"firstname":"Roxanne","lastname":"Franklin","age":39,"gender":"F","address":"299 Woodrow Court","employer":"Silodyne","email":"roxannefranklin@silodyne.com","city":"Roulette","state":"VA"} +{"account_number":629,"balance":32987,"firstname":"Mcclure","lastname":"Rodgers","age":26,"gender":"M","address":"806 Pierrepont Place","employer":"Elita","email":"mcclurerodgers@elita.com","city":"Brownsville","state":"MI"} +{"account_number":631,"balance":21657,"firstname":"Corrine","lastname":"Barber","age":32,"gender":"F","address":"447 Hunts Lane","employer":"Quarmony","email":"corrinebarber@quarmony.com","city":"Wyano","state":"IL"} +{"account_number":636,"balance":8036,"firstname":"Agnes","lastname":"Hooper","age":25,"gender":"M","address":"865 Hanson Place","employer":"Digial","email":"agneshooper@digial.com","city":"Sperryville","state":"OK"} +{"account_number":643,"balance":8057,"firstname":"Hendricks","lastname":"Stokes","age":23,"gender":"F","address":"142 Barbey Street","employer":"Remotion","email":"hendricksstokes@remotion.com","city":"Lewis","state":"MA"} +{"account_number":648,"balance":11506,"firstname":"Terry","lastname":"Montgomery","age":21,"gender":"F","address":"115 Franklin Avenue","employer":"Enervate","email":"terrymontgomery@enervate.com","city":"Bascom","state":"MA"} +{"account_number":650,"balance":18091,"firstname":"Benton","lastname":"Knight","age":28,"gender":"F","address":"850 Aitken Place","employer":"Pholio","email":"bentonknight@pholio.com","city":"Cobbtown","state":"AL"} +{"account_number":655,"balance":22912,"firstname":"Eula","lastname":"Taylor","age":30,"gender":"M","address":"520 Orient Avenue","employer":"Miracula","email":"eulataylor@miracula.com","city":"Wacissa","state":"IN"} +{"account_number":662,"balance":10138,"firstname":"Daisy","lastname":"Burnett","age":33,"gender":"M","address":"114 Norman Avenue","employer":"Liquicom","email":"daisyburnett@liquicom.com","city":"Grahamtown","state":"MD"} 
+{"account_number":667,"balance":22559,"firstname":"Juliana","lastname":"Chase","age":32,"gender":"M","address":"496 Coleridge Street","employer":"Comtract","email":"julianachase@comtract.com","city":"Wilsonia","state":"NJ"} +{"account_number":674,"balance":36038,"firstname":"Watts","lastname":"Shannon","age":22,"gender":"F","address":"600 Story Street","employer":"Joviold","email":"wattsshannon@joviold.com","city":"Fairhaven","state":"ID"} +{"account_number":679,"balance":20149,"firstname":"Henrietta","lastname":"Bonner","age":33,"gender":"M","address":"461 Bond Street","employer":"Geekol","email":"henriettabonner@geekol.com","city":"Richville","state":"WA"} +{"account_number":681,"balance":34244,"firstname":"Velazquez","lastname":"Wolfe","age":33,"gender":"M","address":"773 Eckford Street","employer":"Zisis","email":"velazquezwolfe@zisis.com","city":"Smock","state":"ME"} +{"account_number":686,"balance":10116,"firstname":"Decker","lastname":"Mcclure","age":30,"gender":"F","address":"236 Commerce Street","employer":"Everest","email":"deckermcclure@everest.com","city":"Gibbsville","state":"TN"} +{"account_number":693,"balance":31233,"firstname":"Tabatha","lastname":"Zimmerman","age":30,"gender":"F","address":"284 Emmons Avenue","employer":"Pushcart","email":"tabathazimmerman@pushcart.com","city":"Esmont","state":"NC"} +{"account_number":698,"balance":14965,"firstname":"Baker","lastname":"Armstrong","age":36,"gender":"F","address":"796 Tehama Street","employer":"Nurplex","email":"bakerarmstrong@nurplex.com","city":"Starks","state":"UT"} +{"account_number":701,"balance":23772,"firstname":"Gardner","lastname":"Griffith","age":27,"gender":"M","address":"187 Moore Place","employer":"Vertide","email":"gardnergriffith@vertide.com","city":"Coventry","state":"NV"} +{"account_number":706,"balance":5282,"firstname":"Eliza","lastname":"Potter","age":39,"gender":"M","address":"945 Dunham Place","employer":"Playce","email":"elizapotter@playce.com","city":"Woodruff","state":"AK"} 
+{"account_number":713,"balance":20054,"firstname":"Iris","lastname":"Mcguire","age":21,"gender":"F","address":"508 Benson Avenue","employer":"Duflex","email":"irismcguire@duflex.com","city":"Hillsboro","state":"MO"} +{"account_number":718,"balance":13876,"firstname":"Hickman","lastname":"Dillard","age":22,"gender":"F","address":"132 Etna Street","employer":"Genmy","email":"hickmandillard@genmy.com","city":"Curtice","state":"NV"} +{"account_number":720,"balance":31356,"firstname":"Ruth","lastname":"Vance","age":32,"gender":"F","address":"229 Adams Street","employer":"Zilidium","email":"ruthvance@zilidium.com","city":"Allison","state":"IA"} +{"account_number":725,"balance":14677,"firstname":"Reeves","lastname":"Tillman","age":26,"gender":"M","address":"674 Ivan Court","employer":"Cemention","email":"reevestillman@cemention.com","city":"Navarre","state":"MA"} +{"account_number":732,"balance":38445,"firstname":"Delia","lastname":"Cruz","age":37,"gender":"F","address":"870 Cheever Place","employer":"Multron","email":"deliacruz@multron.com","city":"Cresaptown","state":"NH"} +{"account_number":737,"balance":40431,"firstname":"Sampson","lastname":"Yates","age":23,"gender":"F","address":"214 Cox Place","employer":"Signidyne","email":"sampsonyates@signidyne.com","city":"Brazos","state":"GA"} +{"account_number":744,"balance":8690,"firstname":"Bernard","lastname":"Martinez","age":21,"gender":"M","address":"148 Dunne Place","employer":"Dragbot","email":"bernardmartinez@dragbot.com","city":"Moraida","state":"MN"} +{"account_number":749,"balance":1249,"firstname":"Rush","lastname":"Boyle","age":36,"gender":"M","address":"310 Argyle Road","employer":"Sportan","email":"rushboyle@sportan.com","city":"Brady","state":"WA"} +{"account_number":751,"balance":49252,"firstname":"Patrick","lastname":"Osborne","age":23,"gender":"M","address":"915 Prospect Avenue","employer":"Gynko","email":"patrickosborne@gynko.com","city":"Takilma","state":"MO"} 
+{"account_number":756,"balance":40006,"firstname":"Jasmine","lastname":"Howell","age":32,"gender":"M","address":"605 Elliott Walk","employer":"Ecratic","email":"jasminehowell@ecratic.com","city":"Harrodsburg","state":"OH"} +{"account_number":763,"balance":12091,"firstname":"Liz","lastname":"Bentley","age":22,"gender":"F","address":"933 Debevoise Avenue","employer":"Nipaz","email":"lizbentley@nipaz.com","city":"Glenville","state":"NJ"} +{"account_number":768,"balance":2213,"firstname":"Sondra","lastname":"Soto","age":21,"gender":"M","address":"625 Colonial Road","employer":"Navir","email":"sondrasoto@navir.com","city":"Benson","state":"VA"} +{"account_number":770,"balance":39505,"firstname":"Joann","lastname":"Crane","age":26,"gender":"M","address":"798 Farragut Place","employer":"Lingoage","email":"joanncrane@lingoage.com","city":"Kirk","state":"MA"} +{"account_number":775,"balance":27943,"firstname":"Wilson","lastname":"Merritt","age":33,"gender":"F","address":"288 Thornton Street","employer":"Geeky","email":"wilsonmerritt@geeky.com","city":"Holtville","state":"HI"} +{"account_number":782,"balance":3960,"firstname":"Maldonado","lastname":"Craig","age":36,"gender":"F","address":"345 Myrtle Avenue","employer":"Zilencio","email":"maldonadocraig@zilencio.com","city":"Yukon","state":"ID"} +{"account_number":787,"balance":11876,"firstname":"Harper","lastname":"Wynn","age":21,"gender":"F","address":"139 Oceanic Avenue","employer":"Interfind","email":"harperwynn@interfind.com","city":"Gerber","state":"ND"} +{"account_number":794,"balance":16491,"firstname":"Walker","lastname":"Charles","age":32,"gender":"M","address":"215 Kenilworth Place","employer":"Orbin","email":"walkercharles@orbin.com","city":"Rivers","state":"WI"} +{"account_number":799,"balance":2889,"firstname":"Myra","lastname":"Guerra","age":28,"gender":"F","address":"625 Dahlgreen Place","employer":"Digigene","email":"myraguerra@digigene.com","city":"Draper","state":"CA"} 
+{"account_number":802,"balance":19630,"firstname":"Gracie","lastname":"Foreman","age":40,"gender":"F","address":"219 Kent Avenue","employer":"Supportal","email":"gracieforeman@supportal.com","city":"Westboro","state":"NH"} +{"account_number":807,"balance":29206,"firstname":"Hatfield","lastname":"Lowe","age":23,"gender":"M","address":"499 Adler Place","employer":"Lovepad","email":"hatfieldlowe@lovepad.com","city":"Wiscon","state":"DC"} +{"account_number":814,"balance":9838,"firstname":"Morse","lastname":"Mcbride","age":26,"gender":"F","address":"776 Calyer Street","employer":"Inear","email":"morsemcbride@inear.com","city":"Kingstowne","state":"ND"} +{"account_number":819,"balance":3971,"firstname":"Karyn","lastname":"Medina","age":24,"gender":"F","address":"417 Utica Avenue","employer":"Qnekt","email":"karynmedina@qnekt.com","city":"Kerby","state":"WY"} +{"account_number":821,"balance":33271,"firstname":"Trisha","lastname":"Blankenship","age":22,"gender":"M","address":"329 Jamaica Avenue","employer":"Chorizon","email":"trishablankenship@chorizon.com","city":"Sexton","state":"VT"} +{"account_number":826,"balance":11548,"firstname":"Summers","lastname":"Vinson","age":22,"gender":"F","address":"742 Irwin Street","employer":"Globoil","email":"summersvinson@globoil.com","city":"Callaghan","state":"WY"} +{"account_number":833,"balance":46154,"firstname":"Woodward","lastname":"Hood","age":22,"gender":"M","address":"398 Atkins Avenue","employer":"Zedalis","email":"woodwardhood@zedalis.com","city":"Stonybrook","state":"NE"} +{"account_number":838,"balance":24629,"firstname":"Latonya","lastname":"Blake","age":37,"gender":"F","address":"531 Milton Street","employer":"Rugstars","email":"latonyablake@rugstars.com","city":"Tedrow","state":"WA"} +{"account_number":840,"balance":39615,"firstname":"Boone","lastname":"Gomez","age":38,"gender":"M","address":"256 Hampton Place","employer":"Geekular","email":"boonegomez@geekular.com","city":"Westerville","state":"HI"} 
+{"account_number":845,"balance":35422,"firstname":"Tracy","lastname":"Vaughn","age":39,"gender":"M","address":"645 Rockaway Parkway","employer":"Andryx","email":"tracyvaughn@andryx.com","city":"Wilmington","state":"ME"} +{"account_number":852,"balance":6041,"firstname":"Allen","lastname":"Hammond","age":26,"gender":"M","address":"793 Essex Street","employer":"Tersanki","email":"allenhammond@tersanki.com","city":"Osmond","state":"NC"} +{"account_number":857,"balance":39678,"firstname":"Alyce","lastname":"Douglas","age":23,"gender":"M","address":"326 Robert Street","employer":"Earbang","email":"alycedouglas@earbang.com","city":"Thornport","state":"GA"} +{"account_number":864,"balance":21804,"firstname":"Duffy","lastname":"Anthony","age":23,"gender":"M","address":"582 Cooke Court","employer":"Schoolio","email":"duffyanthony@schoolio.com","city":"Brenton","state":"CO"} +{"account_number":869,"balance":43544,"firstname":"Corinne","lastname":"Robbins","age":25,"gender":"F","address":"732 Quentin Road","employer":"Orbaxter","email":"corinnerobbins@orbaxter.com","city":"Roy","state":"TN"} +{"account_number":871,"balance":35854,"firstname":"Norma","lastname":"Burt","age":32,"gender":"M","address":"934 Cyrus Avenue","employer":"Magnafone","email":"normaburt@magnafone.com","city":"Eden","state":"TN"} +{"account_number":876,"balance":48568,"firstname":"Brady","lastname":"Glover","age":21,"gender":"F","address":"565 Oceanview Avenue","employer":"Comvex","email":"bradyglover@comvex.com","city":"Noblestown","state":"ID"} +{"account_number":883,"balance":33679,"firstname":"Austin","lastname":"Jefferson","age":34,"gender":"M","address":"846 Lincoln Avenue","employer":"Polarax","email":"austinjefferson@polarax.com","city":"Savannah","state":"CT"} +{"account_number":888,"balance":22277,"firstname":"Myrna","lastname":"Herman","age":39,"gender":"F","address":"649 Harwood Place","employer":"Enthaze","email":"myrnaherman@enthaze.com","city":"Idamay","state":"AR"} 
+{"account_number":890,"balance":31198,"firstname":"Alvarado","lastname":"Pate","age":25,"gender":"M","address":"269 Ashland Place","employer":"Ovolo","email":"alvaradopate@ovolo.com","city":"Volta","state":"MI"} +{"account_number":895,"balance":7327,"firstname":"Lara","lastname":"Mcdaniel","age":36,"gender":"M","address":"854 Willow Place","employer":"Acusage","email":"laramcdaniel@acusage.com","city":"Imperial","state":"NC"} +{"account_number":903,"balance":10238,"firstname":"Wade","lastname":"Page","age":35,"gender":"F","address":"685 Waldorf Court","employer":"Eplosion","email":"wadepage@eplosion.com","city":"Welda","state":"AL"} +{"account_number":908,"balance":45975,"firstname":"Mosley","lastname":"Holloway","age":31,"gender":"M","address":"929 Eldert Lane","employer":"Anivet","email":"mosleyholloway@anivet.com","city":"Biehle","state":"MS"} +{"account_number":910,"balance":36831,"firstname":"Esmeralda","lastname":"James","age":23,"gender":"F","address":"535 High Street","employer":"Terrasys","email":"esmeraldajames@terrasys.com","city":"Dubois","state":"IN"} +{"account_number":915,"balance":19816,"firstname":"Farrell","lastname":"French","age":35,"gender":"F","address":"126 McKibbin Street","employer":"Techmania","email":"farrellfrench@techmania.com","city":"Wescosville","state":"AL"} +{"account_number":922,"balance":39347,"firstname":"Irwin","lastname":"Pugh","age":32,"gender":"M","address":"463 Shale Street","employer":"Idego","email":"irwinpugh@idego.com","city":"Ivanhoe","state":"ID"} +{"account_number":927,"balance":19976,"firstname":"Jeanette","lastname":"Acevedo","age":26,"gender":"M","address":"694 Polhemus Place","employer":"Halap","email":"jeanetteacevedo@halap.com","city":"Harrison","state":"MO"} +{"account_number":934,"balance":43987,"firstname":"Freida","lastname":"Daniels","age":34,"gender":"M","address":"448 Cove Lane","employer":"Vurbo","email":"freidadaniels@vurbo.com","city":"Snelling","state":"NJ"} 
+{"account_number":939,"balance":31228,"firstname":"Hodges","lastname":"Massey","age":37,"gender":"F","address":"431 Dahl Court","employer":"Kegular","email":"hodgesmassey@kegular.com","city":"Katonah","state":"MD"} +{"account_number":941,"balance":38796,"firstname":"Kim","lastname":"Moss","age":28,"gender":"F","address":"105 Onderdonk Avenue","employer":"Digirang","email":"kimmoss@digirang.com","city":"Centerville","state":"TX"} +{"account_number":946,"balance":42794,"firstname":"Ina","lastname":"Obrien","age":36,"gender":"M","address":"339 Rewe Street","employer":"Eclipsent","email":"inaobrien@eclipsent.com","city":"Soham","state":"RI"} +{"account_number":953,"balance":1110,"firstname":"Baxter","lastname":"Black","age":27,"gender":"M","address":"720 Stillwell Avenue","employer":"Uplinx","email":"baxterblack@uplinx.com","city":"Drummond","state":"MN"} +{"account_number":958,"balance":32849,"firstname":"Brown","lastname":"Wilkins","age":40,"gender":"M","address":"686 Delmonico Place","employer":"Medesign","email":"brownwilkins@medesign.com","city":"Shelby","state":"WY"} +{"account_number":960,"balance":2905,"firstname":"Curry","lastname":"Vargas","age":40,"gender":"M","address":"242 Blake Avenue","employer":"Pearlesex","email":"curryvargas@pearlesex.com","city":"Henrietta","state":"NH"} +{"account_number":965,"balance":21882,"firstname":"Patrica","lastname":"Melton","age":28,"gender":"M","address":"141 Rodney Street","employer":"Flexigen","email":"patricamelton@flexigen.com","city":"Klagetoh","state":"MD"} +{"account_number":972,"balance":24719,"firstname":"Leona","lastname":"Christian","age":26,"gender":"F","address":"900 Woodpoint Road","employer":"Extrawear","email":"leonachristian@extrawear.com","city":"Roderfield","state":"MA"} +{"account_number":977,"balance":6744,"firstname":"Rodgers","lastname":"Mccray","age":21,"gender":"F","address":"612 Duryea Place","employer":"Papricut","email":"rodgersmccray@papricut.com","city":"Marenisco","state":"MD"} 
+{"account_number":984,"balance":1904,"firstname":"Viola","lastname":"Crawford","age":35,"gender":"F","address":"354 Linwood Street","employer":"Ginkle","email":"violacrawford@ginkle.com","city":"Witmer","state":"AR"} +{"account_number":989,"balance":48622,"firstname":"Franklin","lastname":"Frank","age":38,"gender":"M","address":"270 Carlton Avenue","employer":"Shopabout","email":"franklinfrank@shopabout.com","city":"Guthrie","state":"NC"} +{"account_number":991,"balance":4239,"firstname":"Connie","lastname":"Berry","age":28,"gender":"F","address":"647 Gardner Avenue","employer":"Flumbo","email":"connieberry@flumbo.com","city":"Frierson","state":"MO"} +{"account_number":996,"balance":17541,"firstname":"Andrews","lastname":"Herrera","age":30,"gender":"F","address":"570 Vandam Street","employer":"Klugger","email":"andrewsherrera@klugger.com","city":"Whitehaven","state":"MN"} +{"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} +{"account_number":5,"balance":29342,"firstname":"Leola","lastname":"Stewart","age":30,"gender":"F","address":"311 Elm Place","employer":"Diginetic","email":"leolastewart@diginetic.com","city":"Fairview","state":"NJ"} +{"account_number":12,"balance":37055,"firstname":"Stafford","lastname":"Brock","age":20,"gender":"F","address":"296 Wythe Avenue","employer":"Uncorp","email":"staffordbrock@uncorp.com","city":"Bend","state":"AL"} +{"account_number":17,"balance":7831,"firstname":"Bessie","lastname":"Orr","age":31,"gender":"F","address":"239 Hinsdale Street","employer":"Skyplex","email":"bessieorr@skyplex.com","city":"Graball","state":"FL"} +{"account_number":24,"balance":44182,"firstname":"Wood","lastname":"Dale","age":39,"gender":"M","address":"582 Gelston Avenue","employer":"Besto","email":"wooddale@besto.com","city":"Juntura","state":"MI"} 
+{"account_number":29,"balance":27323,"firstname":"Leah","lastname":"Santiago","age":33,"gender":"M","address":"193 Schenck Avenue","employer":"Isologix","email":"leahsantiago@isologix.com","city":"Gerton","state":"ND"} +{"account_number":31,"balance":30443,"firstname":"Kristen","lastname":"Santana","age":22,"gender":"F","address":"130 Middagh Street","employer":"Dogspa","email":"kristensantana@dogspa.com","city":"Vale","state":"MA"} +{"account_number":36,"balance":15902,"firstname":"Alexandra","lastname":"Nguyen","age":39,"gender":"F","address":"389 Elizabeth Place","employer":"Bittor","email":"alexandranguyen@bittor.com","city":"Hemlock","state":"KY"} +{"account_number":43,"balance":33474,"firstname":"Ryan","lastname":"Howe","age":25,"gender":"M","address":"660 Huntington Street","employer":"Microluxe","email":"ryanhowe@microluxe.com","city":"Clara","state":"CT"} +{"account_number":48,"balance":40608,"firstname":"Peck","lastname":"Downs","age":39,"gender":"F","address":"594 Dwight Street","employer":"Ramjob","email":"peckdowns@ramjob.com","city":"Coloma","state":"WA"} +{"account_number":50,"balance":43695,"firstname":"Sheena","lastname":"Kirkland","age":33,"gender":"M","address":"598 Bank Street","employer":"Zerbina","email":"sheenakirkland@zerbina.com","city":"Walland","state":"IN"} +{"account_number":55,"balance":22020,"firstname":"Shelia","lastname":"Puckett","age":33,"gender":"M","address":"265 Royce Place","employer":"Izzby","email":"sheliapuckett@izzby.com","city":"Slovan","state":"HI"} +{"account_number":62,"balance":43065,"firstname":"Lester","lastname":"Stanton","age":37,"gender":"M","address":"969 Doughty Street","employer":"Geekko","email":"lesterstanton@geekko.com","city":"Itmann","state":"DC"} +{"account_number":67,"balance":39430,"firstname":"Isabelle","lastname":"Spence","age":39,"gender":"M","address":"718 Troy Avenue","employer":"Geeketron","email":"isabellespence@geeketron.com","city":"Camptown","state":"WA"} 
+{"account_number":74,"balance":47167,"firstname":"Lauri","lastname":"Saunders","age":38,"gender":"F","address":"768 Lynch Street","employer":"Securia","email":"laurisaunders@securia.com","city":"Caroline","state":"TN"} +{"account_number":79,"balance":28185,"firstname":"Booker","lastname":"Lowery","age":29,"gender":"M","address":"817 Campus Road","employer":"Sensate","email":"bookerlowery@sensate.com","city":"Carlos","state":"MT"} +{"account_number":81,"balance":46568,"firstname":"Dennis","lastname":"Gilbert","age":40,"gender":"M","address":"619 Minna Street","employer":"Melbacor","email":"dennisgilbert@melbacor.com","city":"Kersey","state":"ND"} +{"account_number":86,"balance":15428,"firstname":"Walton","lastname":"Butler","age":36,"gender":"M","address":"999 Schenck Street","employer":"Unisure","email":"waltonbutler@unisure.com","city":"Bentonville","state":"IL"} +{"account_number":93,"balance":17728,"firstname":"Jeri","lastname":"Booth","age":31,"gender":"M","address":"322 Roosevelt Court","employer":"Geekology","email":"jeribooth@geekology.com","city":"Leming","state":"ND"} +{"account_number":98,"balance":15085,"firstname":"Cora","lastname":"Barrett","age":24,"gender":"F","address":"555 Neptune Court","employer":"Kiosk","email":"corabarrett@kiosk.com","city":"Independence","state":"MN"} +{"account_number":101,"balance":43400,"firstname":"Cecelia","lastname":"Grimes","age":31,"gender":"M","address":"972 Lincoln Place","employer":"Ecosys","email":"ceceliagrimes@ecosys.com","city":"Manchester","state":"AR"} +{"account_number":106,"balance":8212,"firstname":"Josefina","lastname":"Wagner","age":36,"gender":"M","address":"418 Estate Road","employer":"Kyaguru","email":"josefinawagner@kyaguru.com","city":"Darbydale","state":"FL"} +{"account_number":113,"balance":41652,"firstname":"Burt","lastname":"Moses","age":27,"gender":"M","address":"633 Berry Street","employer":"Uni","email":"burtmoses@uni.com","city":"Russellville","state":"CT"} 
+{"account_number":118,"balance":2223,"firstname":"Ballard","lastname":"Vasquez","age":33,"gender":"F","address":"101 Bush Street","employer":"Intergeek","email":"ballardvasquez@intergeek.com","city":"Century","state":"MN"} +{"account_number":120,"balance":38565,"firstname":"Browning","lastname":"Rodriquez","age":33,"gender":"M","address":"910 Moore Street","employer":"Opportech","email":"browningrodriquez@opportech.com","city":"Cutter","state":"ND"} +{"account_number":125,"balance":5396,"firstname":"Tanisha","lastname":"Dixon","age":30,"gender":"M","address":"482 Hancock Street","employer":"Junipoor","email":"tanishadixon@junipoor.com","city":"Wauhillau","state":"IA"} +{"account_number":132,"balance":37707,"firstname":"Horton","lastname":"Romero","age":35,"gender":"M","address":"427 Rutherford Place","employer":"Affluex","email":"hortonromero@affluex.com","city":"Hall","state":"AK"} +{"account_number":137,"balance":3596,"firstname":"Frost","lastname":"Freeman","age":29,"gender":"F","address":"191 Dennett Place","employer":"Beadzza","email":"frostfreeman@beadzza.com","city":"Sabillasville","state":"HI"} +{"account_number":144,"balance":43257,"firstname":"Evans","lastname":"Dyer","age":30,"gender":"F","address":"912 Post Court","employer":"Magmina","email":"evansdyer@magmina.com","city":"Gordon","state":"HI"} +{"account_number":149,"balance":22994,"firstname":"Megan","lastname":"Gonzales","age":21,"gender":"M","address":"836 Tampa Court","employer":"Andershun","email":"megangonzales@andershun.com","city":"Rockhill","state":"AL"} +{"account_number":151,"balance":34473,"firstname":"Kent","lastname":"Joyner","age":20,"gender":"F","address":"799 Truxton Street","employer":"Kozgene","email":"kentjoyner@kozgene.com","city":"Allamuchy","state":"DC"} +{"account_number":156,"balance":40185,"firstname":"Sloan","lastname":"Pennington","age":24,"gender":"F","address":"573 Opal Court","employer":"Hopeli","email":"sloanpennington@hopeli.com","city":"Evergreen","state":"CT"} 
+{"account_number":163,"balance":43075,"firstname":"Wilda","lastname":"Norman","age":33,"gender":"F","address":"173 Beadel Street","employer":"Kog","email":"wildanorman@kog.com","city":"Bodega","state":"ME"} +{"account_number":168,"balance":49568,"firstname":"Carissa","lastname":"Simon","age":20,"gender":"M","address":"975 Flatbush Avenue","employer":"Zillacom","email":"carissasimon@zillacom.com","city":"Neibert","state":"IL"} +{"account_number":170,"balance":6025,"firstname":"Mann","lastname":"Madden","age":36,"gender":"F","address":"161 Radde Place","employer":"Farmex","email":"mannmadden@farmex.com","city":"Thermal","state":"LA"} +{"account_number":175,"balance":16213,"firstname":"Montoya","lastname":"Donaldson","age":28,"gender":"F","address":"481 Morton Street","employer":"Envire","email":"montoyadonaldson@envire.com","city":"Delco","state":"MA"} +{"account_number":182,"balance":7803,"firstname":"Manuela","lastname":"Dillon","age":21,"gender":"M","address":"742 Garnet Street","employer":"Moreganic","email":"manueladillon@moreganic.com","city":"Ilchester","state":"TX"} +{"account_number":187,"balance":26581,"firstname":"Autumn","lastname":"Hodges","age":35,"gender":"M","address":"757 Granite Street","employer":"Ezentia","email":"autumnhodges@ezentia.com","city":"Martinsville","state":"KY"} +{"account_number":194,"balance":16311,"firstname":"Beck","lastname":"Rosario","age":39,"gender":"M","address":"721 Cambridge Place","employer":"Zoid","email":"beckrosario@zoid.com","city":"Efland","state":"ID"} +{"account_number":199,"balance":18086,"firstname":"Branch","lastname":"Love","age":26,"gender":"M","address":"458 Commercial Street","employer":"Frolix","email":"branchlove@frolix.com","city":"Caspar","state":"NC"} +{"account_number":202,"balance":26466,"firstname":"Medina","lastname":"Brown","age":31,"gender":"F","address":"519 Sunnyside Court","employer":"Bleendot","email":"medinabrown@bleendot.com","city":"Winfred","state":"MI"} 
+{"account_number":207,"balance":45535,"firstname":"Evelyn","lastname":"Lara","age":35,"gender":"F","address":"636 Chestnut Street","employer":"Ultrasure","email":"evelynlara@ultrasure.com","city":"Logan","state":"MI"} +{"account_number":214,"balance":24418,"firstname":"Luann","lastname":"Faulkner","age":37,"gender":"F","address":"697 Hazel Court","employer":"Zolar","email":"luannfaulkner@zolar.com","city":"Ticonderoga","state":"TX"} +{"account_number":219,"balance":17127,"firstname":"Edwards","lastname":"Hurley","age":25,"gender":"M","address":"834 Stockholm Street","employer":"Austech","email":"edwardshurley@austech.com","city":"Bayview","state":"NV"} +{"account_number":221,"balance":15803,"firstname":"Benjamin","lastname":"Barrera","age":34,"gender":"M","address":"568 Main Street","employer":"Zaphire","email":"benjaminbarrera@zaphire.com","city":"Germanton","state":"WY"} +{"account_number":226,"balance":37720,"firstname":"Wilkins","lastname":"Brady","age":40,"gender":"F","address":"486 Baltic Street","employer":"Dogtown","email":"wilkinsbrady@dogtown.com","city":"Condon","state":"MT"} +{"account_number":233,"balance":23020,"firstname":"Washington","lastname":"Walsh","age":27,"gender":"M","address":"366 Church Avenue","employer":"Candecor","email":"washingtonwalsh@candecor.com","city":"Westphalia","state":"MA"} +{"account_number":238,"balance":21287,"firstname":"Constance","lastname":"Wong","age":28,"gender":"M","address":"496 Brown Street","employer":"Grainspot","email":"constancewong@grainspot.com","city":"Cecilia","state":"IN"} +{"account_number":240,"balance":49741,"firstname":"Oconnor","lastname":"Clay","age":35,"gender":"F","address":"659 Highland Boulevard","employer":"Franscene","email":"oconnorclay@franscene.com","city":"Kilbourne","state":"NH"} +{"account_number":245,"balance":22026,"firstname":"Fran","lastname":"Bolton","age":28,"gender":"F","address":"147 Jerome Street","employer":"Solaren","email":"franbolton@solaren.com","city":"Nash","state":"RI"} 
+{"account_number":252,"balance":18831,"firstname":"Elvia","lastname":"Poole","age":22,"gender":"F","address":"836 Delevan Street","employer":"Velity","email":"elviapoole@velity.com","city":"Groveville","state":"MI"} +{"account_number":257,"balance":5318,"firstname":"Olive","lastname":"Oneil","age":35,"gender":"F","address":"457 Decatur Street","employer":"Helixo","email":"oliveoneil@helixo.com","city":"Chicopee","state":"MI"} +{"account_number":264,"balance":22084,"firstname":"Samantha","lastname":"Ferrell","age":35,"gender":"F","address":"488 Fulton Street","employer":"Flum","email":"samanthaferrell@flum.com","city":"Brandywine","state":"MT"} +{"account_number":269,"balance":43317,"firstname":"Crosby","lastname":"Figueroa","age":34,"gender":"M","address":"910 Aurelia Court","employer":"Pyramia","email":"crosbyfigueroa@pyramia.com","city":"Leyner","state":"OH"} +{"account_number":271,"balance":11864,"firstname":"Holt","lastname":"Walter","age":30,"gender":"F","address":"645 Poplar Avenue","employer":"Grupoli","email":"holtwalter@grupoli.com","city":"Mansfield","state":"OR"} +{"account_number":276,"balance":11606,"firstname":"Pittman","lastname":"Mathis","age":23,"gender":"F","address":"567 Charles Place","employer":"Zuvy","email":"pittmanmathis@zuvy.com","city":"Roeville","state":"KY"} +{"account_number":283,"balance":24070,"firstname":"Fuentes","lastname":"Foley","age":30,"gender":"M","address":"729 Walker Court","employer":"Knowlysis","email":"fuentesfoley@knowlysis.com","city":"Tryon","state":"TN"} +{"account_number":288,"balance":27243,"firstname":"Wong","lastname":"Stone","age":39,"gender":"F","address":"440 Willoughby Street","employer":"Zentix","email":"wongstone@zentix.com","city":"Wheatfields","state":"DC"} +{"account_number":290,"balance":26103,"firstname":"Neva","lastname":"Burgess","age":37,"gender":"F","address":"985 Wyona Street","employer":"Slofast","email":"nevaburgess@slofast.com","city":"Cawood","state":"DC"} 
+{"account_number":295,"balance":37358,"firstname":"Howe","lastname":"Nash","age":20,"gender":"M","address":"833 Union Avenue","employer":"Aquacine","email":"howenash@aquacine.com","city":"Indio","state":"MN"} +{"account_number":303,"balance":21976,"firstname":"Huffman","lastname":"Green","age":24,"gender":"F","address":"455 Colby Court","employer":"Comtest","email":"huffmangreen@comtest.com","city":"Weeksville","state":"UT"} +{"account_number":308,"balance":33989,"firstname":"Glass","lastname":"Schroeder","age":25,"gender":"F","address":"670 Veterans Avenue","employer":"Realmo","email":"glassschroeder@realmo.com","city":"Gratton","state":"NY"} +{"account_number":310,"balance":23049,"firstname":"Shannon","lastname":"Morton","age":39,"gender":"F","address":"412 Pleasant Place","employer":"Ovation","email":"shannonmorton@ovation.com","city":"Edgar","state":"AZ"} +{"account_number":315,"balance":1314,"firstname":"Clare","lastname":"Morrow","age":33,"gender":"F","address":"728 Madeline Court","employer":"Gaptec","email":"claremorrow@gaptec.com","city":"Mapletown","state":"PA"} +{"account_number":322,"balance":6303,"firstname":"Gilliam","lastname":"Horne","age":27,"gender":"M","address":"414 Florence Avenue","employer":"Shepard","email":"gilliamhorne@shepard.com","city":"Winesburg","state":"WY"} +{"account_number":327,"balance":29294,"firstname":"Nell","lastname":"Contreras","age":27,"gender":"M","address":"694 Gold Street","employer":"Momentia","email":"nellcontreras@momentia.com","city":"Cumminsville","state":"AL"} +{"account_number":334,"balance":9178,"firstname":"Cross","lastname":"Floyd","age":21,"gender":"F","address":"815 Herkimer Court","employer":"Maroptic","email":"crossfloyd@maroptic.com","city":"Kraemer","state":"AK"} +{"account_number":339,"balance":3992,"firstname":"Franco","lastname":"Welch","age":38,"gender":"F","address":"776 Brightwater Court","employer":"Earthplex","email":"francowelch@earthplex.com","city":"Naomi","state":"ME"} 
+{"account_number":341,"balance":44367,"firstname":"Alberta","lastname":"Bradford","age":30,"gender":"F","address":"670 Grant Avenue","employer":"Bugsall","email":"albertabradford@bugsall.com","city":"Romeville","state":"MT"} +{"account_number":346,"balance":26594,"firstname":"Shelby","lastname":"Sanchez","age":36,"gender":"F","address":"257 Fillmore Avenue","employer":"Geekus","email":"shelbysanchez@geekus.com","city":"Seymour","state":"CO"} +{"account_number":353,"balance":45182,"firstname":"Rivera","lastname":"Sherman","age":37,"gender":"M","address":"603 Garden Place","employer":"Bovis","email":"riverasherman@bovis.com","city":"Otranto","state":"CA"} +{"account_number":358,"balance":44043,"firstname":"Hale","lastname":"Baldwin","age":40,"gender":"F","address":"845 Menahan Street","employer":"Kidgrease","email":"halebaldwin@kidgrease.com","city":"Day","state":"AK"} +{"account_number":360,"balance":26651,"firstname":"Ward","lastname":"Hicks","age":34,"gender":"F","address":"592 Brighton Court","employer":"Biotica","email":"wardhicks@biotica.com","city":"Kanauga","state":"VT"} +{"account_number":365,"balance":3176,"firstname":"Sanders","lastname":"Holder","age":31,"gender":"F","address":"453 Cypress Court","employer":"Geekola","email":"sandersholder@geekola.com","city":"Staples","state":"TN"} +{"account_number":372,"balance":28566,"firstname":"Alba","lastname":"Forbes","age":24,"gender":"M","address":"814 Meserole Avenue","employer":"Isostream","email":"albaforbes@isostream.com","city":"Clarence","state":"OR"} +{"account_number":377,"balance":5374,"firstname":"Margo","lastname":"Gay","age":34,"gender":"F","address":"613 Chase Court","employer":"Rotodyne","email":"margogay@rotodyne.com","city":"Waumandee","state":"KS"} +{"account_number":384,"balance":48758,"firstname":"Sallie","lastname":"Houston","age":31,"gender":"F","address":"836 Polar Street","employer":"Squish","email":"salliehouston@squish.com","city":"Morningside","state":"NC"} 
+{"account_number":389,"balance":8839,"firstname":"York","lastname":"Cummings","age":27,"gender":"M","address":"778 Centre Street","employer":"Insurity","email":"yorkcummings@insurity.com","city":"Freeburn","state":"RI"} +{"account_number":391,"balance":14733,"firstname":"Holman","lastname":"Jordan","age":30,"gender":"M","address":"391 Forrest Street","employer":"Maineland","email":"holmanjordan@maineland.com","city":"Cade","state":"CT"} +{"account_number":396,"balance":14613,"firstname":"Marsha","lastname":"Elliott","age":38,"gender":"F","address":"297 Liberty Avenue","employer":"Orbiflex","email":"marshaelliott@orbiflex.com","city":"Windsor","state":"TX"} +{"account_number":404,"balance":34978,"firstname":"Massey","lastname":"Becker","age":26,"gender":"F","address":"930 Pitkin Avenue","employer":"Genekom","email":"masseybecker@genekom.com","city":"Blairstown","state":"OR"} +{"account_number":409,"balance":36960,"firstname":"Maura","lastname":"Glenn","age":31,"gender":"M","address":"183 Poly Place","employer":"Viagreat","email":"mauraglenn@viagreat.com","city":"Foscoe","state":"DE"} +{"account_number":411,"balance":1172,"firstname":"Guzman","lastname":"Whitfield","age":22,"gender":"M","address":"181 Perry Terrace","employer":"Springbee","email":"guzmanwhitfield@springbee.com","city":"Balm","state":"IN"} +{"account_number":416,"balance":27169,"firstname":"Hunt","lastname":"Schwartz","age":28,"gender":"F","address":"461 Havens Place","employer":"Danja","email":"huntschwartz@danja.com","city":"Grenelefe","state":"NV"} +{"account_number":423,"balance":38852,"firstname":"Hines","lastname":"Underwood","age":21,"gender":"F","address":"284 Louise Terrace","employer":"Namegen","email":"hinesunderwood@namegen.com","city":"Downsville","state":"CO"} +{"account_number":428,"balance":13925,"firstname":"Stephens","lastname":"Cain","age":20,"gender":"F","address":"189 Summit Street","employer":"Rocklogic","email":"stephenscain@rocklogic.com","city":"Bourg","state":"HI"} 
+{"account_number":430,"balance":15251,"firstname":"Alejandra","lastname":"Chavez","age":34,"gender":"M","address":"651 Butler Place","employer":"Gology","email":"alejandrachavez@gology.com","city":"Allensworth","state":"VT"} +{"account_number":435,"balance":14654,"firstname":"Sue","lastname":"Lopez","age":22,"gender":"F","address":"632 Stone Avenue","employer":"Emergent","email":"suelopez@emergent.com","city":"Waterford","state":"TN"} +{"account_number":442,"balance":36211,"firstname":"Lawanda","lastname":"Leon","age":27,"gender":"F","address":"126 Canal Avenue","employer":"Xixan","email":"lawandaleon@xixan.com","city":"Berwind","state":"TN"} +{"account_number":447,"balance":11402,"firstname":"Lucia","lastname":"Livingston","age":35,"gender":"M","address":"773 Lake Avenue","employer":"Soprano","email":"lucialivingston@soprano.com","city":"Edgewater","state":"TN"} +{"account_number":454,"balance":31687,"firstname":"Alicia","lastname":"Rollins","age":22,"gender":"F","address":"483 Verona Place","employer":"Boilcat","email":"aliciarollins@boilcat.com","city":"Lutsen","state":"MD"} +{"account_number":459,"balance":18869,"firstname":"Pamela","lastname":"Henry","age":20,"gender":"F","address":"361 Locust Avenue","employer":"Imageflow","email":"pamelahenry@imageflow.com","city":"Greenfields","state":"OH"} +{"account_number":461,"balance":38807,"firstname":"Mcbride","lastname":"Padilla","age":34,"gender":"F","address":"550 Borinquen Pl","employer":"Zepitope","email":"mcbridepadilla@zepitope.com","city":"Emory","state":"AZ"} +{"account_number":466,"balance":25109,"firstname":"Marcie","lastname":"Mcmillan","age":30,"gender":"F","address":"947 Gain Court","employer":"Entroflex","email":"marciemcmillan@entroflex.com","city":"Ronco","state":"ND"} +{"account_number":473,"balance":5391,"firstname":"Susan","lastname":"Luna","age":25,"gender":"F","address":"521 Bogart Street","employer":"Zaya","email":"susanluna@zaya.com","city":"Grazierville","state":"MI"} 
+{"account_number":478,"balance":28044,"firstname":"Dana","lastname":"Decker","age":35,"gender":"M","address":"627 Dobbin Street","employer":"Acrodance","email":"danadecker@acrodance.com","city":"Sharon","state":"MN"} +{"account_number":480,"balance":40807,"firstname":"Anastasia","lastname":"Parker","age":24,"gender":"M","address":"650 Folsom Place","employer":"Zilladyne","email":"anastasiaparker@zilladyne.com","city":"Oberlin","state":"WY"} +{"account_number":485,"balance":44235,"firstname":"Albert","lastname":"Roberts","age":40,"gender":"M","address":"385 Harman Street","employer":"Stralum","email":"albertroberts@stralum.com","city":"Watrous","state":"NM"} +{"account_number":492,"balance":31055,"firstname":"Burnett","lastname":"Briggs","age":35,"gender":"M","address":"987 Cass Place","employer":"Pharmex","email":"burnettbriggs@pharmex.com","city":"Cornfields","state":"TX"} +{"account_number":497,"balance":13493,"firstname":"Doyle","lastname":"Jenkins","age":30,"gender":"M","address":"205 Nevins Street","employer":"Unia","email":"doylejenkins@unia.com","city":"Nicut","state":"DC"} +{"account_number":500,"balance":39143,"firstname":"Pope","lastname":"Keith","age":28,"gender":"F","address":"537 Fane Court","employer":"Zboo","email":"popekeith@zboo.com","city":"Courtland","state":"AL"} +{"account_number":505,"balance":45493,"firstname":"Shelley","lastname":"Webb","age":29,"gender":"M","address":"873 Crawford Avenue","employer":"Quadeebo","email":"shelleywebb@quadeebo.com","city":"Topanga","state":"IL"} +{"account_number":512,"balance":47432,"firstname":"Alisha","lastname":"Morales","age":29,"gender":"M","address":"623 Batchelder Street","employer":"Terragen","email":"alishamorales@terragen.com","city":"Gilmore","state":"VA"} +{"account_number":517,"balance":3022,"firstname":"Allyson","lastname":"Walls","age":38,"gender":"F","address":"334 Coffey Street","employer":"Gorganic","email":"allysonwalls@gorganic.com","city":"Dahlen","state":"GA"} 
+{"account_number":524,"balance":49334,"firstname":"Salas","lastname":"Farley","age":30,"gender":"F","address":"499 Trucklemans Lane","employer":"Xumonk","email":"salasfarley@xumonk.com","city":"Noxen","state":"AL"} +{"account_number":529,"balance":21788,"firstname":"Deann","lastname":"Fisher","age":23,"gender":"F","address":"511 Buffalo Avenue","employer":"Twiist","email":"deannfisher@twiist.com","city":"Templeton","state":"WA"} +{"account_number":531,"balance":39770,"firstname":"Janet","lastname":"Pena","age":38,"gender":"M","address":"645 Livonia Avenue","employer":"Corecom","email":"janetpena@corecom.com","city":"Garberville","state":"OK"} +{"account_number":536,"balance":6255,"firstname":"Emma","lastname":"Adkins","age":33,"gender":"F","address":"971 Calder Place","employer":"Ontagene","email":"emmaadkins@ontagene.com","city":"Ruckersville","state":"GA"} +{"account_number":543,"balance":48022,"firstname":"Marina","lastname":"Rasmussen","age":31,"gender":"M","address":"446 Love Lane","employer":"Crustatia","email":"marinarasmussen@crustatia.com","city":"Statenville","state":"MD"} +{"account_number":548,"balance":36930,"firstname":"Sandra","lastname":"Andrews","age":37,"gender":"M","address":"973 Prospect Street","employer":"Datagene","email":"sandraandrews@datagene.com","city":"Inkerman","state":"MO"} +{"account_number":550,"balance":32238,"firstname":"Walsh","lastname":"Goodwin","age":22,"gender":"M","address":"953 Canda Avenue","employer":"Proflex","email":"walshgoodwin@proflex.com","city":"Ypsilanti","state":"MT"} +{"account_number":555,"balance":10750,"firstname":"Fannie","lastname":"Slater","age":31,"gender":"M","address":"457 Tech Place","employer":"Kineticut","email":"fannieslater@kineticut.com","city":"Basye","state":"MO"} +{"account_number":562,"balance":10737,"firstname":"Sarah","lastname":"Strong","age":39,"gender":"F","address":"177 Pioneer Street","employer":"Megall","email":"sarahstrong@megall.com","city":"Ladera","state":"WY"} 
+{"account_number":567,"balance":6507,"firstname":"Diana","lastname":"Dominguez","age":40,"gender":"M","address":"419 Albany Avenue","employer":"Ohmnet","email":"dianadominguez@ohmnet.com","city":"Wildwood","state":"TX"} +{"account_number":574,"balance":32954,"firstname":"Andrea","lastname":"Mosley","age":24,"gender":"M","address":"368 Throop Avenue","employer":"Musix","email":"andreamosley@musix.com","city":"Blende","state":"DC"} +{"account_number":579,"balance":12044,"firstname":"Banks","lastname":"Sawyer","age":36,"gender":"M","address":"652 Doone Court","employer":"Rooforia","email":"bankssawyer@rooforia.com","city":"Foxworth","state":"ND"} +{"account_number":581,"balance":16525,"firstname":"Fuller","lastname":"Mcintyre","age":32,"gender":"M","address":"169 Bergen Place","employer":"Applideck","email":"fullermcintyre@applideck.com","city":"Kenvil","state":"NY"} +{"account_number":586,"balance":13644,"firstname":"Love","lastname":"Velasquez","age":26,"gender":"F","address":"290 Girard Street","employer":"Zomboid","email":"lovevelasquez@zomboid.com","city":"Villarreal","state":"SD"} +{"account_number":593,"balance":41230,"firstname":"Muriel","lastname":"Vazquez","age":37,"gender":"M","address":"395 Montgomery Street","employer":"Sustenza","email":"murielvazquez@sustenza.com","city":"Strykersville","state":"OK"} +{"account_number":598,"balance":33251,"firstname":"Morgan","lastname":"Coleman","age":33,"gender":"M","address":"324 McClancy Place","employer":"Aclima","email":"morgancoleman@aclima.com","city":"Bowden","state":"WA"} +{"account_number":601,"balance":20796,"firstname":"Vickie","lastname":"Valentine","age":34,"gender":"F","address":"432 Bassett Avenue","employer":"Comvene","email":"vickievalentine@comvene.com","city":"Teasdale","state":"UT"} +{"account_number":606,"balance":28770,"firstname":"Michael","lastname":"Bray","age":31,"gender":"M","address":"935 Lake Place","employer":"Telepark","email":"michaelbray@telepark.com","city":"Lemoyne","state":"CT"} 
+{"account_number":613,"balance":39340,"firstname":"Eddie","lastname":"Mccarty","age":34,"gender":"F","address":"971 Richards Street","employer":"Bisba","email":"eddiemccarty@bisba.com","city":"Fruitdale","state":"NY"} +{"account_number":618,"balance":8976,"firstname":"Cheri","lastname":"Ford","age":30,"gender":"F","address":"803 Ridgewood Avenue","employer":"Zorromop","email":"cheriford@zorromop.com","city":"Gambrills","state":"VT"} +{"account_number":620,"balance":7224,"firstname":"Coleen","lastname":"Bartlett","age":38,"gender":"M","address":"761 Carroll Street","employer":"Idealis","email":"coleenbartlett@idealis.com","city":"Mathews","state":"DE"} +{"account_number":625,"balance":46010,"firstname":"Cynthia","lastname":"Johnston","age":23,"gender":"M","address":"142 Box Street","employer":"Zentry","email":"cynthiajohnston@zentry.com","city":"Makena","state":"MA"} +{"account_number":632,"balance":40470,"firstname":"Kay","lastname":"Warren","age":20,"gender":"F","address":"422 Alabama Avenue","employer":"Realysis","email":"kaywarren@realysis.com","city":"Homestead","state":"HI"} +{"account_number":637,"balance":3169,"firstname":"Kathy","lastname":"Carter","age":27,"gender":"F","address":"410 Jamison Lane","employer":"Limage","email":"kathycarter@limage.com","city":"Ernstville","state":"WA"} +{"account_number":644,"balance":44021,"firstname":"Etta","lastname":"Miller","age":21,"gender":"F","address":"376 Lawton Street","employer":"Bluegrain","email":"ettamiller@bluegrain.com","city":"Baker","state":"MD"} +{"account_number":649,"balance":20275,"firstname":"Jeanine","lastname":"Malone","age":26,"gender":"F","address":"114 Dodworth Street","employer":"Nixelt","email":"jeaninemalone@nixelt.com","city":"Keyport","state":"AK"} +{"account_number":651,"balance":18360,"firstname":"Young","lastname":"Reeves","age":34,"gender":"M","address":"581 Plaza Street","employer":"Krog","email":"youngreeves@krog.com","city":"Sussex","state":"WY"} 
+{"account_number":656,"balance":21632,"firstname":"Olson","lastname":"Hunt","age":36,"gender":"M","address":"342 Jaffray Street","employer":"Volax","email":"olsonhunt@volax.com","city":"Bangor","state":"WA"} +{"account_number":663,"balance":2456,"firstname":"Rollins","lastname":"Richards","age":37,"gender":"M","address":"129 Sullivan Place","employer":"Geostele","email":"rollinsrichards@geostele.com","city":"Morgandale","state":"FL"} +{"account_number":668,"balance":45069,"firstname":"Potter","lastname":"Michael","age":27,"gender":"M","address":"803 Glenmore Avenue","employer":"Ontality","email":"pottermichael@ontality.com","city":"Newkirk","state":"KS"} +{"account_number":670,"balance":10178,"firstname":"Ollie","lastname":"Riley","age":22,"gender":"M","address":"252 Jackson Place","employer":"Adornica","email":"ollieriley@adornica.com","city":"Brethren","state":"WI"} +{"account_number":675,"balance":36102,"firstname":"Fisher","lastname":"Shepard","age":27,"gender":"F","address":"859 Varick Street","employer":"Qot","email":"fishershepard@qot.com","city":"Diaperville","state":"MD"} +{"account_number":682,"balance":14168,"firstname":"Anne","lastname":"Hale","age":22,"gender":"F","address":"708 Anthony Street","employer":"Cytrek","email":"annehale@cytrek.com","city":"Beechmont","state":"WV"} +{"account_number":687,"balance":48630,"firstname":"Caroline","lastname":"Cox","age":31,"gender":"M","address":"626 Hillel Place","employer":"Opticon","email":"carolinecox@opticon.com","city":"Loma","state":"ND"} +{"account_number":694,"balance":33125,"firstname":"Craig","lastname":"Palmer","age":31,"gender":"F","address":"273 Montrose Avenue","employer":"Comvey","email":"craigpalmer@comvey.com","city":"Cleary","state":"OK"} +{"account_number":699,"balance":4156,"firstname":"Gallagher","lastname":"Marshall","age":37,"gender":"F","address":"648 Clifford Place","employer":"Exiand","email":"gallaghermarshall@exiand.com","city":"Belfair","state":"KY"} 
+{"account_number":702,"balance":46490,"firstname":"Meadows","lastname":"Delgado","age":26,"gender":"M","address":"612 Jardine Place","employer":"Daisu","email":"meadowsdelgado@daisu.com","city":"Venice","state":"AR"} +{"account_number":707,"balance":30325,"firstname":"Sonya","lastname":"Trevino","age":30,"gender":"F","address":"181 Irving Place","employer":"Atgen","email":"sonyatrevino@atgen.com","city":"Enetai","state":"TN"} +{"account_number":714,"balance":16602,"firstname":"Socorro","lastname":"Murray","age":34,"gender":"F","address":"810 Manhattan Court","employer":"Isoswitch","email":"socorromurray@isoswitch.com","city":"Jugtown","state":"AZ"} +{"account_number":719,"balance":33107,"firstname":"Leanna","lastname":"Reed","age":25,"gender":"F","address":"528 Krier Place","employer":"Rodeology","email":"leannareed@rodeology.com","city":"Carrizo","state":"WI"} +{"account_number":721,"balance":32958,"firstname":"Mara","lastname":"Dickson","age":26,"gender":"M","address":"810 Harrison Avenue","employer":"Comtours","email":"maradickson@comtours.com","city":"Thynedale","state":"DE"} +{"account_number":726,"balance":44737,"firstname":"Rosemary","lastname":"Salazar","age":21,"gender":"M","address":"290 Croton Loop","employer":"Rockabye","email":"rosemarysalazar@rockabye.com","city":"Helen","state":"IA"} +{"account_number":733,"balance":15722,"firstname":"Lakeisha","lastname":"Mccarthy","age":37,"gender":"M","address":"782 Turnbull Avenue","employer":"Exosis","email":"lakeishamccarthy@exosis.com","city":"Caberfae","state":"NM"} +{"account_number":738,"balance":44936,"firstname":"Rosalind","lastname":"Hunter","age":32,"gender":"M","address":"644 Eaton Court","employer":"Zolarity","email":"rosalindhunter@zolarity.com","city":"Cataract","state":"SD"} +{"account_number":740,"balance":6143,"firstname":"Chambers","lastname":"Hahn","age":22,"gender":"M","address":"937 Windsor Place","employer":"Medalert","email":"chambershahn@medalert.com","city":"Dorneyville","state":"DC"} 
+{"account_number":745,"balance":4572,"firstname":"Jacobs","lastname":"Sweeney","age":32,"gender":"M","address":"189 Lott Place","employer":"Comtent","email":"jacobssweeney@comtent.com","city":"Advance","state":"NJ"} +{"account_number":752,"balance":14039,"firstname":"Jerry","lastname":"Rush","age":31,"gender":"M","address":"632 Dank Court","employer":"Ebidco","email":"jerryrush@ebidco.com","city":"Geyserville","state":"AR"} +{"account_number":757,"balance":34628,"firstname":"Mccullough","lastname":"Moore","age":30,"gender":"F","address":"304 Hastings Street","employer":"Nikuda","email":"mcculloughmoore@nikuda.com","city":"Charco","state":"DC"} +{"account_number":764,"balance":3728,"firstname":"Noemi","lastname":"Gill","age":30,"gender":"M","address":"427 Chester Street","employer":"Avit","email":"noemigill@avit.com","city":"Chesterfield","state":"AL"} +{"account_number":769,"balance":15362,"firstname":"Francis","lastname":"Beck","age":28,"gender":"M","address":"454 Livingston Street","employer":"Furnafix","email":"francisbeck@furnafix.com","city":"Dunnavant","state":"HI"} +{"account_number":771,"balance":32784,"firstname":"Jocelyn","lastname":"Boone","age":23,"gender":"M","address":"513 Division Avenue","employer":"Collaire","email":"jocelynboone@collaire.com","city":"Lisco","state":"VT"} +{"account_number":776,"balance":29177,"firstname":"Duke","lastname":"Atkinson","age":24,"gender":"M","address":"520 Doscher Street","employer":"Tripsch","email":"dukeatkinson@tripsch.com","city":"Lafferty","state":"NC"} +{"account_number":783,"balance":11911,"firstname":"Faith","lastname":"Cooper","age":25,"gender":"F","address":"539 Rapelye Street","employer":"Insuron","email":"faithcooper@insuron.com","city":"Jennings","state":"MN"} +{"account_number":788,"balance":12473,"firstname":"Marianne","lastname":"Aguilar","age":39,"gender":"F","address":"213 Holly Street","employer":"Marqet","email":"marianneaguilar@marqet.com","city":"Alfarata","state":"HI"} 
+{"account_number":790,"balance":29912,"firstname":"Ellis","lastname":"Sullivan","age":39,"gender":"F","address":"877 Coyle Street","employer":"Enersave","email":"ellissullivan@enersave.com","city":"Canby","state":"MS"} +{"account_number":795,"balance":31450,"firstname":"Bruce","lastname":"Avila","age":34,"gender":"M","address":"865 Newkirk Placez","employer":"Plasmosis","email":"bruceavila@plasmosis.com","city":"Ada","state":"ID"} +{"account_number":803,"balance":49567,"firstname":"Marissa","lastname":"Spears","age":25,"gender":"M","address":"963 Highland Avenue","employer":"Centregy","email":"marissaspears@centregy.com","city":"Bloomington","state":"MS"} +{"account_number":808,"balance":11251,"firstname":"Nola","lastname":"Quinn","age":20,"gender":"M","address":"863 Wythe Place","employer":"Iplax","email":"nolaquinn@iplax.com","city":"Cuylerville","state":"NH"} +{"account_number":810,"balance":10563,"firstname":"Alyssa","lastname":"Ortega","age":40,"gender":"M","address":"977 Clymer Street","employer":"Eventage","email":"alyssaortega@eventage.com","city":"Convent","state":"SC"} +{"account_number":815,"balance":19336,"firstname":"Guthrie","lastname":"Morse","age":30,"gender":"M","address":"685 Vandalia Avenue","employer":"Gronk","email":"guthriemorse@gronk.com","city":"Fowlerville","state":"OR"} +{"account_number":822,"balance":13024,"firstname":"Hicks","lastname":"Farrell","age":25,"gender":"M","address":"468 Middleton Street","employer":"Zolarex","email":"hicksfarrell@zolarex.com","city":"Columbus","state":"OR"} +{"account_number":827,"balance":37536,"firstname":"Naomi","lastname":"Ball","age":29,"gender":"F","address":"319 Stewart Street","employer":"Isotronic","email":"naomiball@isotronic.com","city":"Trona","state":"NM"} +{"account_number":834,"balance":38049,"firstname":"Sybil","lastname":"Carrillo","age":25,"gender":"M","address":"359 Baughman Place","employer":"Phuel","email":"sybilcarrillo@phuel.com","city":"Kohatk","state":"CT"} 
+{"account_number":839,"balance":38292,"firstname":"Langley","lastname":"Neal","age":39,"gender":"F","address":"565 Newton Street","employer":"Liquidoc","email":"langleyneal@liquidoc.com","city":"Osage","state":"AL"} +{"account_number":841,"balance":28291,"firstname":"Dalton","lastname":"Waters","age":21,"gender":"M","address":"859 Grand Street","employer":"Malathion","email":"daltonwaters@malathion.com","city":"Tonopah","state":"AZ"} +{"account_number":846,"balance":35099,"firstname":"Maureen","lastname":"Glass","age":22,"gender":"M","address":"140 Amherst Street","employer":"Stelaecor","email":"maureenglass@stelaecor.com","city":"Cucumber","state":"IL"} +{"account_number":853,"balance":38353,"firstname":"Travis","lastname":"Parks","age":40,"gender":"M","address":"930 Bay Avenue","employer":"Pyramax","email":"travisparks@pyramax.com","city":"Gadsden","state":"ND"} +{"account_number":858,"balance":23194,"firstname":"Small","lastname":"Hatfield","age":36,"gender":"M","address":"593 Tennis Court","employer":"Letpro","email":"smallhatfield@letpro.com","city":"Haena","state":"KS"} +{"account_number":860,"balance":23613,"firstname":"Clark","lastname":"Boyd","age":37,"gender":"M","address":"501 Rock Street","employer":"Deepends","email":"clarkboyd@deepends.com","city":"Whitewater","state":"MA"} +{"account_number":865,"balance":10574,"firstname":"Cook","lastname":"Kelley","age":28,"gender":"F","address":"865 Lincoln Terrace","employer":"Quizmo","email":"cookkelley@quizmo.com","city":"Kansas","state":"KY"} +{"account_number":872,"balance":26314,"firstname":"Jane","lastname":"Greer","age":36,"gender":"F","address":"717 Hewes Street","employer":"Newcube","email":"janegreer@newcube.com","city":"Delshire","state":"DE"} +{"account_number":877,"balance":42879,"firstname":"Tracey","lastname":"Ruiz","age":34,"gender":"F","address":"141 Tompkins Avenue","employer":"Waab","email":"traceyruiz@waab.com","city":"Zeba","state":"NM"} 
+{"account_number":884,"balance":29316,"firstname":"Reva","lastname":"Rosa","age":40,"gender":"M","address":"784 Greene Avenue","employer":"Urbanshee","email":"revarosa@urbanshee.com","city":"Bakersville","state":"MS"} +{"account_number":889,"balance":26464,"firstname":"Fischer","lastname":"Klein","age":38,"gender":"F","address":"948 Juliana Place","employer":"Comtext","email":"fischerklein@comtext.com","city":"Jackpot","state":"PA"} +{"account_number":891,"balance":34829,"firstname":"Jacobson","lastname":"Clemons","age":24,"gender":"F","address":"507 Wilson Street","employer":"Quilm","email":"jacobsonclemons@quilm.com","city":"Muir","state":"TX"} +{"account_number":896,"balance":31947,"firstname":"Buckley","lastname":"Peterson","age":26,"gender":"M","address":"217 Beayer Place","employer":"Earwax","email":"buckleypeterson@earwax.com","city":"Franklin","state":"DE"} +{"account_number":904,"balance":27707,"firstname":"Mendez","lastname":"Mcneil","age":26,"gender":"M","address":"431 Halsey Street","employer":"Macronaut","email":"mendezmcneil@macronaut.com","city":"Troy","state":"OK"} +{"account_number":909,"balance":18421,"firstname":"Stark","lastname":"Lewis","age":36,"gender":"M","address":"409 Tilden Avenue","employer":"Frosnex","email":"starklewis@frosnex.com","city":"Axis","state":"CA"} +{"account_number":911,"balance":42655,"firstname":"Annie","lastname":"Lyons","age":21,"gender":"M","address":"518 Woods Place","employer":"Enerforce","email":"annielyons@enerforce.com","city":"Stagecoach","state":"MA"} +{"account_number":916,"balance":47887,"firstname":"Jarvis","lastname":"Alexander","age":40,"gender":"M","address":"406 Bergen Avenue","employer":"Equitax","email":"jarvisalexander@equitax.com","city":"Haring","state":"KY"} +{"account_number":923,"balance":48466,"firstname":"Mueller","lastname":"Mckee","age":26,"gender":"M","address":"298 Ruby Street","employer":"Luxuria","email":"muellermckee@luxuria.com","city":"Coleville","state":"TN"} 
+{"account_number":928,"balance":19611,"firstname":"Hester","lastname":"Copeland","age":22,"gender":"F","address":"425 Cropsey Avenue","employer":"Dymi","email":"hestercopeland@dymi.com","city":"Wolcott","state":"NE"} +{"account_number":930,"balance":47257,"firstname":"Kinney","lastname":"Lawson","age":39,"gender":"M","address":"501 Raleigh Place","employer":"Neptide","email":"kinneylawson@neptide.com","city":"Deltaville","state":"MD"} +{"account_number":935,"balance":4959,"firstname":"Flowers","lastname":"Robles","age":30,"gender":"M","address":"201 Hull Street","employer":"Xelegyl","email":"flowersrobles@xelegyl.com","city":"Rehrersburg","state":"AL"} +{"account_number":942,"balance":21299,"firstname":"Hamilton","lastname":"Clayton","age":26,"gender":"M","address":"413 Debevoise Street","employer":"Architax","email":"hamiltonclayton@architax.com","city":"Terlingua","state":"NM"} +{"account_number":947,"balance":22039,"firstname":"Virgie","lastname":"Garza","age":30,"gender":"M","address":"903 Matthews Court","employer":"Plasmox","email":"virgiegarza@plasmox.com","city":"Somerset","state":"WY"} +{"account_number":954,"balance":49404,"firstname":"Jenna","lastname":"Martin","age":22,"gender":"M","address":"688 Hart Street","employer":"Zinca","email":"jennamartin@zinca.com","city":"Oasis","state":"MD"} +{"account_number":959,"balance":34743,"firstname":"Shaffer","lastname":"Cervantes","age":40,"gender":"M","address":"931 Varick Avenue","employer":"Oceanica","email":"shaffercervantes@oceanica.com","city":"Bowie","state":"AL"} +{"account_number":961,"balance":43219,"firstname":"Betsy","lastname":"Hyde","age":27,"gender":"F","address":"183 Junius Street","employer":"Tubalum","email":"betsyhyde@tubalum.com","city":"Driftwood","state":"TX"} +{"account_number":966,"balance":20619,"firstname":"Susanne","lastname":"Rodriguez","age":35,"gender":"F","address":"255 Knickerbocker Avenue","employer":"Comtrek","email":"susannerodriguez@comtrek.com","city":"Trinway","state":"TX"} 
+{"account_number":973,"balance":45756,"firstname":"Rice","lastname":"Farmer","age":31,"gender":"M","address":"476 Nassau Avenue","employer":"Photobin","email":"ricefarmer@photobin.com","city":"Suitland","state":"ME"} +{"account_number":978,"balance":21459,"firstname":"Melanie","lastname":"Rojas","age":33,"gender":"M","address":"991 Java Street","employer":"Kage","email":"melanierojas@kage.com","city":"Greenock","state":"VT"} +{"account_number":980,"balance":42436,"firstname":"Cash","lastname":"Collier","age":33,"gender":"F","address":"999 Sapphire Street","employer":"Ceprene","email":"cashcollier@ceprene.com","city":"Glidden","state":"AK"} +{"account_number":985,"balance":20083,"firstname":"Martin","lastname":"Gardner","age":28,"gender":"F","address":"644 Fairview Place","employer":"Golistic","email":"martingardner@golistic.com","city":"Connerton","state":"NJ"} +{"account_number":992,"balance":11413,"firstname":"Kristie","lastname":"Kennedy","age":33,"gender":"F","address":"750 Hudson Avenue","employer":"Ludak","email":"kristiekennedy@ludak.com","city":"Warsaw","state":"WY"} +{"account_number":997,"balance":25311,"firstname":"Combs","lastname":"Frederick","age":20,"gender":"M","address":"586 Lloyd Court","employer":"Pathways","email":"combsfrederick@pathways.com","city":"Williamson","state":"CA"} +{"account_number":3,"balance":44947,"firstname":"Levine","lastname":"Burks","age":26,"gender":"F","address":"328 Wilson Avenue","employer":"Amtap","email":"levineburks@amtap.com","city":"Cochranville","state":"HI"} +{"account_number":8,"balance":48868,"firstname":"Jan","lastname":"Burns","age":35,"gender":"M","address":"699 Visitation Place","employer":"Glasstep","email":"janburns@glasstep.com","city":"Wakulla","state":"AZ"} +{"account_number":10,"balance":46170,"firstname":"Dominique","lastname":"Park","age":37,"gender":"F","address":"100 Gatling Place","employer":"Conjurica","email":"dominiquepark@conjurica.com","city":"Omar","state":"NJ"} 
+{"account_number":15,"balance":43456,"firstname":"Bobbie","lastname":"Sexton","age":21,"gender":"M","address":"232 Sedgwick Place","employer":"Zytrex","email":"bobbiesexton@zytrex.com","city":"Hendersonville","state":"CA"} +{"account_number":22,"balance":40283,"firstname":"Barrera","lastname":"Terrell","age":23,"gender":"F","address":"292 Orange Street","employer":"Steelfab","email":"barreraterrell@steelfab.com","city":"Bynum","state":"ME"} +{"account_number":27,"balance":6176,"firstname":"Meyers","lastname":"Williamson","age":26,"gender":"F","address":"675 Henderson Walk","employer":"Plexia","email":"meyerswilliamson@plexia.com","city":"Richmond","state":"AZ"} +{"account_number":34,"balance":35379,"firstname":"Ellison","lastname":"Kim","age":30,"gender":"F","address":"986 Revere Place","employer":"Signity","email":"ellisonkim@signity.com","city":"Sehili","state":"IL"} +{"account_number":39,"balance":38688,"firstname":"Bowers","lastname":"Mendez","age":22,"gender":"F","address":"665 Bennet Court","employer":"Farmage","email":"bowersmendez@farmage.com","city":"Duryea","state":"PA"} +{"account_number":41,"balance":36060,"firstname":"Hancock","lastname":"Holden","age":20,"gender":"M","address":"625 Gaylord Drive","employer":"Poochies","email":"hancockholden@poochies.com","city":"Alamo","state":"KS"} +{"account_number":46,"balance":12351,"firstname":"Karla","lastname":"Bowman","age":23,"gender":"M","address":"554 Chapel Street","employer":"Undertap","email":"karlabowman@undertap.com","city":"Sylvanite","state":"DC"} +{"account_number":53,"balance":28101,"firstname":"Kathryn","lastname":"Payne","age":29,"gender":"F","address":"467 Louis Place","employer":"Katakana","email":"kathrynpayne@katakana.com","city":"Harviell","state":"SD"} +{"account_number":58,"balance":31697,"firstname":"Marva","lastname":"Cannon","age":40,"gender":"M","address":"993 Highland Place","employer":"Comcubine","email":"marvacannon@comcubine.com","city":"Orviston","state":"MO"} 
+{"account_number":60,"balance":45955,"firstname":"Maude","lastname":"Casey","age":31,"gender":"F","address":"566 Strauss Street","employer":"Quilch","email":"maudecasey@quilch.com","city":"Enlow","state":"GA"} +{"account_number":65,"balance":23282,"firstname":"Leonor","lastname":"Pruitt","age":24,"gender":"M","address":"974 Terrace Place","employer":"Velos","email":"leonorpruitt@velos.com","city":"Devon","state":"WI"} +{"account_number":72,"balance":9732,"firstname":"Barlow","lastname":"Rhodes","age":25,"gender":"F","address":"891 Clinton Avenue","employer":"Zialactic","email":"barlowrhodes@zialactic.com","city":"Echo","state":"TN"} +{"account_number":77,"balance":5724,"firstname":"Byrd","lastname":"Conley","age":24,"gender":"F","address":"698 Belmont Avenue","employer":"Zidox","email":"byrdconley@zidox.com","city":"Rockbridge","state":"SC"} +{"account_number":84,"balance":3001,"firstname":"Hutchinson","lastname":"Newton","age":34,"gender":"F","address":"553 Locust Street","employer":"Zaggles","email":"hutchinsonnewton@zaggles.com","city":"Snyderville","state":"DC"} +{"account_number":89,"balance":13263,"firstname":"Mcdowell","lastname":"Bradley","age":28,"gender":"M","address":"960 Howard Alley","employer":"Grok","email":"mcdowellbradley@grok.com","city":"Toftrees","state":"TX"} +{"account_number":91,"balance":29799,"firstname":"Vonda","lastname":"Galloway","age":20,"gender":"M","address":"988 Voorhies Avenue","employer":"Illumity","email":"vondagalloway@illumity.com","city":"Holcombe","state":"HI"} +{"account_number":96,"balance":15933,"firstname":"Shirley","lastname":"Edwards","age":38,"gender":"M","address":"817 Caton Avenue","employer":"Equitox","email":"shirleyedwards@equitox.com","city":"Nelson","state":"MA"} +{"account_number":104,"balance":32619,"firstname":"Casey","lastname":"Roth","age":29,"gender":"M","address":"963 Railroad Avenue","employer":"Hotcakes","email":"caseyroth@hotcakes.com","city":"Davenport","state":"OH"} 
+{"account_number":109,"balance":25812,"firstname":"Gretchen","lastname":"Dawson","age":31,"gender":"M","address":"610 Bethel Loop","employer":"Tetak","email":"gretchendawson@tetak.com","city":"Hailesboro","state":"CO"} +{"account_number":111,"balance":1481,"firstname":"Traci","lastname":"Allison","age":35,"gender":"M","address":"922 Bryant Street","employer":"Enjola","email":"traciallison@enjola.com","city":"Robinette","state":"OR"} +{"account_number":116,"balance":21335,"firstname":"Hobbs","lastname":"Wright","age":24,"gender":"M","address":"965 Temple Court","employer":"Netbook","email":"hobbswright@netbook.com","city":"Strong","state":"CA"} +{"account_number":123,"balance":3079,"firstname":"Cleo","lastname":"Beach","age":27,"gender":"F","address":"653 Haring Street","employer":"Proxsoft","email":"cleobeach@proxsoft.com","city":"Greensburg","state":"ME"} +{"account_number":128,"balance":3556,"firstname":"Mack","lastname":"Bullock","age":34,"gender":"F","address":"462 Ingraham Street","employer":"Terascape","email":"mackbullock@terascape.com","city":"Eureka","state":"PA"} +{"account_number":130,"balance":24171,"firstname":"Roxie","lastname":"Cantu","age":33,"gender":"M","address":"841 Catherine Street","employer":"Skybold","email":"roxiecantu@skybold.com","city":"Deputy","state":"NE"} +{"account_number":135,"balance":24885,"firstname":"Stevenson","lastname":"Crosby","age":40,"gender":"F","address":"473 Boardwalk ","employer":"Accel","email":"stevensoncrosby@accel.com","city":"Norris","state":"OK"} +{"account_number":142,"balance":4544,"firstname":"Vang","lastname":"Hughes","age":27,"gender":"M","address":"357 Landis Court","employer":"Bolax","email":"vanghughes@bolax.com","city":"Emerald","state":"WY"} +{"account_number":147,"balance":35921,"firstname":"Charmaine","lastname":"Whitney","age":28,"gender":"F","address":"484 Seton Place","employer":"Comveyer","email":"charmainewhitney@comveyer.com","city":"Dexter","state":"DC"} 
+{"account_number":154,"balance":40945,"firstname":"Burns","lastname":"Solis","age":31,"gender":"M","address":"274 Lorraine Street","employer":"Rodemco","email":"burnssolis@rodemco.com","city":"Ballico","state":"WI"} +{"account_number":159,"balance":1696,"firstname":"Alvarez","lastname":"Mack","age":22,"gender":"F","address":"897 Manor Court","employer":"Snorus","email":"alvarezmack@snorus.com","city":"Rosedale","state":"CA"} +{"account_number":161,"balance":4659,"firstname":"Doreen","lastname":"Randall","age":37,"gender":"F","address":"178 Court Street","employer":"Calcula","email":"doreenrandall@calcula.com","city":"Belmont","state":"TX"} +{"account_number":166,"balance":33847,"firstname":"Rutledge","lastname":"Rivas","age":23,"gender":"M","address":"352 Verona Street","employer":"Virxo","email":"rutledgerivas@virxo.com","city":"Brandermill","state":"NE"} +{"account_number":173,"balance":5989,"firstname":"Whitley","lastname":"Blevins","age":32,"gender":"M","address":"127 Brooklyn Avenue","employer":"Pawnagra","email":"whitleyblevins@pawnagra.com","city":"Rodanthe","state":"ND"} +{"account_number":178,"balance":36735,"firstname":"Clements","lastname":"Finley","age":39,"gender":"F","address":"270 Story Court","employer":"Imaginart","email":"clementsfinley@imaginart.com","city":"Lookingglass","state":"MN"} +{"account_number":180,"balance":34236,"firstname":"Ursula","lastname":"Goodman","age":32,"gender":"F","address":"414 Clinton Street","employer":"Earthmark","email":"ursulagoodman@earthmark.com","city":"Rote","state":"AR"} +{"account_number":185,"balance":43532,"firstname":"Laurel","lastname":"Cline","age":40,"gender":"M","address":"788 Fenimore Street","employer":"Prismatic","email":"laurelcline@prismatic.com","city":"Frank","state":"UT"} +{"account_number":192,"balance":23508,"firstname":"Ramsey","lastname":"Carr","age":31,"gender":"F","address":"209 Williamsburg Street","employer":"Strezzo","email":"ramseycarr@strezzo.com","city":"Grapeview","state":"NM"} 
+{"account_number":197,"balance":17246,"firstname":"Sweet","lastname":"Sanders","age":33,"gender":"F","address":"712 Homecrest Court","employer":"Isosure","email":"sweetsanders@isosure.com","city":"Sheatown","state":"VT"} +{"account_number":200,"balance":26210,"firstname":"Teri","lastname":"Hester","age":39,"gender":"M","address":"653 Abbey Court","employer":"Electonic","email":"terihester@electonic.com","city":"Martell","state":"MD"} +{"account_number":205,"balance":45493,"firstname":"Johnson","lastname":"Chang","age":28,"gender":"F","address":"331 John Street","employer":"Gleamink","email":"johnsonchang@gleamink.com","city":"Sultana","state":"KS"} +{"account_number":212,"balance":10299,"firstname":"Marisol","lastname":"Fischer","age":39,"gender":"M","address":"362 Prince Street","employer":"Autograte","email":"marisolfischer@autograte.com","city":"Oley","state":"SC"} +{"account_number":217,"balance":33730,"firstname":"Sally","lastname":"Mccoy","age":38,"gender":"F","address":"854 Corbin Place","employer":"Omnigog","email":"sallymccoy@omnigog.com","city":"Escondida","state":"FL"} +{"account_number":224,"balance":42708,"firstname":"Billie","lastname":"Nixon","age":28,"gender":"F","address":"241 Kaufman Place","employer":"Xanide","email":"billienixon@xanide.com","city":"Chapin","state":"NY"} +{"account_number":229,"balance":2740,"firstname":"Jana","lastname":"Hensley","age":30,"gender":"M","address":"176 Erasmus Street","employer":"Isotrack","email":"janahensley@isotrack.com","city":"Caledonia","state":"ME"} +{"account_number":231,"balance":46180,"firstname":"Essie","lastname":"Clarke","age":34,"gender":"F","address":"308 Harbor Lane","employer":"Pharmacon","email":"essieclarke@pharmacon.com","city":"Fillmore","state":"MS"} +{"account_number":236,"balance":41200,"firstname":"Suzanne","lastname":"Bird","age":39,"gender":"F","address":"219 Luquer Street","employer":"Imant","email":"suzannebird@imant.com","city":"Bainbridge","state":"NY"} 
+{"account_number":243,"balance":29902,"firstname":"Evangelina","lastname":"Perez","age":20,"gender":"M","address":"787 Joval Court","employer":"Keengen","email":"evangelinaperez@keengen.com","city":"Mulberry","state":"SD"} +{"account_number":248,"balance":49989,"firstname":"West","lastname":"England","age":36,"gender":"M","address":"717 Hendrickson Place","employer":"Obliq","email":"westengland@obliq.com","city":"Maury","state":"WA"} +{"account_number":250,"balance":27893,"firstname":"Earlene","lastname":"Ellis","age":39,"gender":"F","address":"512 Bay Street","employer":"Codact","email":"earleneellis@codact.com","city":"Sunwest","state":"GA"} +{"account_number":255,"balance":49339,"firstname":"Iva","lastname":"Rivers","age":38,"gender":"M","address":"470 Rost Place","employer":"Mantrix","email":"ivarivers@mantrix.com","city":"Disautel","state":"MD"} +{"account_number":262,"balance":30289,"firstname":"Tameka","lastname":"Levine","age":36,"gender":"F","address":"815 Atlantic Avenue","employer":"Acium","email":"tamekalevine@acium.com","city":"Winchester","state":"SD"} +{"account_number":267,"balance":42753,"firstname":"Weeks","lastname":"Castillo","age":21,"gender":"F","address":"526 Holt Court","employer":"Talendula","email":"weekscastillo@talendula.com","city":"Washington","state":"NV"} +{"account_number":274,"balance":12104,"firstname":"Frieda","lastname":"House","age":33,"gender":"F","address":"171 Banker Street","employer":"Quonk","email":"friedahouse@quonk.com","city":"Aberdeen","state":"NJ"} +{"account_number":279,"balance":15904,"firstname":"Chapman","lastname":"Hart","age":32,"gender":"F","address":"902 Bliss Terrace","employer":"Kongene","email":"chapmanhart@kongene.com","city":"Bradenville","state":"NJ"} +{"account_number":281,"balance":39830,"firstname":"Bean","lastname":"Aguirre","age":20,"gender":"F","address":"133 Pilling Street","employer":"Amril","email":"beanaguirre@amril.com","city":"Waterview","state":"TX"} 
+{"account_number":286,"balance":39063,"firstname":"Rosetta","lastname":"Turner","age":35,"gender":"M","address":"169 Jefferson Avenue","employer":"Spacewax","email":"rosettaturner@spacewax.com","city":"Stewart","state":"MO"} +{"account_number":293,"balance":29867,"firstname":"Cruz","lastname":"Carver","age":28,"gender":"F","address":"465 Boerum Place","employer":"Vitricomp","email":"cruzcarver@vitricomp.com","city":"Crayne","state":"CO"} +{"account_number":298,"balance":34334,"firstname":"Bullock","lastname":"Marsh","age":20,"gender":"M","address":"589 Virginia Place","employer":"Renovize","email":"bullockmarsh@renovize.com","city":"Coinjock","state":"UT"} +{"account_number":301,"balance":16782,"firstname":"Minerva","lastname":"Graham","age":35,"gender":"M","address":"532 Harrison Place","employer":"Sureplex","email":"minervagraham@sureplex.com","city":"Belleview","state":"GA"} +{"account_number":306,"balance":2171,"firstname":"Hensley","lastname":"Hardin","age":40,"gender":"M","address":"196 Maujer Street","employer":"Neocent","email":"hensleyhardin@neocent.com","city":"Reinerton","state":"HI"} +{"account_number":313,"balance":34108,"firstname":"Alston","lastname":"Henderson","age":36,"gender":"F","address":"132 Prescott Place","employer":"Prosure","email":"alstonhenderson@prosure.com","city":"Worton","state":"IA"} +{"account_number":318,"balance":8512,"firstname":"Nichole","lastname":"Pearson","age":34,"gender":"F","address":"656 Lacon Court","employer":"Yurture","email":"nicholepearson@yurture.com","city":"Juarez","state":"MO"} +{"account_number":320,"balance":34521,"firstname":"Patti","lastname":"Brennan","age":37,"gender":"F","address":"870 Degraw Street","employer":"Cognicode","email":"pattibrennan@cognicode.com","city":"Torboy","state":"FL"} +{"account_number":325,"balance":1956,"firstname":"Magdalena","lastname":"Simmons","age":25,"gender":"F","address":"681 Townsend 
Street","employer":"Geekosis","email":"magdalenasimmons@geekosis.com","city":"Sterling","state":"CA"} +{"account_number":332,"balance":37770,"firstname":"Shepherd","lastname":"Davenport","age":28,"gender":"F","address":"586 Montague Terrace","employer":"Ecraze","email":"shepherddavenport@ecraze.com","city":"Accoville","state":"NM"} +{"account_number":337,"balance":43432,"firstname":"Monroe","lastname":"Stafford","age":37,"gender":"F","address":"183 Seigel Street","employer":"Centuria","email":"monroestafford@centuria.com","city":"Camino","state":"DE"} +{"account_number":344,"balance":42654,"firstname":"Sasha","lastname":"Baxter","age":35,"gender":"F","address":"700 Bedford Place","employer":"Callflex","email":"sashabaxter@callflex.com","city":"Campo","state":"MI"} +{"account_number":349,"balance":24180,"firstname":"Allison","lastname":"Fitzpatrick","age":22,"gender":"F","address":"913 Arlington Avenue","employer":"Veraq","email":"allisonfitzpatrick@veraq.com","city":"Marbury","state":"TX"} +{"account_number":351,"balance":47089,"firstname":"Hendrix","lastname":"Stephens","age":29,"gender":"M","address":"181 Beaver Street","employer":"Recrisys","email":"hendrixstephens@recrisys.com","city":"Denio","state":"OR"} +{"account_number":356,"balance":34540,"firstname":"Lourdes","lastname":"Valdez","age":20,"gender":"F","address":"700 Anchorage Place","employer":"Interloo","email":"lourdesvaldez@interloo.com","city":"Goldfield","state":"OK"} +{"account_number":363,"balance":34007,"firstname":"Peggy","lastname":"Bright","age":21,"gender":"M","address":"613 Engert Avenue","employer":"Inventure","email":"peggybright@inventure.com","city":"Chautauqua","state":"ME"} +{"account_number":368,"balance":23535,"firstname":"Hooper","lastname":"Tyson","age":39,"gender":"M","address":"892 Taaffe Place","employer":"Zaggle","email":"hoopertyson@zaggle.com","city":"Nutrioso","state":"ME"} 
+{"account_number":370,"balance":28499,"firstname":"Oneill","lastname":"Carney","age":25,"gender":"F","address":"773 Adelphi Street","employer":"Bedder","email":"oneillcarney@bedder.com","city":"Yorklyn","state":"FL"} +{"account_number":375,"balance":23860,"firstname":"Phoebe","lastname":"Patton","age":25,"gender":"M","address":"564 Hale Avenue","employer":"Xoggle","email":"phoebepatton@xoggle.com","city":"Brule","state":"NM"} +{"account_number":382,"balance":42061,"firstname":"Finley","lastname":"Singleton","age":37,"gender":"F","address":"407 Clay Street","employer":"Quarex","email":"finleysingleton@quarex.com","city":"Bedias","state":"LA"} +{"account_number":387,"balance":35916,"firstname":"April","lastname":"Hill","age":29,"gender":"M","address":"818 Bayard Street","employer":"Kengen","email":"aprilhill@kengen.com","city":"Chloride","state":"NC"} +{"account_number":394,"balance":6121,"firstname":"Lorrie","lastname":"Nunez","age":38,"gender":"M","address":"221 Ralph Avenue","employer":"Bullzone","email":"lorrienunez@bullzone.com","city":"Longoria","state":"ID"} +{"account_number":399,"balance":32587,"firstname":"Carmela","lastname":"Franks","age":23,"gender":"M","address":"617 Dewey Place","employer":"Zensure","email":"carmelafranks@zensure.com","city":"Sanders","state":"DC"} +{"account_number":402,"balance":1282,"firstname":"Pacheco","lastname":"Rosales","age":32,"gender":"M","address":"538 Pershing Loop","employer":"Circum","email":"pachecorosales@circum.com","city":"Elbert","state":"ID"} +{"account_number":407,"balance":36417,"firstname":"Gilda","lastname":"Jacobson","age":29,"gender":"F","address":"883 Loring Avenue","employer":"Comveyor","email":"gildajacobson@comveyor.com","city":"Topaz","state":"NH"} +{"account_number":414,"balance":17506,"firstname":"Conway","lastname":"Daugherty","age":37,"gender":"F","address":"643 Kermit Place","employer":"Lyria","email":"conwaydaugherty@lyria.com","city":"Vaughn","state":"NV"} 
+{"account_number":419,"balance":34847,"firstname":"Helen","lastname":"Montoya","age":29,"gender":"F","address":"736 Kingsland Avenue","employer":"Hairport","email":"helenmontoya@hairport.com","city":"Edinburg","state":"NE"} +{"account_number":421,"balance":46868,"firstname":"Tamika","lastname":"Mccall","age":27,"gender":"F","address":"764 Bragg Court","employer":"Eventix","email":"tamikamccall@eventix.com","city":"Tivoli","state":"RI"} +{"account_number":426,"balance":4499,"firstname":"Julie","lastname":"Parsons","age":31,"gender":"M","address":"768 Keap Street","employer":"Goko","email":"julieparsons@goko.com","city":"Coldiron","state":"VA"} +{"account_number":433,"balance":19266,"firstname":"Wilkinson","lastname":"Flowers","age":39,"gender":"M","address":"154 Douglass Street","employer":"Xsports","email":"wilkinsonflowers@xsports.com","city":"Coultervillle","state":"MN"} +{"account_number":438,"balance":16367,"firstname":"Walter","lastname":"Velez","age":27,"gender":"F","address":"931 Farragut Road","employer":"Virva","email":"waltervelez@virva.com","city":"Tyro","state":"WV"} +{"account_number":440,"balance":41590,"firstname":"Ray","lastname":"Wiley","age":31,"gender":"F","address":"102 Barwell Terrace","employer":"Polaria","email":"raywiley@polaria.com","city":"Hardyville","state":"IA"} +{"account_number":445,"balance":41178,"firstname":"Rodriguez","lastname":"Macias","age":34,"gender":"M","address":"164 Boerum Street","employer":"Xylar","email":"rodriguezmacias@xylar.com","city":"Riner","state":"AL"} +{"account_number":452,"balance":3589,"firstname":"Blackwell","lastname":"Delaney","age":39,"gender":"F","address":"443 Sackett Street","employer":"Imkan","email":"blackwelldelaney@imkan.com","city":"Gasquet","state":"DC"} +{"account_number":457,"balance":14057,"firstname":"Bush","lastname":"Gordon","age":34,"gender":"M","address":"975 Dakota Place","employer":"Softmicro","email":"bushgordon@softmicro.com","city":"Chemung","state":"PA"} 
+{"account_number":464,"balance":20504,"firstname":"Cobb","lastname":"Humphrey","age":21,"gender":"M","address":"823 Sunnyside Avenue","employer":"Apexia","email":"cobbhumphrey@apexia.com","city":"Wintersburg","state":"NY"} +{"account_number":469,"balance":26509,"firstname":"Marci","lastname":"Shepherd","age":26,"gender":"M","address":"565 Hall Street","employer":"Shadease","email":"marcishepherd@shadease.com","city":"Springhill","state":"IL"} +{"account_number":471,"balance":7629,"firstname":"Juana","lastname":"Silva","age":36,"gender":"M","address":"249 Amity Street","employer":"Artworlds","email":"juanasilva@artworlds.com","city":"Norfolk","state":"TX"} +{"account_number":476,"balance":33386,"firstname":"Silva","lastname":"Marks","age":31,"gender":"F","address":"183 Eldert Street","employer":"Medifax","email":"silvamarks@medifax.com","city":"Hachita","state":"RI"} +{"account_number":483,"balance":6344,"firstname":"Kelley","lastname":"Harper","age":29,"gender":"M","address":"758 Preston Court","employer":"Xyqag","email":"kelleyharper@xyqag.com","city":"Healy","state":"IA"} +{"account_number":488,"balance":6289,"firstname":"Wilma","lastname":"Hopkins","age":38,"gender":"M","address":"428 Lee Avenue","employer":"Entality","email":"wilmahopkins@entality.com","city":"Englevale","state":"WI"} +{"account_number":490,"balance":1447,"firstname":"Strong","lastname":"Hendrix","age":26,"gender":"F","address":"134 Beach Place","employer":"Duoflex","email":"stronghendrix@duoflex.com","city":"Allentown","state":"ND"} +{"account_number":495,"balance":13478,"firstname":"Abigail","lastname":"Nichols","age":40,"gender":"F","address":"887 President Street","employer":"Enquility","email":"abigailnichols@enquility.com","city":"Bagtown","state":"NM"} +{"account_number":503,"balance":42649,"firstname":"Leta","lastname":"Stout","age":39,"gender":"F","address":"518 Bowery Street","employer":"Pivitol","email":"letastout@pivitol.com","city":"Boonville","state":"ND"} 
+{"account_number":508,"balance":41300,"firstname":"Lawrence","lastname":"Mathews","age":27,"gender":"F","address":"987 Rose Street","employer":"Deviltoe","email":"lawrencemathews@deviltoe.com","city":"Woodburn","state":"FL"} +{"account_number":510,"balance":48504,"firstname":"Petty","lastname":"Sykes","age":28,"gender":"M","address":"566 Village Road","employer":"Nebulean","email":"pettysykes@nebulean.com","city":"Wedgewood","state":"MO"} +{"account_number":515,"balance":18531,"firstname":"Lott","lastname":"Keller","age":27,"gender":"M","address":"827 Miami Court","employer":"Translink","email":"lottkeller@translink.com","city":"Gila","state":"TX"} +{"account_number":522,"balance":19879,"firstname":"Faulkner","lastname":"Garrett","age":29,"gender":"F","address":"396 Grove Place","employer":"Pigzart","email":"faulknergarrett@pigzart.com","city":"Felt","state":"AR"} +{"account_number":527,"balance":2028,"firstname":"Carver","lastname":"Peters","age":35,"gender":"M","address":"816 Victor Road","employer":"Housedown","email":"carverpeters@housedown.com","city":"Nadine","state":"MD"} +{"account_number":534,"balance":20470,"firstname":"Cristina","lastname":"Russo","age":25,"gender":"F","address":"500 Highlawn Avenue","employer":"Cyclonica","email":"cristinarusso@cyclonica.com","city":"Gorst","state":"KS"} +{"account_number":539,"balance":24560,"firstname":"Tami","lastname":"Maddox","age":23,"gender":"F","address":"741 Pineapple Street","employer":"Accidency","email":"tamimaddox@accidency.com","city":"Kennedyville","state":"OH"} +{"account_number":541,"balance":42915,"firstname":"Logan","lastname":"Burke","age":32,"gender":"M","address":"904 Clarendon Road","employer":"Overplex","email":"loganburke@overplex.com","city":"Johnsonburg","state":"OH"} +{"account_number":546,"balance":43242,"firstname":"Bernice","lastname":"Sims","age":33,"gender":"M","address":"382 Columbia Street","employer":"Verbus","email":"bernicesims@verbus.com","city":"Sena","state":"KY"} 
+{"account_number":553,"balance":28390,"firstname":"Aimee","lastname":"Cohen","age":28,"gender":"M","address":"396 Lafayette Avenue","employer":"Eplode","email":"aimeecohen@eplode.com","city":"Thatcher","state":"NJ"} +{"account_number":558,"balance":8922,"firstname":"Horne","lastname":"Valenzuela","age":20,"gender":"F","address":"979 Kensington Street","employer":"Isoternia","email":"hornevalenzuela@isoternia.com","city":"Greenbush","state":"NC"} +{"account_number":560,"balance":24514,"firstname":"Felecia","lastname":"Oneill","age":26,"gender":"M","address":"995 Autumn Avenue","employer":"Mediot","email":"feleciaoneill@mediot.com","city":"Joppa","state":"IN"} +{"account_number":565,"balance":15197,"firstname":"Taylor","lastname":"Ingram","age":37,"gender":"F","address":"113 Will Place","employer":"Lyrichord","email":"tayloringram@lyrichord.com","city":"Collins","state":"ME"} +{"account_number":572,"balance":49355,"firstname":"Therese","lastname":"Espinoza","age":20,"gender":"M","address":"994 Chester Court","employer":"Gonkle","email":"thereseespinoza@gonkle.com","city":"Hayes","state":"UT"} +{"account_number":577,"balance":21398,"firstname":"Gilbert","lastname":"Serrano","age":38,"gender":"F","address":"294 Troutman Street","employer":"Senmao","email":"gilbertserrano@senmao.com","city":"Greer","state":"MT"} +{"account_number":584,"balance":5346,"firstname":"Pearson","lastname":"Bryant","age":40,"gender":"F","address":"971 Heyward Street","employer":"Anacho","email":"pearsonbryant@anacho.com","city":"Bluffview","state":"MN"} +{"account_number":589,"balance":33260,"firstname":"Ericka","lastname":"Cote","age":39,"gender":"F","address":"425 Bath Avenue","employer":"Venoflex","email":"erickacote@venoflex.com","city":"Blue","state":"CT"} +{"account_number":591,"balance":48997,"firstname":"Rivers","lastname":"Macdonald","age":34,"gender":"F","address":"919 Johnson Street","employer":"Ziore","email":"riversmacdonald@ziore.com","city":"Townsend","state":"IL"} 
+{"account_number":596,"balance":4063,"firstname":"Letitia","lastname":"Walker","age":26,"gender":"F","address":"963 Vanderveer Place","employer":"Zizzle","email":"letitiawalker@zizzle.com","city":"Rossmore","state":"ID"} +{"account_number":604,"balance":10675,"firstname":"Isabel","lastname":"Gilliam","age":23,"gender":"M","address":"854 Broadway ","employer":"Zenthall","email":"isabelgilliam@zenthall.com","city":"Ventress","state":"WI"} +{"account_number":609,"balance":28586,"firstname":"Montgomery","lastname":"Washington","age":30,"gender":"M","address":"169 Schroeders Avenue","employer":"Kongle","email":"montgomerywashington@kongle.com","city":"Croom","state":"AZ"} +{"account_number":611,"balance":17528,"firstname":"Katherine","lastname":"Prince","age":33,"gender":"F","address":"705 Elm Avenue","employer":"Zillacon","email":"katherineprince@zillacon.com","city":"Rew","state":"MI"} +{"account_number":616,"balance":25276,"firstname":"Jessie","lastname":"Mayer","age":35,"gender":"F","address":"683 Chester Avenue","employer":"Emtrak","email":"jessiemayer@emtrak.com","city":"Marysville","state":"HI"} +{"account_number":623,"balance":20514,"firstname":"Rose","lastname":"Combs","age":32,"gender":"F","address":"312 Grimes Road","employer":"Aquamate","email":"rosecombs@aquamate.com","city":"Fostoria","state":"OH"} +{"account_number":628,"balance":42736,"firstname":"Buckner","lastname":"Chen","age":37,"gender":"M","address":"863 Rugby Road","employer":"Jamnation","email":"bucknerchen@jamnation.com","city":"Camas","state":"TX"} +{"account_number":630,"balance":46060,"firstname":"Leanne","lastname":"Jones","age":31,"gender":"M","address":"451 Bayview Avenue","employer":"Wazzu","email":"leannejones@wazzu.com","city":"Kylertown","state":"OK"} +{"account_number":635,"balance":44705,"firstname":"Norman","lastname":"Gilmore","age":33,"gender":"M","address":"330 Gates Avenue","employer":"Comfirm","email":"normangilmore@comfirm.com","city":"Riceville","state":"TN"} 
+{"account_number":642,"balance":32852,"firstname":"Reyna","lastname":"Harris","age":35,"gender":"M","address":"305 Powell Street","employer":"Bedlam","email":"reynaharris@bedlam.com","city":"Florence","state":"KS"} +{"account_number":647,"balance":10147,"firstname":"Annabelle","lastname":"Velazquez","age":30,"gender":"M","address":"299 Kensington Walk","employer":"Sealoud","email":"annabellevelazquez@sealoud.com","city":"Soudan","state":"ME"} +{"account_number":654,"balance":38695,"firstname":"Armstrong","lastname":"Frazier","age":25,"gender":"M","address":"899 Seeley Street","employer":"Zensor","email":"armstrongfrazier@zensor.com","city":"Cherokee","state":"UT"} +{"account_number":659,"balance":29648,"firstname":"Dorsey","lastname":"Sosa","age":40,"gender":"M","address":"270 Aberdeen Street","employer":"Daycore","email":"dorseysosa@daycore.com","city":"Chamberino","state":"SC"} +{"account_number":661,"balance":3679,"firstname":"Joanne","lastname":"Spencer","age":39,"gender":"F","address":"910 Montauk Avenue","employer":"Visalia","email":"joannespencer@visalia.com","city":"Valmy","state":"NH"} +{"account_number":666,"balance":13880,"firstname":"Mcguire","lastname":"Lloyd","age":40,"gender":"F","address":"658 Just Court","employer":"Centrexin","email":"mcguirelloyd@centrexin.com","city":"Warren","state":"MT"} +{"account_number":673,"balance":11303,"firstname":"Mcdaniel","lastname":"Harrell","age":33,"gender":"M","address":"565 Montgomery Place","employer":"Eyeris","email":"mcdanielharrell@eyeris.com","city":"Garnet","state":"NV"} +{"account_number":678,"balance":43663,"firstname":"Ruby","lastname":"Shaffer","age":28,"gender":"M","address":"350 Clark Street","employer":"Comtrail","email":"rubyshaffer@comtrail.com","city":"Aurora","state":"MA"} +{"account_number":680,"balance":31561,"firstname":"Melton","lastname":"Camacho","age":32,"gender":"F","address":"771 Montana Place","employer":"Insuresys","email":"meltoncamacho@insuresys.com","city":"Sparkill","state":"IN"} 
+{"account_number":685,"balance":22249,"firstname":"Yesenia","lastname":"Rowland","age":24,"gender":"F","address":"193 Dekalb Avenue","employer":"Coriander","email":"yeseniarowland@coriander.com","city":"Lupton","state":"NC"} +{"account_number":692,"balance":10435,"firstname":"Haney","lastname":"Barlow","age":21,"gender":"F","address":"267 Lenox Road","employer":"Egypto","email":"haneybarlow@egypto.com","city":"Detroit","state":"IN"} +{"account_number":697,"balance":48745,"firstname":"Mallory","lastname":"Emerson","age":24,"gender":"F","address":"318 Dunne Court","employer":"Exoplode","email":"malloryemerson@exoplode.com","city":"Montura","state":"LA"} +{"account_number":700,"balance":19164,"firstname":"Patel","lastname":"Durham","age":21,"gender":"F","address":"440 King Street","employer":"Icology","email":"pateldurham@icology.com","city":"Mammoth","state":"IL"} +{"account_number":705,"balance":28415,"firstname":"Krystal","lastname":"Cross","age":22,"gender":"M","address":"604 Drew Street","employer":"Tubesys","email":"krystalcross@tubesys.com","city":"Dalton","state":"MO"} +{"account_number":712,"balance":12459,"firstname":"Butler","lastname":"Alston","age":37,"gender":"M","address":"486 Hemlock Street","employer":"Quordate","email":"butleralston@quordate.com","city":"Verdi","state":"MS"} +{"account_number":717,"balance":29270,"firstname":"Erickson","lastname":"Mcdonald","age":31,"gender":"M","address":"873 Franklin Street","employer":"Exotechno","email":"ericksonmcdonald@exotechno.com","city":"Jessie","state":"MS"} +{"account_number":724,"balance":12548,"firstname":"Hopper","lastname":"Peck","age":31,"gender":"M","address":"849 Hendrickson Street","employer":"Uxmox","email":"hopperpeck@uxmox.com","city":"Faxon","state":"UT"} +{"account_number":729,"balance":41812,"firstname":"Katy","lastname":"Rivera","age":36,"gender":"F","address":"791 Olive Street","employer":"Blurrybus","email":"katyrivera@blurrybus.com","city":"Innsbrook","state":"MI"} 
+{"account_number":731,"balance":4994,"firstname":"Lorene","lastname":"Weiss","age":35,"gender":"M","address":"990 Ocean Court","employer":"Comvoy","email":"loreneweiss@comvoy.com","city":"Lavalette","state":"WI"} +{"account_number":736,"balance":28677,"firstname":"Rogers","lastname":"Mcmahon","age":21,"gender":"F","address":"423 Cameron Court","employer":"Brainclip","email":"rogersmcmahon@brainclip.com","city":"Saddlebrooke","state":"FL"} +{"account_number":743,"balance":14077,"firstname":"Susana","lastname":"Moody","age":23,"gender":"M","address":"842 Fountain Avenue","employer":"Bitrex","email":"susanamoody@bitrex.com","city":"Temperanceville","state":"TN"} +{"account_number":748,"balance":38060,"firstname":"Ford","lastname":"Branch","age":25,"gender":"M","address":"926 Cypress Avenue","employer":"Buzzness","email":"fordbranch@buzzness.com","city":"Beason","state":"DC"} +{"account_number":750,"balance":40481,"firstname":"Cherie","lastname":"Brooks","age":20,"gender":"F","address":"601 Woodhull Street","employer":"Kaggle","email":"cheriebrooks@kaggle.com","city":"Groton","state":"MA"} +{"account_number":755,"balance":43878,"firstname":"Bartlett","lastname":"Conway","age":22,"gender":"M","address":"453 Times Placez","employer":"Konnect","email":"bartlettconway@konnect.com","city":"Belva","state":"VT"} +{"account_number":762,"balance":10291,"firstname":"Amanda","lastname":"Head","age":20,"gender":"F","address":"990 Ocean Parkway","employer":"Zentury","email":"amandahead@zentury.com","city":"Hegins","state":"AR"} +{"account_number":767,"balance":26220,"firstname":"Anthony","lastname":"Sutton","age":27,"gender":"F","address":"179 Fayette Street","employer":"Xiix","email":"anthonysutton@xiix.com","city":"Iberia","state":"TN"} +{"account_number":774,"balance":35287,"firstname":"Lynnette","lastname":"Alvarez","age":38,"gender":"F","address":"991 Brightwater Avenue","employer":"Gink","email":"lynnettealvarez@gink.com","city":"Leola","state":"NC"} 
+{"account_number":779,"balance":40983,"firstname":"Maggie","lastname":"Pace","age":32,"gender":"F","address":"104 Harbor Court","employer":"Bulljuice","email":"maggiepace@bulljuice.com","city":"Floris","state":"MA"} +{"account_number":781,"balance":29961,"firstname":"Sanford","lastname":"Mullen","age":26,"gender":"F","address":"879 Dover Street","employer":"Zanity","email":"sanfordmullen@zanity.com","city":"Martinez","state":"TX"} +{"account_number":786,"balance":3024,"firstname":"Rene","lastname":"Vang","age":33,"gender":"M","address":"506 Randolph Street","employer":"Isopop","email":"renevang@isopop.com","city":"Vienna","state":"NJ"} +{"account_number":793,"balance":16911,"firstname":"Alford","lastname":"Compton","age":36,"gender":"M","address":"186 Veronica Place","employer":"Zyple","email":"alfordcompton@zyple.com","city":"Sugartown","state":"AK"} +{"account_number":798,"balance":3165,"firstname":"Catherine","lastname":"Ward","age":30,"gender":"F","address":"325 Burnett Street","employer":"Dreamia","email":"catherineward@dreamia.com","city":"Glenbrook","state":"SD"} +{"account_number":801,"balance":14954,"firstname":"Molly","lastname":"Maldonado","age":37,"gender":"M","address":"518 Maple Avenue","employer":"Straloy","email":"mollymaldonado@straloy.com","city":"Hebron","state":"WI"} +{"account_number":806,"balance":36492,"firstname":"Carson","lastname":"Riddle","age":31,"gender":"M","address":"984 Lois Avenue","employer":"Terrago","email":"carsonriddle@terrago.com","city":"Leland","state":"MN"} +{"account_number":813,"balance":30833,"firstname":"Ebony","lastname":"Bishop","age":20,"gender":"M","address":"487 Ridge Court","employer":"Optique","email":"ebonybishop@optique.com","city":"Fairmount","state":"WA"} +{"account_number":818,"balance":24433,"firstname":"Espinoza","lastname":"Petersen","age":26,"gender":"M","address":"641 Glenwood Road","employer":"Futurity","email":"espinozapetersen@futurity.com","city":"Floriston","state":"MD"} 
+{"account_number":820,"balance":1011,"firstname":"Shepard","lastname":"Ramsey","age":24,"gender":"F","address":"806 Village Court","employer":"Mantro","email":"shepardramsey@mantro.com","city":"Tibbie","state":"NV"} +{"account_number":825,"balance":49000,"firstname":"Terra","lastname":"Witt","age":21,"gender":"F","address":"590 Conway Street","employer":"Insectus","email":"terrawitt@insectus.com","city":"Forbestown","state":"AR"} +{"account_number":832,"balance":8582,"firstname":"Laura","lastname":"Gibbs","age":39,"gender":"F","address":"511 Osborn Street","employer":"Corepan","email":"lauragibbs@corepan.com","city":"Worcester","state":"KS"} +{"account_number":837,"balance":14485,"firstname":"Amy","lastname":"Villarreal","age":35,"gender":"M","address":"381 Stillwell Place","employer":"Fleetmix","email":"amyvillarreal@fleetmix.com","city":"Sanford","state":"IA"} +{"account_number":844,"balance":26840,"firstname":"Jill","lastname":"David","age":31,"gender":"M","address":"346 Legion Street","employer":"Zytrax","email":"jilldavid@zytrax.com","city":"Saticoy","state":"SC"} +{"account_number":849,"balance":16200,"firstname":"Barry","lastname":"Chapman","age":26,"gender":"M","address":"931 Dekoven Court","employer":"Darwinium","email":"barrychapman@darwinium.com","city":"Whitestone","state":"WY"} +{"account_number":851,"balance":22026,"firstname":"Henderson","lastname":"Price","age":33,"gender":"F","address":"530 Hausman Street","employer":"Plutorque","email":"hendersonprice@plutorque.com","city":"Brutus","state":"RI"} +{"account_number":856,"balance":27583,"firstname":"Alissa","lastname":"Knox","age":25,"gender":"M","address":"258 Empire Boulevard","employer":"Geologix","email":"alissaknox@geologix.com","city":"Hartsville/Hartley","state":"MN"} +{"account_number":863,"balance":23165,"firstname":"Melendez","lastname":"Fernandez","age":40,"gender":"M","address":"661 Johnson Avenue","employer":"Vixo","email":"melendezfernandez@vixo.com","city":"Farmers","state":"IL"} 
+{"account_number":868,"balance":27624,"firstname":"Polly","lastname":"Barron","age":22,"gender":"M","address":"129 Frank Court","employer":"Geofarm","email":"pollybarron@geofarm.com","city":"Loyalhanna","state":"ND"} +{"account_number":870,"balance":43882,"firstname":"Goff","lastname":"Phelps","age":21,"gender":"M","address":"164 Montague Street","employer":"Digigen","email":"goffphelps@digigen.com","city":"Weedville","state":"IL"} +{"account_number":875,"balance":19655,"firstname":"Mercer","lastname":"Pratt","age":24,"gender":"M","address":"608 Perry Place","employer":"Twiggery","email":"mercerpratt@twiggery.com","city":"Eggertsville","state":"MO"} +{"account_number":882,"balance":10895,"firstname":"Mari","lastname":"Landry","age":39,"gender":"M","address":"963 Gerald Court","employer":"Kenegy","email":"marilandry@kenegy.com","city":"Lithium","state":"NC"} +{"account_number":887,"balance":31772,"firstname":"Eunice","lastname":"Watts","age":36,"gender":"F","address":"707 Stuyvesant Avenue","employer":"Memora","email":"eunicewatts@memora.com","city":"Westwood","state":"TN"} +{"account_number":894,"balance":1031,"firstname":"Tyler","lastname":"Fitzgerald","age":32,"gender":"M","address":"787 Meserole Street","employer":"Jetsilk","email":"tylerfitzgerald@jetsilk.com","city":"Woodlands","state":"WV"} +{"account_number":899,"balance":32953,"firstname":"Carney","lastname":"Callahan","age":23,"gender":"M","address":"724 Kimball Street","employer":"Mangelica","email":"carneycallahan@mangelica.com","city":"Tecolotito","state":"MT"} +{"account_number":902,"balance":13345,"firstname":"Hallie","lastname":"Jarvis","age":23,"gender":"F","address":"237 Duryea Court","employer":"Anixang","email":"halliejarvis@anixang.com","city":"Boykin","state":"IN"} +{"account_number":907,"balance":12961,"firstname":"Ingram","lastname":"William","age":36,"gender":"M","address":"826 Overbaugh Place","employer":"Genmex","email":"ingramwilliam@genmex.com","city":"Kimmell","state":"AK"} 
+{"account_number":914,"balance":7120,"firstname":"Esther","lastname":"Bean","age":32,"gender":"F","address":"583 Macon Street","employer":"Applica","email":"estherbean@applica.com","city":"Homeworth","state":"MN"} +{"account_number":919,"balance":39655,"firstname":"Shauna","lastname":"Hanson","age":27,"gender":"M","address":"557 Hart Place","employer":"Exospace","email":"shaunahanson@exospace.com","city":"Outlook","state":"LA"} +{"account_number":921,"balance":49119,"firstname":"Barbara","lastname":"Wade","age":29,"gender":"M","address":"687 Hoyts Lane","employer":"Roughies","email":"barbarawade@roughies.com","city":"Sattley","state":"CO"} +{"account_number":926,"balance":49433,"firstname":"Welch","lastname":"Mcgowan","age":21,"gender":"M","address":"833 Quincy Street","employer":"Atomica","email":"welchmcgowan@atomica.com","city":"Hampstead","state":"VT"} +{"account_number":933,"balance":18071,"firstname":"Tabitha","lastname":"Cole","age":21,"gender":"F","address":"916 Rogers Avenue","employer":"Eclipto","email":"tabithacole@eclipto.com","city":"Lawrence","state":"TX"} +{"account_number":938,"balance":9597,"firstname":"Sharron","lastname":"Santos","age":40,"gender":"F","address":"215 Matthews Place","employer":"Zenco","email":"sharronsantos@zenco.com","city":"Wattsville","state":"VT"} +{"account_number":940,"balance":23285,"firstname":"Melinda","lastname":"Mendoza","age":38,"gender":"M","address":"806 Kossuth Place","employer":"Kneedles","email":"melindamendoza@kneedles.com","city":"Coaldale","state":"OK"} +{"account_number":945,"balance":23085,"firstname":"Hansen","lastname":"Hebert","age":33,"gender":"F","address":"287 Conduit Boulevard","employer":"Capscreen","email":"hansenhebert@capscreen.com","city":"Taycheedah","state":"AK"} +{"account_number":952,"balance":21430,"firstname":"Angelique","lastname":"Weeks","age":33,"gender":"M","address":"659 Reeve Place","employer":"Exodoc","email":"angeliqueweeks@exodoc.com","city":"Turpin","state":"MD"} 
+{"account_number":957,"balance":11373,"firstname":"Michael","lastname":"Giles","age":31,"gender":"M","address":"668 Court Square","employer":"Yogasm","email":"michaelgiles@yogasm.com","city":"Rosburg","state":"WV"} +{"account_number":964,"balance":26154,"firstname":"Elena","lastname":"Waller","age":34,"gender":"F","address":"618 Crystal Street","employer":"Insurety","email":"elenawaller@insurety.com","city":"Gallina","state":"NY"} +{"account_number":969,"balance":22214,"firstname":"Briggs","lastname":"Lynn","age":30,"gender":"M","address":"952 Lester Court","employer":"Quinex","email":"briggslynn@quinex.com","city":"Roland","state":"ID"} +{"account_number":971,"balance":22772,"firstname":"Gabrielle","lastname":"Reilly","age":32,"gender":"F","address":"964 Tudor Terrace","employer":"Blanet","email":"gabriellereilly@blanet.com","city":"Falmouth","state":"AL"} +{"account_number":976,"balance":31707,"firstname":"Mullen","lastname":"Tanner","age":26,"gender":"M","address":"711 Whitney Avenue","employer":"Pulze","email":"mullentanner@pulze.com","city":"Mooresburg","state":"MA"} +{"account_number":983,"balance":47205,"firstname":"Mattie","lastname":"Eaton","age":24,"gender":"F","address":"418 Allen Avenue","employer":"Trasola","email":"mattieeaton@trasola.com","city":"Dupuyer","state":"NJ"} +{"account_number":988,"balance":17803,"firstname":"Lucy","lastname":"Castro","age":34,"gender":"F","address":"425 Fleet Walk","employer":"Geekfarm","email":"lucycastro@geekfarm.com","city":"Mulino","state":"VA"} +{"account_number":990,"balance":44456,"firstname":"Kelly","lastname":"Steele","age":35,"gender":"M","address":"809 Hoyt Street","employer":"Eschoir","email":"kellysteele@eschoir.com","city":"Stewartville","state":"ID"} +{"account_number":995,"balance":21153,"firstname":"Phelps","lastname":"Parrish","age":25,"gender":"M","address":"666 Miller Place","employer":"Pearlessa","email":"phelpsparrish@pearlessa.com","city":"Brecon","state":"ME"} diff --git 
a/sql-cli/tests/test_esconnection.py b/sql-cli/tests/test_esconnection.py new file mode 100644 index 0000000000..95d30f6426 --- /dev/null +++ b/sql-cli/tests/test_esconnection.py @@ -0,0 +1,148 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +import pytest +import mock +from textwrap import dedent + +from elasticsearch.exceptions import ConnectionError +from elasticsearch import Elasticsearch, RequestsHttpConnection + +from .utils import estest, load_data, run, TEST_INDEX_NAME +from src.odfe_sql_cli.esconnection import ESConnection + +INVALID_ENDPOINT = "http://invalid:9200" +OPEN_DISTRO_ENDPOINT = "https://opedistro:9200" +AES_ENDPOINT = "https://fake.es.amazonaws.com" +AUTH = ("username", "password") + + +class TestExecutor: + def load_data_to_es(self, connection): + doc = {"a": "aws"} + load_data(connection, doc) + + @estest + def test_query(self, connection): + self.load_data_to_es(connection) + + assert run(connection, "select * from %s" % TEST_INDEX_NAME) == dedent( + """\ + fetched rows / total rows = 1/1 + +-----+ + | a | + |-----| + | aws | + +-----+""" + ) + + @estest + def test_query_nonexistent_index(self, connection): + self.load_data_to_es(connection) + + expected = { + "reason": "Error occurred in Elasticsearch engine: no such index [non-existed]", + "details": "org.elasticsearch.index.IndexNotFoundException: no such index [non-existed]\nFor more " + "details, please send request for Json format to see the raw response from 
elasticsearch " + "engine.", + "type": "IndexNotFoundException", + } + + with mock.patch("src.odfe_sql_cli.esconnection.click.secho") as mock_secho: + run(connection, "select * from non-existed") + + mock_secho.assert_called_with(message=str(expected), fg="red") + + def test_connection_fail(self): + test_executor = ESConnection(endpoint=INVALID_ENDPOINT) + err_message = "Can not connect to endpoint %s" % INVALID_ENDPOINT + + with mock.patch("sys.exit") as mock_sys_exit, mock.patch("src.odfe_sql_cli.esconnection.click.secho") as mock_secho: + test_executor.set_connection() + + mock_sys_exit.assert_called() + mock_secho.assert_called_with(message=err_message, fg="red") + + def test_lost_connection(self): + test_esexecutor = ESConnection(endpoint=INVALID_ENDPOINT) + + def side_effect_set_connection(is_reconnected): + if is_reconnected: + pass + else: + return ConnectionError() + + with mock.patch("src.odfe_sql_cli.esconnection.click.secho") as mock_secho, mock.patch.object( + test_esexecutor, "set_connection" + ) as mock_set_connection: + # Assume reconnection success + mock_set_connection.side_effect = side_effect_set_connection(is_reconnected=True) + test_esexecutor.handle_server_close_connection() + + mock_secho.assert_any_call(message="Reconnecting...", fg="green") + mock_secho.assert_any_call(message="Reconnected! Please run query again", fg="green") + # Assume reconnection fail + mock_set_connection.side_effect = side_effect_set_connection(is_reconnected=False) + test_esexecutor.handle_server_close_connection() + + mock_secho.assert_any_call(message="Reconnecting...", fg="green") + mock_secho.assert_any_call( + message="Connection Failed. 
Check your ES is running and then come back", fg="red" + ) + + def test_reconnection_exception(self): + test_executor = ESConnection(endpoint=INVALID_ENDPOINT) + + with pytest.raises(ConnectionError) as error: + assert test_executor.set_connection(True) + + def test_select_client(self): + od_test_executor = ESConnection(endpoint=OPEN_DISTRO_ENDPOINT, http_auth=AUTH) + aes_test_executor = ESConnection(endpoint=AES_ENDPOINT, use_aws_authentication=True) + + with mock.patch.object(od_test_executor, "get_open_distro_client") as mock_od_client, mock.patch.object( + ESConnection, "is_sql_plugin_installed", return_value=True + ): + od_test_executor.set_connection() + mock_od_client.assert_called() + + with mock.patch.object(aes_test_executor, "get_aes_client") as mock_aes_client, mock.patch.object( + ESConnection, "is_sql_plugin_installed", return_value=True + ): + aes_test_executor.set_connection() + mock_aes_client.assert_called() + + def test_get_od_client(self): + od_test_executor = ESConnection(endpoint=OPEN_DISTRO_ENDPOINT, http_auth=AUTH) + + with mock.patch.object(Elasticsearch, "__init__", return_value=None) as mock_es: + od_test_executor.get_open_distro_client() + + mock_es.assert_called_with( + [OPEN_DISTRO_ENDPOINT], http_auth=AUTH, verify_certs=False, ssl_context=od_test_executor.ssl_context + ) + + def test_get_aes_client(self): + aes_test_executor = ESConnection(endpoint=AES_ENDPOINT, use_aws_authentication=True) + + with mock.patch.object(Elasticsearch, "__init__", return_value=None) as mock_es: + aes_test_executor.get_aes_client() + + mock_es.assert_called_with( + hosts=[AES_ENDPOINT], + http_auth=aes_test_executor.aws_auth, + use_ssl=True, + verify_certs=True, + connection_class=RequestsHttpConnection, + ) diff --git a/sql-cli/tests/test_formatter.py b/sql-cli/tests/test_formatter.py new file mode 100644 index 0000000000..b0f85c34a5 --- /dev/null +++ b/sql-cli/tests/test_formatter.py @@ -0,0 +1,183 @@ +""" +Copyright 2020 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +from __future__ import unicode_literals, print_function + +import mock +import pytest +from collections import namedtuple + +from src.odfe_sql_cli.odfesql_cli import OdfeSqlCli, COLOR_CODE_REGEX +from src.odfe_sql_cli.formatter import Formatter +from src.odfe_sql_cli.utils import OutputSettings + + +class TestFormatter: + @pytest.fixture + def pset_pager_mocks(self): + cli = OdfeSqlCli() + with mock.patch("src.odfe_sql_cli.main.click.echo") as mock_echo, mock.patch( + "src.odfe_sql_cli.main.click.echo_via_pager" + ) as mock_echo_via_pager, mock.patch.object(cli, "prompt_app") as mock_app: + yield cli, mock_echo, mock_echo_via_pager, mock_app + + termsize = namedtuple("termsize", ["rows", "columns"]) + test_line = "-" * 10 + test_data = [ + (10, 10, "\n".join([test_line] * 7)), + (10, 10, "\n".join([test_line] * 6)), + (10, 10, "\n".join([test_line] * 5)), + (10, 10, "-" * 11), + (10, 10, "-" * 10), + (10, 10, "-" * 9), + ] + + use_pager_when_on = [True, True, False, True, False, False] + + test_ids = [ + "Output longer than terminal height", + "Output equal to terminal height", + "Output shorter than terminal height", + "Output longer than terminal width", + "Output equal to terminal width", + "Output shorter than terminal width", + ] + + pager_test_data = [l + (r,) for l, r in zip(test_data, use_pager_when_on)] + + def test_format_output(self): + settings = OutputSettings(table_format="psql") + formatter = Formatter(settings) + data = { + 
"schema": [{"name": "name", "type": "text"}, {"name": "age", "type": "long"}], + "total": 1, + "datarows": [["Tim", 24]], + "size": 1, + "status": 200, + } + + results = formatter.format_output(data) + + expected = [ + "fetched rows / total rows = 1/1", + "+--------+-------+", + "| name | age |", + "|--------+-------|", + "| Tim | 24 |", + "+--------+-------+", + ] + assert list(results) == expected + + def test_format_array_output(self): + settings = OutputSettings(table_format="psql") + formatter = Formatter(settings) + data = { + "schema": [{"name": "name", "type": "text"}, {"name": "age", "type": "long"}], + "total": 1, + "datarows": [["Tim", [24, 25]]], + "size": 1, + "status": 200, + } + + results = formatter.format_output(data) + + expected = [ + "fetched rows / total rows = 1/1", + "+--------+---------+", + "| name | age |", + "|--------+---------|", + "| Tim | [24,25] |", + "+--------+---------+", + ] + assert list(results) == expected + + def test_format_output_vertical(self): + settings = OutputSettings(table_format="psql", max_width=1) + formatter = Formatter(settings) + data = { + "schema": [{"name": "name", "type": "text"}, {"name": "age", "type": "long"}], + "total": 1, + "datarows": [["Tim", 24]], + "size": 1, + "status": 200, + } + + expanded = [ + "fetched rows / total rows = 1/1", + "-[ RECORD 1 ]-------------------------", + "name | Tim", + "age | 24", + ] + + with mock.patch("src.odfe_sql_cli.main.click.secho") as mock_secho, mock.patch("src.odfe_sql_cli.main.click.confirm") as mock_confirm: + expanded_results = formatter.format_output(data) + + mock_secho.assert_called_with(message="Output longer than terminal width", fg="red") + mock_confirm.assert_called_with("Do you want to display data vertically for better visual effect?") + + assert "\n".join(expanded_results) == "\n".join(expanded) + + def test_fake_large_output(self): + settings = OutputSettings(table_format="psql") + formatter = Formatter(settings) + fake_large_data = { + "schema": 
[{"name": "name", "type": "text"}, {"name": "age", "type": "long"}], + "total": 1000, + "datarows": [["Tim", [24, 25]]], + "size": 200, + "status": 200, + } + + results = formatter.format_output(fake_large_data) + + expected = [ + "fetched rows / total rows = 200/1000\n" + "Attention: Use LIMIT keyword when retrieving more than 200 rows of data", + "+--------+---------+", + "| name | age |", + "|--------+---------|", + "| Tim | [24,25] |", + "+--------+---------+", + ] + assert list(results) == expected + + @pytest.mark.parametrize("term_height,term_width,text,use_pager", pager_test_data, ids=test_ids) + def test_pager(self, term_height, term_width, text, use_pager, pset_pager_mocks): + cli, mock_echo, mock_echo_via_pager, mock_cli = pset_pager_mocks + mock_cli.output.get_size.return_value = self.termsize(rows=term_height, columns=term_width) + + cli.echo_via_pager(text) + + if use_pager: + mock_echo.assert_not_called() + mock_echo_via_pager.assert_called() + else: + mock_echo_via_pager.assert_not_called() + mock_echo.assert_called() + + @pytest.mark.parametrize( + "text,expected_length", + [ + ( + "22200K .......\u001b[0m\u001b[91m... .......... ...\u001b[0m\u001b[91m.\u001b[0m\u001b[91m...... " + ".........\u001b[0m\u001b[91m.\u001b[0m\u001b[91m \u001b[0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[" + "0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[0m\u001b[91m...... 50% 28.6K 12m55s", + 78, + ), + ("=\u001b[m=", 2), + ("-\u001b]23\u0007-", 2), + ], + ) + def test_color_pattern(self, text, expected_length): + assert len(COLOR_CODE_REGEX.sub("", text)) == expected_length diff --git a/sql-cli/tests/test_main.py b/sql-cli/tests/test_main.py new file mode 100644 index 0000000000..6535b1d9ef --- /dev/null +++ b/sql-cli/tests/test_main.py @@ -0,0 +1,74 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. 
+A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +import mock +from textwrap import dedent + +from click.testing import CliRunner + +from .utils import estest, load_data, TEST_INDEX_NAME +from src.odfe_sql_cli.main import cli +from src.odfe_sql_cli.odfesql_cli import OdfeSqlCli + +INVALID_ENDPOINT = "http://invalid:9200" +ENDPOINT = "http://localhost:9200" +QUERY = "select * from %s" % TEST_INDEX_NAME + + +class TestMain: + @estest + def test_explain(self, connection): + doc = {"a": "aws"} + load_data(connection, doc) + + err_message = "Can not connect to endpoint %s" % INVALID_ENDPOINT + expected_output = {"from": 0, "size": 200} + expected_tabular_output = dedent( + """\ + fetched rows / total rows = 1/1 + +-----+ + | a | + |-----| + | aws | + +-----+""" + ) + + with mock.patch("src.odfe_sql_cli.main.click.echo") as mock_echo, mock.patch("src.odfe_sql_cli.main.click.secho") as mock_secho: + runner = CliRunner() + + # test -q -e + result = runner.invoke(cli, [f"-q{QUERY}", "-e"]) + mock_echo.assert_called_with(expected_output) + assert result.exit_code == 0 + + # test -q + result = runner.invoke(cli, [f"-q{QUERY}"]) + mock_echo.assert_called_with(expected_tabular_output) + assert result.exit_code == 0 + + # test invalid endpoint + runner.invoke(cli, [INVALID_ENDPOINT, f"-q{QUERY}", "-e"]) + mock_secho.assert_called_with(message=err_message, fg="red") + + @estest + def test_cli(self): + with mock.patch.object(OdfeSqlCli, "connect") as mock_connect, mock.patch.object( + OdfeSqlCli, "run_cli" + ) as mock_run_cli: + runner = CliRunner() + result = runner.invoke(cli) + + mock_connect.assert_called_with(ENDPOINT, None) + mock_run_cli.asset_called() + assert 
result.exit_code == 0 diff --git a/sql-cli/tests/test_odfesql_cli.py b/sql-cli/tests/test_odfesql_cli.py new file mode 100644 index 0000000000..d568a93bda --- /dev/null +++ b/sql-cli/tests/test_odfesql_cli.py @@ -0,0 +1,74 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. +""" +import mock +import pytest +from prompt_toolkit.shortcuts import PromptSession +from prompt_toolkit.input.defaults import create_pipe_input + +from src.odfe_sql_cli.esbuffer import es_is_multiline +from .utils import estest, load_data, TEST_INDEX_NAME, ENDPOINT +from src.odfe_sql_cli.odfesql_cli import OdfeSqlCli +from src.odfe_sql_cli.esconnection import ESConnection +from src.odfe_sql_cli.esstyle import style_factory + +AUTH = None +QUERY_WITH_CTRL_D = "select * from %s;\r\x04\r" % TEST_INDEX_NAME +USE_AWS_CREDENTIALS = False + + +@pytest.fixture() +def cli(default_config_location): + return OdfeSqlCli(clirc_file=default_config_location, always_use_pager=False) + + +class TestOdfeSqlCli: + def test_connect(self, cli): + with mock.patch.object(ESConnection, "__init__", return_value=None) as mock_ESConnection, mock.patch.object( + ESConnection, "set_connection" + ) as mock_set_connectiuon: + cli.connect(endpoint=ENDPOINT) + + mock_ESConnection.assert_called_with(ENDPOINT, AUTH, USE_AWS_CREDENTIALS) + mock_set_connectiuon.assert_called() + + @estest + @pytest.mark.skip(reason="due to prompt_toolkit throwing error, no way of currently testing this") + def test_run_cli(self, 
connection, cli, capsys): + doc = {"a": "aws"} + load_data(connection, doc) + + # the title is colored by formatter + expected = ( + "fetched rows / total rows = 1/1" "\n+-----+\n| \x1b[38;5;47;01ma\x1b[39;00m |\n|-----|\n| aws |\n+-----+" + ) + + with mock.patch.object(OdfeSqlCli, "echo_via_pager") as mock_pager, mock.patch.object( + cli, "build_cli" + ) as mock_prompt: + inp = create_pipe_input() + inp.send_text(QUERY_WITH_CTRL_D) + + mock_prompt.return_value = PromptSession( + input=inp, multiline=es_is_multiline(cli), style=style_factory(cli.syntax_style, cli.cli_style) + ) + + cli.connect(ENDPOINT) + cli.run_cli() + out, err = capsys.readouterr() + inp.close() + + mock_pager.assert_called_with(expected) + assert out.__contains__("Endpoint: %s" % ENDPOINT) + assert out.__contains__("See you next search!") diff --git a/sql-cli/tests/test_plan.md b/sql-cli/tests/test_plan.md new file mode 100644 index 0000000000..1804a3a4c9 --- /dev/null +++ b/sql-cli/tests/test_plan.md @@ -0,0 +1,60 @@ +# Test Plan + The purpose of this checklist is to guide you through the basic usage of ODFE SQL CLI, as well as a manual test process. + + +## Display + +* [ ] Auto-completion + * SQL syntax auto-completion + * index name auto-completion +* [ ] Test pagination with different output length / width. + * query for long results to see the pagination activated automatically. +* [ ] Test table formatted output. +* [ ] Test successful conversion from horizontal to vertical display with confirmation. + * resize the terminal window before launching sql cli, there will be a warning message if your terminal is too narrow for horizontal output. It will ask if you want to convert to vertical display +* [ ] Test warning message when output > 200 rows of data. (Limited by ODFE SQL syntax) + * `SELECT * FROM accounts` + * Run above command, you’ll see the max output is 200, and there will be a message at the top of your results telling you how much data was fetched. 
+ * If you want to query more than 200 rows of data, try add a `LIMIT` with more than 200. + + +## Connection + +* [ ] Test connection to a local Elasticsearch instance + * [ ] Standard Elastic version, with/without authentication by Elastic X-pack security (https://www.elastic.co/guide/en/elasticsearch/reference/7.6/security-getting-started.html) + * [ ] OSS version, no authentication + * [ ] OSS version, install [ODFE Security plugin](https://opendistro.github.io/for-elasticsearch-docs/docs/install/plugins/) to enable authentication and SSL + * Run command like `odfesql -u -w ` to connect to instance with authentication. +* [ ] Test connection to [Amazon Elasticsearch domain](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-gsg.html) with +[Fine Grained Access Control](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/fgac.html) enabled. + * Have your aws credentials correctly configured by `aws configure` + * `odfesql --aws-auth -u -w ` +* [ ] Test connection fail when connecting to invalid endpoint. + * `odfesql invalidendpoint.com` + + +## Execution + +* [ ] Test successful execution given a query. e.g. 
+ * `SELECT * FROM bank WHERE age >30 AND gender = 'm'` +* [ ] Test unsuccessful execution with an invalid SQL query will give an error message +* [ ] Test load config file + * `vim .config/odfesql-cli/config` + * change settings such as `table_format = github` + * restart sql cli, check the tabular output change + + +## Query Options + +* [ ] Test explain option -e + * `odfesql -q "SELECT * FROM accounts LIMIT 5;" -e` +* [ ] Test query and format option -q, -f + * `odfesql -q "SELECT * FROM accounts LIMIT 5;" -f csv` +* [ ] Test vertical output option -v + * `odfesql -q "SELECT * FROM accounts LIMIT 5;" -v` + +## OS and Python Version compatibility + +* [ ] Manually test on Linux(Ubuntu) and MacOS +* [ ] Test against python 3.X versions (optional) + diff --git a/sql-cli/tests/utils.py b/sql-cli/tests/utils.py new file mode 100644 index 0000000000..6cc693b2b1 --- /dev/null +++ b/sql-cli/tests/utils.py @@ -0,0 +1,91 @@ +""" +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +A copy of the License is located at + + http://www.apache.org/licenses/LICENSE-2.0 + +or in the "license" file accompanying this file. This file is distributed +on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied. See the License for the specific language governing +permissions and limitations under the License. 
+""" +import json +import pytest +from elasticsearch import ConnectionError, helpers, ConnectionPool + +from src.odfe_sql_cli.esconnection import ESConnection +from src.odfe_sql_cli.utils import OutputSettings +from src.odfe_sql_cli.formatter import Formatter + +TEST_INDEX_NAME = "odfesql_cli_test" +ENDPOINT = "http://localhost:9200" + + +def create_index(test_executor): + es = test_executor.client + es.indices.create(index=TEST_INDEX_NAME) + + +def delete_index(test_executor): + es = test_executor.client + es.indices.delete(index=TEST_INDEX_NAME) + + +def close_connection(es): + ConnectionPool.close(es) + + +def load_file(test_executor, filename="accounts.json"): + es = test_executor.client + + filepath = "./test_data/" + filename + + # generate iterable data + def load_json(): + with open(filepath, "r") as f: + for line in f: + yield json.loads(line) + + helpers.bulk(es, load_json(), index=TEST_INDEX_NAME) + + +def load_data(test_executor, doc): + es = test_executor.client + es.index(index=TEST_INDEX_NAME, body=doc) + es.indices.refresh(index=TEST_INDEX_NAME) + + +def get_connection(): + test_es_connection = ESConnection(endpoint=ENDPOINT) + test_es_connection.set_connection() + + return test_es_connection + + +def run(test_executor, query, use_console=True): + data = test_executor.execute_query(query=query, use_console=use_console) + settings = OutputSettings(table_format="psql") + formatter = Formatter(settings) + + if data: + res = formatter.format_output(data) + res = "\n".join(res) + + return res + + +# build client for testing +try: + connection = get_connection() + CAN_CONNECT_TO_ES = True + +except ConnectionError: + CAN_CONNECT_TO_ES = False + +# use @estest annotation to mark test functions +estest = pytest.mark.skipif( + not CAN_CONNECT_TO_ES, reason="Need a Elasticsearch server running at localhost:9200 accessible" +) \ No newline at end of file diff --git a/sql-cli/tox.ini b/sql-cli/tox.ini new file mode 100644 index 0000000000..f2a00f1598 --- 
/dev/null +++ b/sql-cli/tox.ini @@ -0,0 +1,7 @@ +[tox] +envlist = py38 +[testenv] +deps = pytest==4.6.3 + mock==3.0.5 + pexpect==3.3 +commands = pytest \ No newline at end of file diff --git a/sql-jdbc/.gitignore b/sql-jdbc/.gitignore new file mode 100644 index 0000000000..305eeb8cef --- /dev/null +++ b/sql-jdbc/.gitignore @@ -0,0 +1,6 @@ +.gradle/ +.idea/ +build/ +.DS_Store +out/ +*.iml \ No newline at end of file diff --git a/sql-jdbc/CODE_OF_CONDUCT.md b/sql-jdbc/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..8543edd1cc --- /dev/null +++ b/sql-jdbc/CODE_OF_CONDUCT.md @@ -0,0 +1,2 @@ +## Code of Conduct +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). diff --git a/sql-jdbc/CONTRIBUTING.md b/sql-jdbc/CONTRIBUTING.md new file mode 100644 index 0000000000..2af8e8f96b --- /dev/null +++ b/sql-jdbc/CONTRIBUTING.md @@ -0,0 +1,59 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check [existing open](../../issues), or [recently closed](../../issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. 
Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *master* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass; please add unit tests for all the new code paths introduced by your change. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](../../labels/help%20wanted) issues is a great place to start. + + +## Code of Conduct + +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). 
+ + +## Security issue notifications + +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. + + +## Licensing + +See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/sql-jdbc/LICENSE b/sql-jdbc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/sql-jdbc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sql-jdbc/NOTICE b/sql-jdbc/NOTICE new file mode 100644 index 0000000000..7052bdc170 --- /dev/null +++ b/sql-jdbc/NOTICE @@ -0,0 +1,2 @@ +Open Distro for Elasticsearch JDBC +Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/sql-jdbc/README.md b/sql-jdbc/README.md new file mode 100644 index 0000000000..be2bb4ec04 --- /dev/null +++ b/sql-jdbc/README.md @@ -0,0 +1,526 @@ +## Open Distro for ElasticSearch - JDBC + +This is the driver for JDBC connectivity to a cluster running with Open Distro for Elasticsearch SQL support. + +## Specifications + +The driver is compatible with JDBC 4.2 specification and requires a minimum of Java 8. + +## Using the driver + +The driver comes in the form of a single jar file. To use it, simply place it on the classpath of the +Java application that needs to use it. + +If using with JDBC compatible BI tools, refer to the tool documentation on configuring a new JDBC driver. Typically, +all that's required is to make the tool aware of the location of the driver jar and then use it to setup database (i.e +Elasticsearch) connections. + +### Connection URL and other settings + +To setup a connection, the driver requires a JDBC connection URL. 
The connection URL is of the form: +``` + jdbc:elasticsearch://[scheme://][host][:port][/context-path]?[property-key=value]&[property-key2=value2]..&[property-keyN=valueN] +``` + + +* scheme + + Can be one of *http* or *https*. Default is *http*. + +* host + + Hostname or IP address of the target cluster. Default is *localhost*. + +* port + + Port number on which the cluster's REST interface is listening. Default value depends on the *scheme* selected. For + *http*, the default is 9200. For *https*, the default is 443. + +* context-path + + The context path at which the cluster REST interface is rooted. Not needed if the REST interface is simply available on the '/' context path. + +* property key=value + + The query string portion of the connection URL can contain desired connection settings in the form of one or more + *property-key=value* pairs. The possible configuration properties are provided in the table below. The property keys are case sensitive but values are not unless otherwise indicated. + + Note that JDBC provides multiple APIs for specifying connection properties of which specifying them in the connection + URL is just one. When directly coding with the driver you can choose any of the other options (refer sample + code below). If you are setting up a connection via a tool, it is likely the tool will allow you to specify the + connection URL with just the scheme, host, port and context-path components) while the connection properties are provided separately. + For example, you may not wish to place the user and password in the connection URL. Check the tool you are using for + such support. + + The configurable connection properties are: + + | Property Key | Description | Accepted Value(s) | Default value | + | ------------- |-------------| -----|---------| + | user | Connection username. mandatory if `auth` property selects an authentication scheme that mandates a username value | any string | `null` | + | password | Connection password. 
mandatory if `auth` property selects a authentication scheme that mandates a password value | any string | `null` | + | fetchSize | Cursor page size | positive integer value. Max value is limited by `index.max_result_window` Elasticsearch setting | `0` (for non-paginated response) | + | logOutput | location where driver logs should be emitted | a valid file path | `null` (logs are disabled) | + | logLevel | severity level for which driver logs should be emitted | in order from highest(least logging) to lowest(most logging): OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL | OFF (logs are disabled) | + | auth | authentication mechanism to use | `NONE` (no auth), `BASIC` (HTTP Basic), `AWS_SIGV4` (AWS SIGV4) | `basic` if username and/or password is specified, `NONE` otherwise | + | awsCredentialsProvider | The AWS credential provider to be used when authentication mechanism is `AWS_SIGV4` (AWS SIGV4). If not set, the driver will use DefaultAWSCredentialsProviderChain to sign the request. Note that the driver renamed the namespaces of its dependencies, so the value has to be an instance of com.amazonaws.opendistro.elasticsearch.sql.jdbc.shadow.com.amazonaws.auth.AWSCredentialsProvider| Instance of an AWSCredentialProvider | DefaultAWSCredentialsProviderChain | + | region | if authentication type is `aws_sigv4`, then this is the region value to use when signing requests. Only needed if the driver can not determine the region for the host endpoint. The driver will detect the region if the host endpoint matches a known url pattern. | a valid AWS region value e.g. 
us-east-1 | `null` (auto-detected if possible from the host endpoint) | + | requestCompression | whether to indicate acceptance of compressed (gzip) responses when making server requests | `true` or `false` | `false` | + | useSSL | whether to establish the connection over SSL/TLS | `true` or `false` | `false` if scheme is `http`, `true` if scheme is `https` | + | trustStoreLocation | location of the SSL/TLS truststore to use | file path or URL as appropriate to the type of truststore | `null` | + | trustStoreType | type of the truststore | valid truststore type recognized by available Java security providers | JKS | + | trustStorePassword | password to access the Trust Store | any string | `null` | + | keyStoreLocation | location of the SSL/TLS keystore to use | file path or URL as appropriate to the type of keystore | `null` | + | keyStoreType | type of the keystore | valid keystore type recognized by available Java security providers | JKS | + | keyStorePassword | password to access the keystore | any string | `null` | + | trustSelfSigned | shortcut way to indicate that any self-signed certificate should be accepted. A truststore is not required to be configured. | `true` or `false` | `false` | + | hostnameVerification | indicate whether certificate hostname verification should be performed when using SSL/TLS | `true` or `false` | `true` | + +### Connecting using the DriverManager interface + +The main Driver class is `com.amazon.opendistroforelasticsearch.jdbc.Driver`. If the driver jar is on the application classpath, no other configuration is required. + +Code samples to open a connection for some typical scenarios are given below: + +* Connect to localhost on port 9200 with no authentication over a plain connection + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. 
+String url = "jdbc:elasticsearch://localhost:9200"; + +Connection con = DriverManager.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with no authentication + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://https://remote-host-name"; + +Connection con = DriverManager.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +or, + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://remote-host-name"; + +Properties properties = new Properties(); +properties.put("useSSL", "true"); + +Connection con = DriverManager.getConnection(url, properties); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host with HTTP Basic authentication over an SSL/TLS connection on the default SSL/TLS port. Note - if a username and password are provided and `auth` property is not provided, basic auth is implicitly used. + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://https://remote-host-name"; +String user = "username"; +String password = "password"; + +Connection con = DriverManager.getConnection(url, user, password); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +or, + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. 
+String url = "jdbc:elasticsearch://remote-host-name"; + +Properties properties = new Properties(); +properties.put("useSSL", "true"); +properties.put("user", "username"); +properties.put("password", "password"); + +Connection con = DriverManager.getConnection(url, properties); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host with HTTP Basic authentication over an SSL/TLS connection, allowing any self-signed certificate and optionally turning off hostname verification. This may be useful for a dev/test setup. + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://remote-host-name"; + +Properties properties = new Properties(); +properties.put("useSSL", "true"); +properties.put("trustSelfSigned", "true"); + +// uncomment below to turn off hostname verification +// properties.put("hostnameVerification", "false"); + +properties.put("user", "username"); +properties.put("password", "password"); + +Connection con = DriverManager.getConnection(url, properties); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with AWS Sig V4 authentication. The driver will determine the credentials used to sign the request just like the standard aws-sdk i.e. in standard directories, environment variables etc. + + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://https://remote-host-name?auth=aws_sigv4"; + +Connection con = DriverManager.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` +or, + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. 
+String url = "jdbc:elasticsearch://https://remote-host-name"; + +Properties properties = new Properties(); +properties.put("auth", "aws_sigv4"); + +Connection con = DriverManager.getConnection(url, properties); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with AWS Sig V4 authentication, explicitly specifying the AWSCredentialProvider to use + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://https://remote-host-name"; + +Properties properties = new Properties(); +properties.put("awsCredentialsProvider", new EnvironmentVariableCredentialsProvider()); + +Connection con = DriverManager.getConnection(url, properties); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with AWS Sig V4 authentication, explicitly specifying the region to use in the request signing. + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://https://remote-host-name?auth=aws_sigv4®ion=us-west-1"; + +Connection con = DriverManager.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +or, + +``` +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +. +. +String url = "jdbc:elasticsearch://https://remote-host-name"; + +Properties properties = new Properties(); +properties.put("auth", "aws_sigv4"); +properties.put("region", "us-west-2"); + +Connection con = DriverManager.getConnection(url, properties); +Statement st = con.createStatement(); +. +// use the connection +. 
+// close connection +con.close(); +``` +### Connecting using the DataSource interface + +The driver also provides a javax.sql.DataSource implementation via the `com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchDataSource` class that can be used to obtain a connection. Here are some typical code samples: + + +* Connect to localhost on port 9200 with no authentication over a plain connection + +``` +import java.sql.Connection; +import java.sql.Statement; +import javax.sql.DataSource; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchDataSource; + +. +. +String url = "jdbc:elasticsearch://localhost:9200"; + +ElasticsearchDataSource ds = new ElasticsearchDataSource(); +ds.setUrl(url); + +Connection con = ds.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with no authentication + +``` +import java.sql.Connection; +import java.sql.Statement; +import javax.sql.DataSource; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchDataSource; + +. +. +String url = "jdbc:elasticsearch://https://remote-host-name"; + +ElasticsearchDataSource ds = new ElasticsearchDataSource(); +ds.setUrl(url); + +Connection con = ds.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host with HTTP Basic authentication over an SSL/TLS connection on the default SSL/TLS port. + +``` +import java.sql.Connection; +import java.sql.Statement; +import javax.sql.DataSource; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchDataSource; + +. +. +String url = "jdbc:elasticsearch://https://remote-host-name"; + +ElasticsearchDataSource ds = new ElasticsearchDataSource(); +ds.setUrl(url); + +Connection con = ds.getConnection(url, "user", "password"); +Statement st = con.createStatement(); +. +// use the connection +. 
+// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with AWS Sig V4 authentication. The driver will determine the credentials used to sign the request just like the standard aws-sdk i.e. in standard directories, environment variables etc. + + +``` +import java.sql.Connection; +import java.sql.Statement; +import javax.sql.DataSource; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchDataSource; + +. +. +String url = "jdbc:elasticsearch://https://remote-host-name?auth=aws_sigv4"; + +ElasticsearchDataSource ds = new ElasticsearchDataSource(); +ds.setUrl(url); + +Connection con = ds.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with AWS Sig V4 authentication, explicitly specifying the AWSCredentialProvider to use + +``` +import java.sql.Connection; +import java.sql.Statement; +import javax.sql.DataSource; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchDataSource; + +. +. +String url = "jdbc:elasticsearch://https://remote-host-name?auth=aws_sigv4®ion=us-west-1"; + +ElasticsearchDataSource ds = new ElasticsearchDataSource(); +ds.setUrl(url); +ds.setAwsCredentialProvider(new EnvironmentVariableCredentialsProvider()); + +Connection con = ds.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +* Connect to a remote host on default SSL port with AWS Sig V4 authentication, explicitly specifying the region to use in the request signing. + +``` +import java.sql.Connection; +import java.sql.Statement; +import javax.sql.DataSource; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchDataSource; + +. +. 
+String url = "jdbc:elasticsearch://https://remote-host-name?auth=aws_sigv4&region=us-west-1"; + +ElasticsearchDataSource ds = new ElasticsearchDataSource(); +ds.setUrl(url); + +Connection con = ds.getConnection(url); +Statement st = con.createStatement(); +. +// use the connection +. +// close connection +con.close(); +``` + +## Download and Installation + +The driver will be available through standard open source repositories for Java artifacts. + +## Building from source + +The driver is built as a shadow jar so that its dependencies are bundled within itself. This way no additional libraries besides the driver jar need to be placed on an application classpath for the driver to be used. The namespaces of the bundled dependencies are modified to ensure they do not conflict with other classes on the application classpath. + +### Run unit tests and build the driver jar + +``` +./gradlew clean test shadowJar +``` + +### Build the driver jar without unit tests + +``` +./gradlew shadowJar +``` + +### Publish the built driver jar to local maven repo + +``` +./gradlew publishToMavenLocal +``` + +## Documentation + +Please refer to the [documentation](https://opendistro.github.io/for-elasticsearch-docs/) for detailed information on installing and configuring the Open Distro for Elasticsearch SQL plugin and JDBC driver. + +## Code of Conduct + +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). + + +## Security issue notifications + +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. + + +## Licensing + +See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + +## Copyright + +Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ diff --git a/sql-jdbc/THIRD-PARTY b/sql-jdbc/THIRD-PARTY new file mode 100644 index 0000000000..c1db99ed7e --- /dev/null +++ b/sql-jdbc/THIRD-PARTY @@ -0,0 +1,341 @@ +** Apache-httpComponents-HttpClient; version 4.5.6 -- https://hc.apache.org/httpcomponents-client-ga/ +** Apache-HttpComponents-HttpCore; version 4.4.10 -- https://hc.apache.org/httpcomponents-core-ga/ +** aws-java-sdk-core; version 1.11.452 -- https://aws.amazon.com/sdk-for-java/ +** commons-codec; version 1.10 -- http://commons.apache.org/proper/commons-codec/ +** commons-logging; version 1.2 -- http://commons.apache.org/proper/commons-logging/ +** ion-java; version 1.0.2 -- https://github.com/amzn/ion-java +** Jackson-annotations; version 2.9.0 -- https://github.com/FasterXML/jackson-annotations/ +** Jackson-core; version 2.9.7 -- https://github.com/FasterXML/jackson-core +** Jackson-databind; version 2.9.7 -- https://github.com/FasterXML/jackson-databind +** jackson-dataformat-cbor; version 2.6.7 -- https://github.com/FasterXML/jackson-dataformat-cbor/ +** joda-time; version 2.8.1 -- http://www.joda.org/joda-time/ + +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND +DISTRIBUTION + +1. Definitions. + + + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this +document. + + + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + + + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + + + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + + + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and +configuration files. + + + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object +code, generated documentation, and conversions to other media +types. + + + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that is +included in or attached to the work (an example is provided in the Appendix +below). + + + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works +thereof. + + + +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. 
For the purposes +of this definition, "submitted" means any form of electronic, verbal, or +written communication sent to the Licensor or its representatives, including +but not limited to communication on electronic mailing lists, source code +control systems, and issue tracking systems that are managed by, or on behalf +of, the Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise designated in +writing by the copyright owner as "Not a Contribution." + + + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this +section) patent license to make, have made, use, offer to sell, sell, import, +and otherwise transfer the Work, where such license applies only to those +patent claims licensable by such Contributor that are necessarily infringed by +their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. 
If You institute patent +litigation against any entity (including a cross-claim or counterclaim in a +lawsuit) alleging that the Work or a Contribution incorporated within the Work +constitutes direct or contributory patent infringement, then any patent +licenses granted to You under this License for that Work shall terminate as of +the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and in +Source or Object form, provided that You meet the following conditions: + +(a) You must give any other recipients of the Work or Derivative Works a copy +of this License; and + +(b) You must cause any modified files to carry prominent notices stating that +You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from the +Source form of the Work, excluding those notices that do not pertain to any +part of the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its distribution, then +any Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a +whole, provided Your use, reproduction, and distribution of the Work otherwise +complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to the +Licensor shall be under the terms and conditions of this License, without any +additional terms or conditions. Notwithstanding the above, nothing herein shall +supersede or modify the terms of any separate license agreement you may have +executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, +trademarks, service marks, or product names of the Licensor, except as required +for reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in +writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in +tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to in +writing, shall any Contributor be liable to You for damages, including any +direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or inability to +use the Work (including but not limited to damages for loss of goodwill, work +stoppage, computer failure or malfunction, or any and all other commercial +damages or losses), even if such Contributor has been advised of the +possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or +Derivative Works thereof, You may choose to offer, and charge a fee for, +acceptance of support, warranty, indemnity, or other liability obligations +and/or rights consistent with this License. However, in accepting such +obligations, You may act only on Your own behalf and on Your sole +responsibility, not on behalf of any other Contributor, and only if You agree +to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. END OF TERMS AND +CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification +within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software + +distributed under the License is distributed on an "AS IS" BASIS, + +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +See the License for the specific language governing permissions and + +limitations under the License. + +* For Apache-httpComponents-HttpClient see also this required NOTICE: +Apache HttpComponents Client +Copyright 1999-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). +* For Apache-HttpComponents-HttpCore see also this required NOTICE: +Apache HttpComponents Core +Copyright 2005-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). +* For aws-java-sdk-core see also this required NOTICE: +Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* For commons-codec see also this required NOTICE: +Apache Commons Codec +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been +translated +from the original php source code available at +http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. 
+Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. +* For commons-logging see also this required NOTICE: +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). +* For ion-java see also this required NOTICE: +Copyright 2007-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +* For Jackson-annotations see also this required NOTICE: +Apache 2.0 copyright +* For Jackson-core see also this required NOTICE: +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and +has +been in development since 2007. +It is currently developed by a community of developers, as well as +supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different +licenses. +To find the details that apply to this artifact see the accompanying LICENSE +file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. +* For Jackson-databind see also this required NOTICE: +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and +has +been in development since 2007. +It is currently developed by a community of developers, as well as +supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different +licenses. +To find the details that apply to this artifact see the accompanying LICENSE +file. 
+For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. +* For jackson-dataformat-cbor see also this required NOTICE: +This copy of Jackson JSON processor databind module is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. +* For joda-time see also this required NOTICE: +Copyright 2018 Joda.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/sql-jdbc/build.gradle b/sql-jdbc/build.gradle new file mode 100644 index 0000000000..4719102196 --- /dev/null +++ b/sql-jdbc/build.gradle @@ -0,0 +1,206 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +buildscript { + repositories { + jcenter() + } +} + +plugins { + id 'java' + id 'com.github.johnrengelman.shadow' version '4.0.1' + id 'jacoco' + id 'maven' + id 'maven-publish' + id 'signing' +} + +group 'com.amazon.opendistroforelasticsearch.client' + +// keep version in sync with version in Driver source +version '1.9.0.0' + +boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true")); +if (snapshot) { + version += "-SNAPSHOT" +} + +jacoco { + toolVersion = "0.8.3" +} + +sourceCompatibility = 8 +targetCompatibility = 8 + +repositories { + jcenter() +} + +dependencies { + implementation group: 'org.apache.httpcomponents', name: 'httpclient', version: '4.5.6' + implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.9.7' + implementation group: 'com.amazonaws', name: 'aws-java-sdk-core', version: '1.11.452' + + testImplementation('org.junit.jupiter:junit-jupiter-api:5.3.1') + testImplementation('org.junit.jupiter:junit-jupiter-params:5.3.1') + testImplementation('com.github.tomakehurst:wiremock:2.20.0') + testImplementation('org.mockito:mockito-core:2.23.0') + testImplementation('org.junit.jupiter:junit-jupiter-engine:5.3.1') + testImplementation('org.junit-pioneer:junit-pioneer:0.3.0') + testImplementation('org.eclipse.jetty:jetty-server:9.2.24.v20180105') + + testRuntimeOnly('org.slf4j:slf4j-simple:1.7.25') // capture WireMock logging +} + +tasks.withType(JavaCompile) { + options.compilerArgs << "-Xlint:deprecation" << "-Xlint:unchecked" +} + +static def getShadowPath(String path) { + return 'com.amazonaws.opendistro.elasticsearch.sql.jdbc.shadow.' 
+ path +} + +shadowJar { + baseName = rootProject.name + classifier = '' + exclude 'META-INF/maven/commons-*/**' + exclude 'META-INF/maven/org.apache.*/**' + exclude 'META-INF/maven/joda-time/**' + exclude 'META-INF/maven/com.fasterxml.*/**' + exclude 'META-INF/services/com.fasterxml.*' + exclude 'META-INF/services/org.apache.logging*/**' + exclude 'META-INF/maven/com.amazonaws/**' + exclude 'META-INF/maven/software.amazon.*/**' + exclude 'META-INF/LICENSE*' + exclude 'META-INF/NOTICE*' + exclude 'META-INF/DEPENDENCIES' + + relocate('com.amazonaws', getShadowPath('com.amazonaws')) { + exclude 'com.amazonaws.opendistro.*/**' + } + + relocate 'org.apache', getShadowPath('org.apache') + relocate 'org.joda', getShadowPath('org.joda') + relocate 'com.fasterxml', getShadowPath('com.fasterxml') + relocate 'software.amazon', getShadowPath('software.amazon') +} + +test { + useJUnitPlatform() +} + +task sourcesJar(type: Jar) { + classifier = 'sources' + from sourceSets.main.allJava +} + +task javadocJar(type: Jar) { + classifier "javadoc" + from javadoc.destinationDir +} + +publishing { + publications { + shadow(MavenPublication) { publication -> + project.shadow.component(publication) + artifact sourcesJar + artifact javadocJar + + pom { + name = "Open Distro For Elasticsearch SQL JDBC Driver" + packaging = "jar" + url = "https://github.com/opendistro-for-elasticsearch/sql-jdbc" + description = "Open Distro For Elasticsearch SQL JDBC driver" + scm { + connection = "scm:git@github.com:opendistro-for-elasticsearch/sql-jdbc.git" + developerConnection = "scm:git@github.com:opendistro-for-elasticsearch/sql-jdbc.git" + url = "git@github.com:opendistro-for-elasticsearch/sql-jdbc.git" + } + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + id = "amazonwebservices" + organization = "Amazon Web Services" + organizationUrl = "https://aws.amazon.com" + } + } + } + } + } + + 
repositories { + maven { + name = "internal-snapshots" + url = "s3://snapshots.opendistroforelasticsearch.amazon.com/maven" + authentication { + awsIm(AwsImAuthentication) // load from EC2 role or env var + } + } + maven { + name = "internal-releases" + url = "s3://artifacts.opendistroforelasticsearch.amazon.com/maven" + authentication { + awsIm(AwsImAuthentication) // load from EC2 role or env var + } + } + maven { + name = "sonatype-staging" + url "https://aws.oss.sonatype.org/service/local/staging/deploy/maven2" + credentials { + username project.hasProperty('ossrhUsername') ? project.property('ossrhUsername') : '' + password project.hasProperty('ossrhPassword') ? project.property('ossrhPassword') : '' + } + } + } + + // TODO - enabled debug logging for the time being, remove this eventually + gradle.startParameter.setShowStacktrace(ShowStacktrace.ALWAYS) + gradle.startParameter.setLogLevel(LogLevel.DEBUG) +} + +signing { + required { gradle.taskGraph.hasTask("publishShadowPublicationToSonatype-stagingRepository") } + sign publishing.publications.shadow +} + +jacoco { + toolVersion = "0.8.3" +} + +jacocoTestReport { + reports { + html.enabled true + } +} +test.finalizedBy(project.tasks.jacocoTestReport) + +jacocoTestCoverageVerification { + violationRules { + rule { + limit { + minimum = 0.4 + } + } + } +} + +check.dependsOn jacocoTestCoverageVerification diff --git a/sql-jdbc/docs/img/tableau_connection.PNG b/sql-jdbc/docs/img/tableau_connection.PNG new file mode 100644 index 0000000000..4fbced1cd2 Binary files /dev/null and b/sql-jdbc/docs/img/tableau_connection.PNG differ diff --git a/sql-jdbc/docs/img/tableau_database.PNG b/sql-jdbc/docs/img/tableau_database.PNG new file mode 100644 index 0000000000..27a9403f78 Binary files /dev/null and b/sql-jdbc/docs/img/tableau_database.PNG differ diff --git a/sql-jdbc/docs/img/tableau_graph.PNG b/sql-jdbc/docs/img/tableau_graph.PNG new file mode 100644 index 0000000000..40437a1067 Binary files /dev/null and 
b/sql-jdbc/docs/img/tableau_graph.PNG differ diff --git a/sql-jdbc/docs/tableau.md b/sql-jdbc/docs/tableau.md new file mode 100644 index 0000000000..eded78d03e --- /dev/null +++ b/sql-jdbc/docs/tableau.md @@ -0,0 +1,88 @@ +# Connecting Tableau with Open Distro for Elasticsearch + +## Download and Installation + +* Download and install [Tableau Desktop](https://www.tableau.com/en-ca/products/desktop/download). +* Install and configure [Open Distro for Elasticsearch](https://opendistro.github.io/for-elasticsearch-docs/docs/install/). +* Download the [Open Distro for Elasticsearch JDBC Driver](https://github.com/opendistro-for-elasticsearch/sql-jdbc#download-and-installation). + +## Setup + +### Specify the JDBC driver + +Place the `opendistro-sql-jdbc-x.x.x.x.jar` file in the folder for your operating system. (Create the folder if it doesn't already exist.) + +| Platform | Driver Path | +|---|---| +| Windows | C:\Program Files\Tableau\Drivers | +| Mac | ~/Library/Tableau/Drivers | +| Linux | /opt/tableau/tableau_driver/jdbc | + +### Create TDC file + +TDC file is required to add customization for the data connection. For reference, see the following sample `elasticsearch.tdc` file. +``` + + + + + + + + + + + + + + + +``` +* Using a text editor, add `<connection-customization>` section. +* Name the file `elasticsearch.tdc` and save it to `My Tableau Repository\Datasources`. +* Restart Tableau to apply the change. +For further details check [using a .tdc file with Tableau](https://kb.tableau.com/articles/howto/using-a-tdc-file-with-tableau-server) + +### Connection information + +You will need: +* [JDBC connection string](https://github.com/opendistro-for-elasticsearch/sql-jdbc#connection-url-and-other-settings) to enter in the URL field when you connect. + + Sample connection string for connecting to localhost: `jdbc:elasticsearch://localhost:9200`. + +* Credentials for signing in to the server (user name and password). +* (Optional) JDBC properties file to customize the driver behavior. 
For more details check [Customize JDBC Connections Using a Properties File](https://community.tableau.com/docs/DOC-17978) + * Create a properties file called `elasticsearch.properties`. + * Save the file to the `My Tableau Repository\Datasources` directory. + + +## Make the connection + +1. Start Tableau and under **Connect**, select **Other Databases (JDBC)**. If not visible, select **More** under **To a Server** to find it in the list. +2. Enter the JDBC connection string in the **URL** field. +3. Select the **Dialect** as **MySQL** from the drop-down list. +4. Enter the user name and password for signing into the server. +5. (Optional) Browse to the JDBC properties file. +6. Select **Sign in** + +![Image of Tableau Connection](img/tableau_connection.PNG) + +### Prepare data + +* Select a single table by double-clicking or dragging the required table to the canvas. +* To add data from two or more tables: + 1. Double click or drag tables to canvas. + 2. Select the desired type of join operation. Tableau supports inner join, right join, left join and full outer join. + 3. Select columns for join. (For meaningful result, type of column should be the same.) + +![Image of Tableau Database](img/tableau_database.PNG) + +### Build charts and analyze data + +1. Select **Sheet 1** on the bottom left corner. +2. Double click or drag desired fields under **Measure**. +3. Double click or drag desired fields under **Dimensions**. +4. Select available visualization under **Show Me**. 
+ +![Image of Tableau Graph](img/tableau_graph.PNG) diff --git a/sql-jdbc/gradle/wrapper/gradle-wrapper.jar b/sql-jdbc/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000..29953ea141 Binary files /dev/null and b/sql-jdbc/gradle/wrapper/gradle-wrapper.jar differ diff --git a/sql-jdbc/gradle/wrapper/gradle-wrapper.properties b/sql-jdbc/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000000..91a7af6eda --- /dev/null +++ b/sql-jdbc/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionSha256Sum=b7aedd369a26b177147bcb715f8b1fc4fe32b0a6ade0d7fd8ee5ed0c6f731f2c +distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-all.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/sql-jdbc/gradlew b/sql-jdbc/gradlew new file mode 100755 index 0000000000..cccdd3d517 --- /dev/null +++ b/sql-jdbc/gradlew @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. 
+MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 
+ esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/sql-jdbc/gradlew.bat b/sql-jdbc/gradlew.bat new file mode 100644 index 0000000000..e95643d6a2 --- /dev/null +++ b/sql-jdbc/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. 
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.7.0.0.md b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.7.0.0.md new file mode 100644 index 0000000000..07d7ba095c --- /dev/null +++ b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.7.0.0.md @@ -0,0 +1,3 @@ +## 2020-05-07, Version 1.7.0.0 +### Changes +* Feature [#76](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/76): Cursor integration. 
(issue: [#74](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/74)) diff --git a/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.8.0.0.md b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.8.0.0.md new file mode 100644 index 0000000000..b514816571 --- /dev/null +++ b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.8.0.0.md @@ -0,0 +1,3 @@ +## 2020-05-18, Version 1.8.0.0 +### Maintenance +* [Adds workflow to upload jar to Maven](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/81): diff --git a/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.9.0.0.md b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.9.0.0.md new file mode 100644 index 0000000000..e2063c8ca4 --- /dev/null +++ b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes-1.9.0.0.md @@ -0,0 +1,4 @@ +## 2020-06-23, Version 1.9.0.0 (Current) +### Features +* Feature [#87](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/87): Elasticsearch 7.8.0 compatibility +(issue: [#86](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/86)) diff --git a/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes.md b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes.md new file mode 100644 index 0000000000..5b459611f6 --- /dev/null +++ b/sql-jdbc/release-notes/opendistro-elasticsearch-jdbc.release-notes.md @@ -0,0 +1,84 @@ +## 2020-04-29, Version 1.6.1.0 +### Enhancement +* Enhancement [#72](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/72): Use default holdability for prepareStatement. 
(issue: [#63](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/63)) + +## 2020-03-24, Version 1.6.0.0 +### Enhancement +* Enhancement [#49](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/49): Implementation of the execute method in the PreparedStatementImpl class (issue: [#62](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/62)) + +### Bugfix +* BugFix [#68](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/68): Change the request body encoding to UTF-8 (issues: [#54](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/54), [#66](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/66), [#opendistro for elasticsearch/sql#392](https://github.com/opendistro-for-elasticsearch/sql/issues/392) ) + +## 2020-1-26, Version 1.4.0 + +### Features + +#### Documentation +* Feature [#37](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/37): Tableau documentation +* Feature [#35](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/35): Add documentation for connecting Tableau with OpenDistro for Elasticsearch using JDBC Driver + +### Bugfixes +* BugFix [#47](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/47): Result set metadata returns Elasticsearch type (issue: [#43](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/43)) +* BugFix [#45](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/45): Add missing Elasticsearch type : object (issue: [#44](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/43)) +* BugFix [#32](https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/32): Added IP type and mapped with JDBC type of varchar + +## 2019-10-29, Version 1.3.0 + +### Changes + +* Elasticsearch 7.3.2 compatibility +* BugFix: support negative float + +## 2019-08-16, Version 1.2.0 + +### Changes + +* Elasticsearch 7.2.0 compatibility +* Support for custom AWS Credentials providers + +## 2019-06-24, Version 1.1.0 + +### 
Changes + +* Elasticsearch 7.1.1 compatibility + +## 2019-06-06, Version 1.0.0 + +### Changes + +* Updated the LocalDateTime to Timestamp conversion to support timezone [issue #6] +* Updated the connection URL template in README.md + +## 2019-04-19, Version 0.9.0 + +No update in this release. + + +## 2019-04-02, Version 0.8.0 + +### Notable Changes + +* Feature [#4](https://github.com/opendistro-for-elasticsearch/sql-jdbc/issues/4): Add support for Elasticsearch 6.6 + + +## 2019-03-11, Version 0.7.0 + +### Notable Changes + +This is the first release of OpenES-JDBC. + +OpenES-JDBC provides a driver for JDBC connectivity for OpenES-SQL. The driver has been developed from scratch and offers the following features in this initial release: + +* JDBC API implementation as per JDBC 4.2 specifications +* java.sql.DriverManager and javax.sql.DataSource interface implementation for creating JDBC connections to Elasticsearch clusters running with OpenES-SQL plugin +* java.sql.Statement implementation to allow creation and submission of SQL queries to OpenES-SQL +* java.sql.ResultSet and java.sql.ResultSetMetadata implementation for parsing query results +* java.sql.PreparedStatement implementation for creation and submission of parameterized SQL queries to OpenES-SQL +* Support for HTTP BASIC and AWS SIGV4 authentication mechanisms +* Full support for Elasticsearch Datatypes: BOOLEAN, BYTE, SHORT, INTEGER, LONG, HALF_FLOAT, FLOAT, DOUBLE, SCALED_FLOAT, KEYWORD, TEXT +* Support Elasticsearch DATE data type with some limitations + + +### Commits +The code has been developed from scratch so their are numerous commits over the course of development work. +A single squash commit shall be created for the first release. diff --git a/sql-jdbc/settings.gradle b/sql-jdbc/settings.gradle new file mode 100644 index 0000000000..ae072c6b5e --- /dev/null +++ b/sql-jdbc/settings.gradle @@ -0,0 +1,17 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +rootProject.name = 'opendistro-sql-jdbc' diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ConnectionImpl.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ConnectionImpl.java new file mode 100644 index 0000000000..7a5fce4822 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ConnectionImpl.java @@ -0,0 +1,523 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.internal.JdbcWrapper; +import com.amazon.opendistroforelasticsearch.jdbc.internal.Version; +import com.amazon.opendistroforelasticsearch.jdbc.internal.util.JavaUtil; +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ClusterMetadata; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ConnectionResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.Protocol; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ProtocolFactory; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonHttpProtocolFactory; +import com.amazon.opendistroforelasticsearch.jdbc.transport.Transport; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportException; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportFactory; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.ApacheHttpTransportFactory; + +import java.io.IOException; +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLNonTransientException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; + +public class ConnectionImpl 
implements ElasticsearchConnection, JdbcWrapper, LoggingSource { + + private String url; + private String user; + private Logger log; + private int fetchSize; + private boolean open = false; + private Transport transport; + private Protocol protocol; + private ClusterMetadata clusterMetadata; + + public ConnectionImpl(ConnectionConfig connectionConfig, Logger log) throws SQLException { + this(connectionConfig, ApacheHttpTransportFactory.INSTANCE, JsonHttpProtocolFactory.INSTANCE, log); + } + + public ConnectionImpl(ConnectionConfig connectionConfig, TransportFactory transportFactory, + ProtocolFactory protocolFactory, Logger log) throws SQLException { + this.log = log; + this.url = connectionConfig.getUrl(); + this.user = connectionConfig.getUser(); + this.fetchSize = connectionConfig.getFetchSize(); + + try { + this.transport = transportFactory.getTransport(connectionConfig, log, getUserAgent()); + } catch (TransportException te) { + logAndThrowSQLException( + log, new SQLNonTransientException("Could not initialize transport for the connection: "+te.getMessage(), te) + ); + } + + this.protocol = protocolFactory.getProtocol(connectionConfig, this.transport); + + log.debug(() -> logMessage("Initialized Transport: %s, Protocol: %s", transport, protocol)); + + try { + ConnectionResponse connectionResponse = this.protocol.connect(connectionConfig.getLoginTimeout() * 1000); + this.clusterMetadata = connectionResponse.getClusterMetadata(); + this.open = true; + } catch (ResponseException | IOException ex) { + logAndThrowSQLException(log, new SQLException("Connection error "+ex.getMessage(), ex)); + } + + } + + public String getUser() { + return user; + } + + public int getFetchSize() { + return fetchSize; + } + + @Override + public Statement createStatement() throws SQLException { + log.debug(() -> logEntry("createStatement()")); + Statement st = createStatementX(); + log.debug(() -> logExit("createStatement", st)); + return st; + } + + public Statement 
createStatementX() throws SQLException { + return new StatementImpl(this, log); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + log.debug(() -> logEntry("prepareStatment (%s)", sql)); + checkOpen(); + PreparedStatement pst = prepareStatementX(sql); + log.debug(() -> logExit("prepareStatement", pst)); + return pst; + } + + private PreparedStatement prepareStatementX(String sql) throws SQLException { + return new PreparedStatementImpl(this, sql, log); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + throw new SQLFeatureNotSupportedException("prepareCall is not supported."); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + checkOpen(); + return sql; + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + checkOpen(); + if (!autoCommit) { + throw new SQLNonTransientException("autoCommit can not be disabled."); + } + } + + @Override + public boolean getAutoCommit() throws SQLException { + checkOpen(); + return true; + } + + @Override + public void commit() throws SQLException { + checkOpen(); + throw new SQLNonTransientException("autoCommit is enabled on the connection."); + } + + @Override + public void rollback() throws SQLException { + checkOpen(); + throw new SQLNonTransientException("autoCommit is enabled on the connection."); + } + + @Override + public void close() throws SQLException { + log.debug(() -> logEntry("close ()")); + closeX(); + } + + private void closeX() throws SQLException { + open = false; + try { + transport.close(); + } catch (TransportException te) { + log.error(() -> logMessage("Exception closing transport: "+te), te); + } + log.close(); + } + + @Override + public boolean isClosed() throws SQLException { + return isClosedX(); + } + + protected boolean isClosedX() throws SQLException { + return !open; + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + 
log.debug(() -> logEntry("getMetaData()")); + DatabaseMetaData dbmd = new DatabaseMetaDataImpl(this, log); + log.debug(() -> logExit("getMetaData", dbmd)); + return dbmd; + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + if (!readOnly) + throw new SQLNonTransientException("read-only mode can not be disabled."); + } + + @Override + public boolean isReadOnly() throws SQLException { + checkOpen(); + return true; + } + + @Override + public void setCatalog(String catalog) throws SQLException { + checkOpen(); + // no-op + } + + @Override + public String getCatalog() throws SQLException { + return getClusterName(); + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + checkOpen(); + if (level != TRANSACTION_NONE) + throw new SQLNonTransientException("Only TRANSACTION_NONE is supported."); + } + + @Override + public int getTransactionIsolation() throws SQLException { + checkOpen(); + return TRANSACTION_NONE; + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkOpen(); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + checkOpen(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + log.debug(() -> logEntry("createStatement (%d, %d)", resultSetType, resultSetConcurrency)); + checkOpen(); + validateResultSetCharacteristics(resultSetType, resultSetConcurrency, ResultSet.HOLD_CURSORS_OVER_COMMIT); + Statement st = createStatementX(); + log.debug(() -> logExit("createStatement", st)); + return st; + } + + private void validateResultSetCharacteristics( + int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + validateResultSetType(resultSetType); + validateResulSetConcurrency(resultSetConcurrency); + validateResultSetHoldability(resultSetHoldability); + } + + private void validateResultSetType(int resultSetType) throws SQLException { + 
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) + throw new SQLNonTransientException("Only ResultSets of TYPE_FORWARD_ONLY are supported."); + } + + private void validateResulSetConcurrency(int resultSetConcurrency) throws SQLException { + if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) + throw new SQLNonTransientException("Only ResultSets with concurrency CONCUR_READ_ONLY are supported."); + } + + private void validateResultSetHoldability(int holdability) throws SQLException { + if (holdability != ResultSet.HOLD_CURSORS_OVER_COMMIT) + throw new SQLNonTransientException("Only HOLD_CURSORS_OVER_COMMIT holdability is supported."); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + log.debug(() -> logEntry("prepareStatement (%s, %d, %d)", sql, resultSetType, resultSetConcurrency)); + checkOpen(); + validateResultSetCharacteristics(resultSetType, resultSetConcurrency, ResultSet.HOLD_CURSORS_OVER_COMMIT); + PreparedStatement pst = prepareStatementX(sql); + log.debug(() -> logExit("prepareStatement", pst)); + return pst; + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + throw new SQLFeatureNotSupportedException("prepareCall is not supported"); + } + + @Override + public Map> getTypeMap() throws SQLException { + return null; + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + throw new SQLFeatureNotSupportedException("setTypeMap is not supported"); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + checkOpen(); + validateResultSetHoldability(holdability); + } + + @Override + public int getHoldability() throws SQLException { + checkOpen(); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + throw new SQLFeatureNotSupportedException("Transactions and 
savepoints are not supported."); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + throw new SQLFeatureNotSupportedException("Transactions and savepoints are not supported."); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + throw new SQLFeatureNotSupportedException("Transactions are not supported."); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + throw new SQLFeatureNotSupportedException("Transactions and savepoints are not supported."); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + log.debug(() -> logEntry("createStatement (%d, %d, %d)", resultSetType, resultSetConcurrency, resultSetHoldability)); + checkOpen(); + validateResultSetCharacteristics(resultSetType, resultSetConcurrency, resultSetHoldability); + Statement st = createStatementX(); + log.debug(() -> logExit("createStatment", st)); + return st; + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + log.debug(() -> logEntry("prepareStatement (%s, %d, %d, %d)", sql, resultSetType, resultSetConcurrency, resultSetHoldability)); + checkOpen(); + validateResultSetCharacteristics(resultSetType, resultSetConcurrency, resultSetHoldability); + PreparedStatement pst = prepareStatementX(sql); + log.debug(() -> logExit("prepareStatement", pst)); + return pst; + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + throw new SQLFeatureNotSupportedException("prepareCall is not supported"); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + log.debug(() -> logEntry("prepareStatement (%s, %d)", sql, 
autoGeneratedKeys)); + checkOpen(); + if (autoGeneratedKeys != Statement.NO_GENERATED_KEYS) { + throw new SQLFeatureNotSupportedException("Auto generated keys are not supported."); + } + PreparedStatement pst = prepareStatementX(sql); + log.debug(() -> logExit("prepareStatement", pst)); + return pst; + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + throw new SQLFeatureNotSupportedException("Auto generated keys are not supported."); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + throw new SQLFeatureNotSupportedException("Auto generated keys are not supported."); + } + + @Override + public Clob createClob() throws SQLException { + throw new SQLFeatureNotSupportedException("Clob is not supported."); + } + + @Override + public Blob createBlob() throws SQLException { + throw new SQLFeatureNotSupportedException("Blob is not supported."); + } + + @Override + public NClob createNClob() throws SQLException { + throw new SQLFeatureNotSupportedException("NClob is not supported."); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML is not supported."); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + // TODO - implement through a HEAD or a GET to "/", or a dummy SQL? 
+ log.debug(() -> logEntry("isValid (%d)", timeout)); + + boolean isValid = true; + + log.debug(() -> logExit("isValid", isValid)); + return isValid; + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + throw new SQLClientInfoException("Client info is not supported.", null); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + throw new SQLClientInfoException("Client info is not supported.", null); + } + + @Override + public String getClientInfo(String name) throws SQLException { + checkOpen(); + return null; + } + + @Override + public Properties getClientInfo() throws SQLException { + checkOpen(); + return null; + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + throw new SQLFeatureNotSupportedException("Array is not supported."); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + throw new SQLFeatureNotSupportedException("Struct is not supported."); + } + + @Override + public void setSchema(String schema) throws SQLException { + // no-op + } + + @Override + public String getSchema() throws SQLException { + return ""; + } + + @Override + public void abort(Executor executor) throws SQLException { + log.debug(() -> logEntry("abort (%s) ", executor)); + closeX(); + log.debug(() -> logExit("abort")); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + checkOpen(); + // no-op, not supported yet + } + + @Override + public int getNetworkTimeout() throws SQLException { + return 0; + } + + public String getUrl() { + return url; + } + + private void checkOpen() throws SQLException { + if (isClosedX()) { + logAndThrowSQLException(log, new SQLException("Connection is closed.")); + } + } + + @Override + public String getClusterName() throws SQLException { + checkOpen(); + return 
clusterMetadata.getClusterName(); + } + + @Override + public String getClusterUUID() throws SQLException { + checkOpen(); + return clusterMetadata.getClusterUUID(); + } + + public ClusterMetadata getClusterMetadata() throws SQLException { + checkOpen(); + return this.clusterMetadata; + } + + public Transport getTransport() { + return transport; + } + + public Protocol getProtocol() { + return protocol; + } + + public Logger getLog() { + return log; + } + + private String getUserAgent() { + return String.format("openes-jdbc/%s (Java %s)", + Version.Current.getFullVersion(), JavaUtil.getJavaVersion()); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/DatabaseMetaDataImpl.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/DatabaseMetaDataImpl.java new file mode 100644 index 0000000000..638058f2a0 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/DatabaseMetaDataImpl.java @@ -0,0 +1,1289 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.JdbcWrapper; +import com.amazon.opendistroforelasticsearch.jdbc.internal.Version; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ColumnDescriptor; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.RowIdLifetime; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + + +public class DatabaseMetaDataImpl implements DatabaseMetaData, JdbcWrapper, LoggingSource { + + private ConnectionImpl connection; + private Logger log; + + public DatabaseMetaDataImpl(ConnectionImpl connection, Logger log) { + this.connection = connection; + this.log = log; + } + + @Override + public boolean allProceduresAreCallable() throws SQLException { + return true; + } + + @Override + public boolean allTablesAreSelectable() throws SQLException { + return true; + } + + @Override + public String getURL() throws SQLException { + return connection.getUrl(); + } + + @Override + public String getUserName() throws SQLException { + return connection.getUser(); + } + + @Override + public boolean isReadOnly() throws SQLException { + return true; + } + + @Override + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedLow() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtStart() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtEnd() 
throws SQLException { + return true; + } + + @Override + public String getDatabaseProductName() throws SQLException { + return "Elasticsearch"; + } + + @Override + public String getDatabaseProductVersion() throws SQLException { + return connection.getClusterMetadata().getVersion().getFullVersion(); + } + + @Override + public String getDriverName() throws SQLException { + return "Elasticsearch JDBC Driver"; + } + + @Override + public String getDriverVersion() throws SQLException { + return Version.Current.getFullVersion(); + } + + @Override + public int getDriverMajorVersion() { + return Version.Current.getMajor(); + } + + @Override + public int getDriverMinorVersion() { + return Version.Current.getMinor(); + } + + @Override + public boolean usesLocalFiles() throws SQLException { + return true; + } + + @Override + public boolean usesLocalFilePerTable() throws SQLException { + return true; + } + + @Override + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public String getIdentifierQuoteString() throws SQLException { + // space to indicate quoting not supported currently + return " "; + } + + @Override + public String getSQLKeywords() throws SQLException { + return ""; + } + + @Override 
+ public String getNumericFunctions() throws SQLException { + return ""; + } + + @Override + public String getStringFunctions() throws SQLException { + return ""; + } + + @Override + public String getSystemFunctions() throws SQLException { + return ""; + } + + @Override + public String getTimeDateFunctions() throws SQLException { + return ""; + } + + @Override + public String getSearchStringEscape() throws SQLException { + return "\\"; + } + + @Override + public String getExtraNameCharacters() throws SQLException { + return ""; + } + + @Override + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsColumnAliasing() throws SQLException { + return true; + } + + @Override + public boolean nullPlusNonNullIsNull() throws SQLException { + return true; + } + + @Override + public boolean supportsConvert() throws SQLException { + return false; + } + + @Override + public boolean supportsConvert(int fromType, int toType) throws SQLException { + return false; + } + + @Override + public boolean supportsTableCorrelationNames() throws SQLException { + return true; + } + + @Override + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } + + @Override + public boolean supportsExpressionsInOrderBy() throws SQLException { + return false; + } + + @Override + public boolean supportsOrderByUnrelated() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupBy() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupByUnrelated() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupByBeyondSelect() throws SQLException { + return true; + } + + @Override + public boolean supportsLikeEscapeClause() throws SQLException { + return true; + } + + @Override + public boolean 
supportsMultipleResultSets() throws SQLException { + return false; + } + + @Override + public boolean supportsMultipleTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsNonNullableColumns() throws SQLException { + return true; + } + + @Override + public boolean supportsMinimumSQLGrammar() throws SQLException { + return true; + } + + @Override + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return true; + } + + @Override + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsIntegrityEnhancementFacility() throws SQLException { + return false; + } + + @Override + public boolean supportsOuterJoins() throws SQLException { + return true; + } + + @Override + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } + + @Override + public boolean supportsLimitedOuterJoins() throws SQLException { + return true; + } + + @Override + public String getSchemaTerm() throws SQLException { + return "schema"; + } + + @Override + public String getProcedureTerm() throws SQLException { + return "procedure"; + } + + @Override + public String getCatalogTerm() throws SQLException { + return "clusterName"; + } + + @Override + public boolean isCatalogAtStart() throws SQLException { + return false; + } + + @Override + public String getCatalogSeparator() throws SQLException { + return "."; + } + + @Override + public boolean supportsSchemasInDataManipulation() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return true; + } + + @Override + public boolean 
supportsSchemasInTableDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInIndexDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInDataManipulation() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsPositionedDelete() throws SQLException { + return false; + } + + @Override + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsStoredProcedures() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } + + @Override + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } + + @Override + public boolean supportsUnion() throws SQLException { + return false; + } + + @Override + public boolean supportsUnionAll() throws SQLException { + return true; + } + + @Override + public boolean 
supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } + + @Override + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCharLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInGroupBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + @Override + public int getMaxConnections() throws SQLException { + return 0; + } + + @Override + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxIndexLength() throws SQLException { + return 0; + } + + @Override + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxRowSize() throws SQLException { + return 0; + } + + @Override + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return true; + } + + @Override + public int getMaxStatementLength() throws SQLException { + return 0; + } + + @Override + public int getMaxStatements() throws SQLException { 
+ return 0; + } + + @Override + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxTablesInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + @Override + public int getDefaultTransactionIsolation() throws SQLException { + return Connection.TRANSACTION_NONE; + } + + @Override + public boolean supportsTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return level == Connection.TRANSACTION_NONE; + } + + @Override + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + @Override + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { + return emptyResultSet(log, + rscd("PROCEDURE_CAT"), + rscd("PROCEDURE_SCHEM"), + rscd("PROCEDURE_NAME"), + rscd("RESERVED4"), + rscd("RESERVED5"), + rscd("RESERVED6"), + rscd("REMARKS"), + rscd("PROCEDURE_TYPE", ElasticsearchType.SHORT.getTypeName()), + rscd("SPECIFIC_NAME") + ); + } + + @Override + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) + throws SQLException { + return emptyResultSet(log, + rscd("PROCEDURE_CAT"), + rscd("PROCEDURE_SCHEM"), + rscd("PROCEDURE_NAME"), + rscd("COLUMN_NAME"), + rscd("COLUMN_TYPE", ElasticsearchType.SHORT.getTypeName()), + rscd("DATA_TYPE", ElasticsearchType.INTEGER.getTypeName()), + rscd("TYPE_NAME"), + 
rscd("PRECISION", ElasticsearchType.INTEGER.getTypeName()), + rscd("LENGTH", ElasticsearchType.INTEGER.getTypeName()), + rscd("SCALE", ElasticsearchType.SHORT.getTypeName()), + rscd("RADIX", ElasticsearchType.SHORT.getTypeName()), + rscd("NULLABLE", ElasticsearchType.SHORT.getTypeName()), + rscd("REMARKS"), + rscd("COLUMN_DEF"), + rscd("SQL_DATA_TYPE", ElasticsearchType.INTEGER.getTypeName()), + rscd("SQL_DATETIME_SUB", ElasticsearchType.INTEGER.getTypeName()), + rscd("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER.getTypeName()), + rscd("ORDINAL_POSITION", ElasticsearchType.INTEGER.getTypeName()), + rscd("IS_NULLABLE"), + rscd("SPECIFIC_NAME") + ); + } + + @Override + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { + // TODO - when server plugin supports PreparedStatement fully, implement this as a preparedStatment with params + log.debug(() -> logMessage("getTables(%s, %s, %s, %s)", + catalog, schemaPattern, tableNamePattern, Arrays.toString(types))); + + PreparedStatement pst = connection.prepareStatement("SHOW TABLES LIKE " + + (tableNamePattern == null ? 
"%" : tableNamePattern)); + + ResultSet resultSet = pst.executeQuery(); + + log.debug(() -> logMessage("getTables returning: " + resultSet)); + return resultSet; + } + + @Override + public ResultSet getSchemas() throws SQLException { + log.debug(() -> logMessage("getSchemas()")); + + ResultSet resultSet = getSchemasX(null, null); + + log.debug(() -> logMessage("getSchemas() returning: " + resultSet)); + return resultSet; + } + + @Override + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + log.debug(() -> logMessage("getSchemas(%s, %s)", catalog, schemaPattern)); + + ResultSet resultSet = getSchemasX(catalog, schemaPattern); + + log.debug(() -> logMessage("getSchemas() returning: %s", resultSet)); + return resultSet; + } + + private ResultSet getSchemasX(String catalog, String schemaPattern) throws SQLException { + List columnDescriptors = new ArrayList<>(); + columnDescriptors.add(rscd("TABLE_SCHEM")); + columnDescriptors.add(rscd("TABLE_CATALOG")); + + List> dataRows = new ArrayList<>(); + + if (clusterCatalogMatches(catalog) && clusterSchemaMatches(schemaPattern)) { + dataRows.add(Arrays.asList("", getClusterCatalogName())); + } + return new ResultSetImpl(null, columnDescriptors, dataRows, log); + } + + public Logger getLog() { + return log; + } + + @Override + public ResultSet getCatalogs() throws SQLException { + log.debug(() -> logMessage("getCatalogs()")); + ResultSet resultSet; + + List columnDescriptors = new ArrayList<>(); + columnDescriptors.add(rscd("TABLE_CAT")); + + List> dataRows = new ArrayList<>(); + dataRows.add(Arrays.asList(getClusterCatalogName())); + + resultSet = new ResultSetImpl(null, columnDescriptors, dataRows, log); + + log.debug(() -> logMessage("getCatalogs() returning: %s", resultSet)); + return resultSet; + } + + @Override + public ResultSet getTableTypes() throws SQLException { + log.debug(() -> logMessage("getTableTypes()")); + ResultSet resultSet; + + List columnDescriptors = new 
ArrayList<>(); + columnDescriptors.add(rscd("TABLE_TYPE")); + + List> dataRows = new ArrayList<>(); + dataRows.add(Arrays.asList("BASE TABLE")); + + resultSet = new ResultSetImpl(null, columnDescriptors, dataRows, log); + + log.debug(() -> logMessage("getTableTypes() returning: %s", resultSet)); + return resultSet; + } + + @Override + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + log.debug(() -> logMessage("getColumns(%s, %s, %s, %s)", + catalog, schemaPattern, tableNamePattern, columnNamePattern)); + ColumnMetadataStatement statement = new ColumnMetadataStatement(connection, tableNamePattern, columnNamePattern, log); + ResultSet resultSet = statement.executeQuery(); + log.debug(() -> logMessage("getColumns() returning: %s", resultSet)); + return resultSet; + } + + @Override + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { + throw new SQLFeatureNotSupportedException("Privileges are not supported"); + } + + @Override + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + throw new SQLFeatureNotSupportedException("Privileges are not supported"); + } + + @Override + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { + throw new SQLFeatureNotSupportedException("Row identifiers are not supported"); + } + + @Override + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Version columns are not supported"); + } + + @Override + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Primary keys are not supported"); + } + + @Override + public ResultSet 
getImportedKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Primary and Foreign keys are not supported"); + } + + @Override + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Primary and Foreign keys are not supported"); + } + + @Override + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, + String foreignSchema, String foreignTable) throws SQLException { + throw new SQLFeatureNotSupportedException("Primary and Foreign keys are not supported"); + } + + @Override + public ResultSet getTypeInfo() throws SQLException { + log.debug(() -> logMessage("getTypeInfo()")); + ResultSet resultSet; + + List columnDescriptors = new ArrayList<>(); + columnDescriptors.add(rscd("TYPE_NAME")); + columnDescriptors.add(rscd("DATA_TYPE", ElasticsearchType.INTEGER.getTypeName())); + columnDescriptors.add(rscd("PRECISION", ElasticsearchType.INTEGER.getTypeName())); + columnDescriptors.add(rscd("LITERAL_PREFIX")); + columnDescriptors.add(rscd("LITERAL_SUFFIX")); + columnDescriptors.add(rscd("CREATE_PARAMS")); + columnDescriptors.add(rscd("NULLABLE", ElasticsearchType.SHORT.getTypeName())); + columnDescriptors.add(rscd("CASE_SENSITIVE", ElasticsearchType.BOOLEAN.getTypeName())); + columnDescriptors.add(rscd("SEARCHABLE", ElasticsearchType.SHORT.getTypeName())); + columnDescriptors.add(rscd("UNSIGNED_ATTRIBUTE", ElasticsearchType.BOOLEAN.getTypeName())); + columnDescriptors.add(rscd("FIXED_PREC_SCALE", ElasticsearchType.BOOLEAN.getTypeName())); + columnDescriptors.add(rscd("AUTO_INCREMENT", ElasticsearchType.BOOLEAN.getTypeName())); + columnDescriptors.add(rscd("LOCAL_TYPE_NAME")); + columnDescriptors.add(rscd("MINIMUM_SCALE", ElasticsearchType.SHORT.getTypeName())); + columnDescriptors.add(rscd("MAXIMUM_SCALE", ElasticsearchType.SHORT.getTypeName())); + 
columnDescriptors.add(rscd("SQL_DATA_TYPE", ElasticsearchType.INTEGER.getTypeName())); + columnDescriptors.add(rscd("SQL_DATETIME_SUB", ElasticsearchType.INTEGER.getTypeName())); + columnDescriptors.add(rscd("NUM_PREC_RADIX", ElasticsearchType.INTEGER.getTypeName())); + + List> dataRows = new ArrayList<>(); + for (ElasticsearchType esType : ElasticsearchType.values()) { + dataRows.add(Arrays.asList( + esType.name(), + esType.getJdbcType().getVendorTypeNumber(), + esType.getPrecision(), + "'", + "'", + null, + typeNullableUnknown, + (esType == ElasticsearchType.TEXT || esType == ElasticsearchType.KEYWORD), // case sensitive + typeSearchable, + !esType.isSigned(), + false, + false, + null, + null, // min scale - derive from Java type? + null, // max scale - derive from Java type? + null, + null, + 10 + )); + } + + resultSet = new ResultSetImpl(null, columnDescriptors, dataRows, log); + + log.debug(() -> logMessage("getTypeInfo() returning: %s", resultSet)); + return resultSet; + } + + @Override + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { + throw new SQLFeatureNotSupportedException("Table indices are not supported"); + } + + @Override + public boolean supportsResultSetType(int type) throws SQLException { + return type == ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + return type == ResultSet.TYPE_FORWARD_ONLY && ResultSet.CONCUR_READ_ONLY == concurrency; + } + + @Override + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override 
+ public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean supportsBatchUpdates() throws SQLException { + return false; + } + + @Override + public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { + return emptyResultSet(log, + rscd("TYPE_CAT"), + rscd("TYPE_SCHEM"), + rscd("TYPE_NAME"), + rscd("CLASS_NAME"), + rscd("DATA_TYPE", ElasticsearchType.INTEGER.getTypeName()), + rscd("REMARKS"), + rscd("BASE_TYPE", ElasticsearchType.SHORT.getTypeName()) + ); + } + + @Override + public Connection getConnection() throws SQLException { + return connection; + } + + @Override + public boolean supportsSavepoints() throws SQLException { + return false; + } + + @Override + public boolean supportsNamedParameters() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } + + @Override + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } + + @Override + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + return emptyResultSet(log, + rscd("TYPE_CAT"), + rscd("TYPE_SCHEM"), + rscd("TYPE_NAME"), + rscd("SUPERTYPE_CAT"), + rscd("SUPERTYPE_SCHEM"), + rscd("SUPERTYPE_NAME") + ); + } + + @Override + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + return emptyResultSet(log, + rscd("TABLE_CAT"), + 
rscd("TABLE_SCHEM"), + rscd("TABLE_NAME"), + rscd("SUPERTABLE_NAME") + ); + } + + @Override + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) + throws SQLException { + return emptyResultSet(log, + rscd("TYPE_CAT"), + rscd("TYPE_SCHEM"), + rscd("TYPE_NAME"), + rscd("ATTR_NAME"), + rscd("DATA_TYPE", ElasticsearchType.INTEGER.getTypeName()), + rscd("ATTR_TYPE_NAME"), + rscd("ATTR_SIZE", ElasticsearchType.INTEGER.getTypeName()), + rscd("DECIMAL_DIGITS", ElasticsearchType.INTEGER.getTypeName()), + rscd("NUM_PREC_RADIX", ElasticsearchType.INTEGER.getTypeName()), + rscd("NULLABLE", ElasticsearchType.INTEGER.getTypeName()), + rscd("REMARKS"), + rscd("ATTR_DEF"), + rscd("SQL_DATA_TYPE", ElasticsearchType.INTEGER.getTypeName()), + rscd("SQL_DATETIME_SUB", ElasticsearchType.INTEGER.getTypeName()), + rscd("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER.getTypeName()), + rscd("ORDINAL_POSITION", ElasticsearchType.INTEGER.getTypeName()), + rscd("IS_NULLABLE"), + rscd("SCOPE_CATALOG"), + rscd("SCOPE_SCHEMA"), + rscd("SCOPE_TABLE"), + rscd("SOURCE_DATA_TYPE", ElasticsearchType.SHORT.getTypeName()) + ); + } + + @Override + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public int getDatabaseMajorVersion() throws SQLException { + return connection.getClusterMetadata().getVersion().getMajor(); + } + + @Override + public int getDatabaseMinorVersion() throws SQLException { + return connection.getClusterMetadata().getVersion().getMinor(); + } + + @Override + public int getJDBCMajorVersion() throws SQLException { + return 4; + } + + @Override + public int getJDBCMinorVersion() throws SQLException { + return 2; + } + + @Override + public int getSQLStateType() throws SQLException { 
+ return DatabaseMetaData.sqlStateSQL; + } + + @Override + public boolean locatorsUpdateCopy() throws SQLException { + return true; + } + + @Override + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + @Override + public RowIdLifetime getRowIdLifetime() throws SQLException { + return RowIdLifetime.ROWID_UNSUPPORTED; + } + + @Override + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + @Override + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + @Override + public ResultSet getClientInfoProperties() throws SQLException { + throw new SQLException("Client info not implemented yet"); + } + + @Override + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { + return emptyResultSet(log, + rscd("FUNCTION_CAT"), + rscd("FUNCTION_SCHEM"), + rscd("FUNCTION_NAME"), + rscd("REMARKS"), + rscd("FUNCTION_TYPE", ElasticsearchType.SHORT.getTypeName()), + rscd("SPECIFIC_NAME") + ); + } + + @Override + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) + throws SQLException { + return emptyResultSet(log, + rscd("FUNCTION_CAT"), + rscd("FUNCTION_SCHEM"), + rscd("FUNCTION_NAME"), + rscd("COLUMN_NAME"), + rscd("COLUMN_TYPE", ElasticsearchType.SHORT.getTypeName()), + rscd("DATA_TYPE", ElasticsearchType.INTEGER.getTypeName()), + rscd("TYPE_NAME"), + rscd("PRECISION", ElasticsearchType.INTEGER.getTypeName()), + rscd("LENGTH", ElasticsearchType.INTEGER.getTypeName()), + rscd("SCALE", ElasticsearchType.SHORT.getTypeName()), + rscd("RADIX", ElasticsearchType.SHORT.getTypeName()), + rscd("NULLABLE", ElasticsearchType.SHORT.getTypeName()), + rscd("REMARKS"), + rscd("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER.getTypeName()), + rscd("ORDINAL_POSITION", ElasticsearchType.INTEGER.getTypeName()), + rscd("IS_NULLABLE"), 
+ rscd("SPECIFIC_NAME") + ); + } + + @Override + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + + return emptyResultSet(log, + rscd("TABLE_CAT"), + rscd("TABLE_SCHEM"), + rscd("TABLE_NAME"), + rscd("COLUMN_NAME"), + rscd("DATA_TYPE", ElasticsearchType.INTEGER.getTypeName()), + rscd("COLUMN_SIZE", ElasticsearchType.INTEGER.getTypeName()), + rscd("DECIMAL_DIGITS", ElasticsearchType.INTEGER.getTypeName()), + rscd("NUM_PREC_RADIX", ElasticsearchType.INTEGER.getTypeName()), + rscd("COLUMN_USAGE"), + rscd("REMARKS"), + rscd("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER.getTypeName()), + rscd("IS_NULLABLE") + ); + } + + @Override + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + static ResultSetColumnDescriptor rscd(String name) { + return rscd(name, ElasticsearchType.TEXT.getTypeName(), null); + } + + static ResultSetColumnDescriptor rscd(String name, String type) { + return rscd(name, type, null); + } + + static ResultSetColumnDescriptor rscd(String name, String type, String label) { + return new ResultSetColumnDescriptor(name, type, label); + } + + private String getClusterCatalogName() throws SQLException { + return connection.getClusterMetadata().getClusterName(); + } + + private boolean clusterCatalogMatches(String catalog) throws SQLException { + return catalog == null || "%".equalsIgnoreCase(catalog) || catalog.equalsIgnoreCase(getClusterCatalogName()); + } + + private boolean clusterSchemaMatches(String schema) { + return schema == null || schema.equals("%") || schema.equals(""); + } + + private static ResultSetImpl emptyResultSet(Logger log, ResultSetColumnDescriptor... 
resultSetColumnDescriptors) + throws SQLException { + List> rows = new ArrayList<>(0); + return new ResultSetImpl(null, Arrays.asList(resultSetColumnDescriptors), rows, log); + } + + public static class ResultSetColumnDescriptor implements ColumnDescriptor { + + private String name; + private String type; + private String label; + + public ResultSetColumnDescriptor(String name, String type, String label) { + this.name = name; + this.type = type; + this.label = label; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getType() { + return type; + } + + @Override + public String getLabel() { + return label; + } + } + + + static class ColumnMetadataStatement extends PreparedStatementImpl { + // a special statement with custom logic for building the + // ResultSet it returns on execution + + ColumnMetadataStatement(ConnectionImpl connection, String tableNamePattern, String columnNamePattern, Logger log) + throws SQLException { + // TODO - once sql plugin supports PreparedStatement fully, do this through a preparedStatement with params + super(connection, "DESCRIBE TABLES LIKE " + tableNamePattern + + (columnNamePattern != null ? 
(" COLUMNS LIKE " + columnNamePattern) : ""), + log); + } + + static class ColumnMetadataResultSet extends ResultSetImpl { + + ColumnMetadataResultSet(StatementImpl statement, List columnDescriptors, + List> dataRows, Logger log) throws SQLException { + super(statement, columnDescriptors, dataRows, log); + } + + private int getDataType() { + String esDataType = (String) cursor.getColumn(5); + return ElasticsearchType.fromTypeName(esDataType, false).sqlTypeNumber(); + } + + private String getDataTypeName() { + String esDataType = (String) cursor.getColumn(5); + return ElasticsearchType.fromTypeName(esDataType, false).name(); + } + + private int getColumnSize() { + String esDataType = (String) cursor.getColumn(5); + return ElasticsearchType.fromTypeName(esDataType, false).getPrecision(); + } + + @Override + protected Object getColumnFromCursor(int columnIndex) { + // override behavior/return value of some of the columns + // received from the server + Object columnData = null; + + switch (columnIndex) { + case 5: + columnData = getDataType(); + break; + case 6: + columnData = getDataTypeName(); + break; + case 7: + columnData = getColumnSize(); + break; + default: + columnData = super.getColumnFromCursor(columnIndex); + + } + + return columnData; + } + } + + @Override + protected ResultSetImpl buildResultSet(QueryResponse queryResponse) throws SQLException { + // enrich/update the resultSet with some JDBC specific data type info + List columnDescriptors = new ArrayList<>(); + + for (ColumnDescriptor cd : queryResponse.getColumnDescriptors()) { + if ("DATA_TYPE".equals(cd.getName()) || "COLUMN_SIZE".equals(cd.getName())) { + columnDescriptors.add( + rscd(cd.getName(), ElasticsearchType.INTEGER.getTypeName())); + } else { + columnDescriptors.add(rscd(cd.getName(), cd.getType())); + } + } + + return new ColumnMetadataResultSet(this, columnDescriptors, queryResponse.getDatarows(), log); + } + } +} diff --git 
a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/Driver.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/Driver.java new file mode 100644 index 0000000000..03e8a7420e --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/Driver.java @@ -0,0 +1,113 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.internal.util.UrlParser; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource; +import com.amazon.opendistroforelasticsearch.jdbc.internal.Version; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggerFactory; +import com.amazon.opendistroforelasticsearch.jdbc.logging.NoOpLogger; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.DriverPropertyInfo; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.Properties; +import java.util.logging.Logger; + +public class Driver implements java.sql.Driver, LoggingSource { + + // + // Register with the DriverManager + // + static { + try { + java.sql.DriverManager.registerDriver(new Driver()); + } catch (SQLException E) { + throw new RuntimeException("Can't register driver!"); + } + } + + @Override + public Connection 
connect(String url, Properties info) throws SQLException { + ConnectionConfig connectionConfig = ConnectionConfig.builder() + .setUrl(url) + .setProperties(info) + .build(); + com.amazon.opendistroforelasticsearch.jdbc.logging.Logger log = initLog(connectionConfig); + log.debug(() -> logMessage("connect (%s, %s)", url, info == null ? "null" : info.toString())); + log.debug(() -> logMessage("Opening connection using config: %s", connectionConfig)); + return new ConnectionImpl(connectionConfig, log); + } + + @Override + public boolean acceptsURL(String url) throws SQLException { + return UrlParser.isAcceptable(url); + } + + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + // TODO - implement this? + return new DriverPropertyInfo[0]; + } + + @Override + public int getMajorVersion() { + return Version.Current.getMajor(); + } + + @Override + public int getMinorVersion() { + return Version.Current.getMinor(); + } + + @Override + public boolean jdbcCompliant() { + return false; + } + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } + + static com.amazon.opendistroforelasticsearch.jdbc.logging.Logger initLog(ConnectionConfig connectionConfig) { + // precedence: + // 1. explicitly supplied logWriter + // 2. logOutput property + // 3. 
DriverManager logWriter + if (connectionConfig.getLogWriter() != null) { + + return LoggerFactory.getLogger(connectionConfig.getLogWriter(), connectionConfig.getLogLevel()); + + } else if (connectionConfig.getLogOutput() != null) { + + return LoggerFactory.getLogger(connectionConfig.getLogOutput(), connectionConfig.getLogLevel()); + + } else if (DriverManager.getLogWriter() != null) { + + return LoggerFactory.getLogger(DriverManager.getLogWriter(), connectionConfig.getLogLevel()); + + } else { + + return NoOpLogger.INSTANCE; + } + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ElasticsearchConnection.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ElasticsearchConnection.java new file mode 100644 index 0000000000..60e75e6d7e --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ElasticsearchConnection.java @@ -0,0 +1,27 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
package com.amazon.opendistroforelasticsearch.jdbc;

import java.sql.SQLException;

/**
 * A {@link java.sql.Connection} to an Elasticsearch cluster, exposing
 * cluster-specific metadata in addition to the standard JDBC contract.
 */
public interface ElasticsearchConnection extends java.sql.Connection {

    /**
     * @return the name of the connected Elasticsearch cluster
     *
     * @throws SQLException if the name can not be retrieved
     */
    String getClusterName() throws SQLException;

    /**
     * @return the UUID of the connected Elasticsearch cluster
     *
     * @throws SQLException if the UUID can not be retrieved
     */
    String getClusterUUID() throws SQLException;

}
package com.amazon.opendistroforelasticsearch.jdbc;

import com.amazon.opendistroforelasticsearch.jdbc.config.AwsCredentialsProviderProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig;
import com.amazon.opendistroforelasticsearch.jdbc.config.LoginTimeoutConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.PasswordConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.UserConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.internal.JdbcWrapper;
import com.amazon.opendistroforelasticsearch.jdbc.internal.util.UrlParser;
import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource;
import com.amazonaws.auth.AWSCredentialsProvider;

import javax.sql.DataSource;
import java.io.PrintWriter;
import java.net.URISyntaxException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Logger;

/**
 * Elasticsearch {@link DataSource} implementation.
 *
 * Connection properties are expected to be included in a JDBC connection
 * URL and supplied to the DataSource via {@link #setUrl(String)}.
 *
 * Where properties like login timeout have explicit setters in the
 * DataSource API as well, the property value specified directly on the
 * DataSource overrides the corresponding property value in the JDBC URL.
 * Also, getter methods for such properties return the value that was
 * explicitly set on the DataSource, and not the value that might be
 * present on the JDBC URL.
 */
public class ElasticsearchDataSource implements DataSource, JdbcWrapper, LoggingSource {

    private String url;

    // Properties that may come from the URL and may also be directly set
    // on the DataSource are recorded in this map to help maintain the
    // precedence order of applying properties:
    //  - values directly set on the DataSource override values in the URL,
    //  - if no value is explicitly set on the DataSource, the value in the
    //    URL is applied.
    private Map<String, Object> connectionProperties = new HashMap<>();
    private PrintWriter logWriter;

    @Override
    public Connection getConnection() throws SQLException {
        return getConnection((Map<String, Object>) null);
    }

    @Override
    public Connection getConnection(String username, String password) throws SQLException {
        Map<String, Object> overrideProperties = new HashMap<>();
        overrideProperties.put(UserConnectionProperty.KEY, username);
        overrideProperties.put(PasswordConnectionProperty.KEY, password);

        return getConnection(overrideProperties);
    }

    @Override
    public PrintWriter getLogWriter() throws SQLException {
        return logWriter;
    }

    @Override
    public void setLogWriter(PrintWriter out) throws SQLException {
        this.logWriter = out;
    }

    @Override
    public void setLoginTimeout(int seconds) throws SQLException {
        // property parsed here to ensure fail-fast behavior with property validation
        LoginTimeoutConnectionProperty property = new LoginTimeoutConnectionProperty();
        property.setRawValue(seconds);
        connectionProperties.put(LoginTimeoutConnectionProperty.KEY, property.getValue());
    }

    @Override
    public int getLoginTimeout() throws SQLException {
        return (Integer) getConnectionProperty(LoginTimeoutConnectionProperty.KEY, -1);
    }

    @Override
    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
        throw new SQLFeatureNotSupportedException("DataSource does not use java.util.logging");
    }

    /**
     * Sets the JDBC connection URL for the DataSource.
     *
     * @param url the jdbc connection url to use for establishing and
     *         configuring a connection
     *
     * @throws SQLException if there is a problem in setting the url
     */
    public void setUrl(String url) throws SQLException {
        this.url = url;
        try {
            // fail-fast on invalid url
            UrlParser.parseProperties(url);
        } catch (URISyntaxException e) {
            throw new SQLException("Invalid connection URL", e);
        }
    }

    public void setAwsCredentialProvider(AWSCredentialsProvider awsCredentialProvider) {
        connectionProperties.put(AwsCredentialsProviderProperty.KEY, awsCredentialProvider);
    }

    /**
     * Updates DataSource configuration properties from the specified
     * {@link Properties} object.
     *
     * All properties already set on the DataSource - either from a
     * prior call to this method or any other setters on this DataSource -
     * are discarded, and the properties passed in the specified
     * {@link Properties} object are applied. All properties in the
     * properties object, including any default values, get applied on the
     * DataSource.
     *
     * @param properties The property object containing properties to
     *         apply.
     *
     * @throws SQLException if there is a problem in applying the
     *         specified properties
     */
    public void setProperties(Properties properties) throws SQLException {
        this.connectionProperties.clear();

        if (properties != null) {
            Enumeration<?> propertyNames = properties.propertyNames();
            while (propertyNames.hasMoreElements()) {
                String propertyName = (String) propertyNames.nextElement();
                this.connectionProperties.put(propertyName, properties.getProperty(propertyName));
            }
        }
    }

    public String getUrl() throws SQLException {
        return url;
    }

    // Returns the explicitly set property value, or defaultValue when absent.
    private Object getConnectionProperty(String key, Object defaultValue) {
        return connectionProperties.getOrDefault(key, defaultValue);
    }

    // Builds a connection using the accumulated configuration, with
    // overrideProperties (may be null) taking highest precedence.
    private Connection getConnection(Map<String, Object> overrideProperties)
            throws SQLException {
        ConnectionConfig connectionConfig = getConnectionConfig(overrideProperties);
        com.amazon.opendistroforelasticsearch.jdbc.logging.Logger log = Driver.initLog(connectionConfig);
        log.debug(() -> logMessage("Opening connection using config: %s", connectionConfig));
        return new ConnectionImpl(connectionConfig, log);
    }

    ConnectionConfig getConnectionConfig(Map<String, Object> overrideProperties)
            throws SQLException {
        return ConnectionConfig.builder()
                .setUrl(url)
                .setPropertyMap(connectionProperties)
                .setLogWriter(logWriter)
                .overrideProperties(overrideProperties)
                .build();
    }
}
package com.amazon.opendistroforelasticsearch.jdbc;

/**
 * Version information reported for an Elasticsearch cluster, broken down
 * into its numeric components.
 */
public interface ElasticsearchVersion {

    /** @return the major version component */
    int getMajor();

    /** @return the minor version component */
    int getMinor();

    /** @return the revision (patch) version component */
    int getRevision();

    /** @return the complete version string */
    String getFullVersion();
}
package com.amazon.opendistroforelasticsearch.jdbc;

import com.amazon.opendistroforelasticsearch.jdbc.protocol.JdbcQueryRequest;
import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType;
import com.amazon.opendistroforelasticsearch.jdbc.types.TypeConverters;
import com.amazon.opendistroforelasticsearch.jdbc.internal.util.SqlParser;
import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger;
import com.amazon.opendistroforelasticsearch.jdbc.protocol.JdbcDateTimeFormatter;
import com.amazon.opendistroforelasticsearch.jdbc.protocol.JdbcQueryParam;

import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.JDBCType;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLDataException;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.SQLNonTransientException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.Arrays;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;

/**
 * Elasticsearch implementation of {@link PreparedStatement}.
 *
 * Parameter markers ("?") in the SQL text are counted at construction
 * time; each setXxx call records a typed {@link JdbcQueryParam} at the
 * corresponding 1-based position, and all positions must be filled
 * before the statement can be executed.
 */
public class PreparedStatementImpl extends StatementImpl implements PreparedStatement {
    // TODO - support String representations containing Timestamp With Timezone

    // SQL text of this prepared statement, containing '?' markers
    protected String sql;

    // one slot per '?' marker; null until set by the caller
    private JdbcQueryParam[] parameters;

    /**
     * Prepares a statement for the given SQL text.
     *
     * @param connection owning connection
     * @param sql SQL text possibly containing '?' parameter markers
     * @param log logger for this statement
     *
     * @throws SQLException if the SQL text can not be parsed for
     *         parameter markers
     */
    public PreparedStatementImpl(ConnectionImpl connection, String sql, Logger log) throws SQLException {
        super(connection, log);
        this.sql = sql;

        try {
            parameters = new JdbcQueryParam[SqlParser.countParameterMarkers(sql)];
        } catch (IllegalArgumentException iae) {
            logAndThrowSQLException(
                    log,
                    new SQLNonTransientException("Error preparing SQL statement: " + iae.getMessage(), iae));
        }
    }

    @Override
    public ResultSet executeQuery() throws SQLException {
        log.debug(() -> logEntry("executeQuery()"));
        checkOpen();
        ResultSet rs = executeQueryX(getFetchSize());
        log.debug(() -> logExit("executeQuery", rs));
        return rs;
    }

    // Validates all parameters are filled, then sends the query request.
    protected ResultSet executeQueryX(int fetchSize) throws SQLException {
        checkParamsFilled();
        JdbcQueryRequest jdbcQueryRequest = new JdbcQueryRequest(sql, fetchSize);
        jdbcQueryRequest.setParameters(Arrays.asList(parameters));
        return executeQueryRequest(jdbcQueryRequest);
    }

    @Override
    public int executeUpdate() throws SQLException {
        throw new SQLFeatureNotSupportedException("Updates are not supported");
    }

    @Override
    public void setNull(int parameterIndex, int sqlType) throws SQLException {
        log.debug(() -> logEntry("setNull(%d, %d)", parameterIndex, sqlType));
        checkOpen();
        setParameter(parameterIndex, ElasticsearchType.fromJdbcType(JDBCType.valueOf(sqlType)).getTypeName(), null);
        log.debug(() -> logExit("setNull"));
    }

    @Override
    public void setBoolean(int parameterIndex, boolean x) throws SQLException {
        log.debug(() -> logEntry("setBoolean(%d, %s)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.BOOLEAN);
        log.debug(() -> logExit("setBoolean"));
    }

    @Override
    public void setByte(int parameterIndex, byte x) throws SQLException {
        log.debug(() -> logEntry("setByte(%d, %d)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.TINYINT);
        log.debug(() -> logExit("setByte"));
    }

    @Override
    public void setShort(int parameterIndex, short x) throws SQLException {
        log.debug(() -> logEntry("setShort(%d, %d)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.SMALLINT);
        log.debug(() -> logExit("setShort"));
    }

    @Override
    public void setInt(int parameterIndex, int x) throws SQLException {
        log.debug(() -> logEntry("setInt(%d, %d)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.INTEGER);
        log.debug(() -> logExit("setInt"));
    }

    @Override
    public void setLong(int parameterIndex, long x) throws SQLException {
        log.debug(() -> logEntry("setLong(%d, %d)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.BIGINT);
        log.debug(() -> logExit("setLong"));
    }

    @Override
    public void setFloat(int parameterIndex, float x) throws SQLException {
        log.debug(() -> logEntry("setFloat(%d, %f)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.REAL);
        log.debug(() -> logExit("setFloat"));
    }

    @Override
    public void setDouble(int parameterIndex, double x) throws SQLException {
        log.debug(() -> logEntry("setDouble(%d, %f)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.DOUBLE);
        log.debug(() -> logExit("setDouble"));
    }

    @Override
    public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
        throw new SQLFeatureNotSupportedException("BigDecimal is not supported");
    }

    @Override
    public void setString(int parameterIndex, String x) throws SQLException {
        log.debug(() -> logEntry("setString(%d, %s)", parameterIndex, x));
        checkOpen();
        setParameter(parameterIndex, ElasticsearchType.fromJdbcType(JDBCType.VARCHAR).getTypeName(), x);
        log.debug(() -> logExit("setString"));
    }

    @Override
    public void setBytes(int parameterIndex, byte[] x) throws SQLException {
        throw new SQLFeatureNotSupportedException("Setting byte arrays is not supported");
    }

    @Override
    public void setDate(int parameterIndex, Date x) throws SQLException {
        log.debug(() -> logEntry("setDate(%d, %s)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.DATE);
        log.debug(() -> logExit("setDate"));
    }

    @Override
    public void setTime(int parameterIndex, Time x) throws SQLException {
        throw new SQLFeatureNotSupportedException("Setting Time is not supported");
    }

    @Override
    public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
        log.debug(() -> logEntry("setTimestamp(%d, %s)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, Types.TIMESTAMP);
        log.debug(() -> logExit("setTimestamp"));
    }

    @Override
    public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
        throw new SQLFeatureNotSupportedException("Setting ASCII Stream is not supported");
    }

    @Override
    public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
        throw new SQLFeatureNotSupportedException("Setting Unicode Stream is not supported");
    }

    @Override
    public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
        throw new SQLFeatureNotSupportedException("Setting Binary Stream is not supported");
    }

    @Override
    public void clearParameters() throws SQLException {
        log.debug(() -> logEntry("clearParameters()"));
        // Arrays.fill replaces the original manual null-out loop.
        Arrays.fill(parameters, null);
        log.debug(() -> logExit("clearParameters"));
    }

    @Override
    public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
        log.debug(() -> logEntry("setObject(%d, %s, %d)", parameterIndex, x, targetSqlType));
        checkOpen();
        setObjectX(parameterIndex, x, targetSqlType, null);
        log.debug(() -> logExit("setObject"));
    }

    private void setObjectX(int parameterIndex, Object x, int targetSqlType) throws SQLException {
        setObjectX(parameterIndex, x, targetSqlType, null);
    }

    // Converts x to the Elasticsearch type mapped from targetSqlType and
    // records it. Date/time values flow in JDBC escape format.
    private void setObjectX(int parameterIndex, Object x, int targetSqlType, Map<String, Object> conversionParams)
            throws SQLException {
        JDBCType jdbcType = JDBCType.valueOf(targetSqlType);
        ElasticsearchType esType = ElasticsearchType.fromJdbcType(jdbcType);

        Object value = TypeConverters.getInstance(jdbcType).convert(x, null, conversionParams);

        // flow date/times in JDBC escape format
        if (jdbcType == JDBCType.TIMESTAMP) {
            value = JdbcDateTimeFormatter.JDBC_FORMAT.format((Timestamp) value);
        } else if (jdbcType == JDBCType.DATE) {
            value = JdbcDateTimeFormatter.JDBC_FORMAT.format((Date) value);
        }

        setParameter(parameterIndex, esType.getTypeName(), value);
    }

    @Override
    public void setObject(int parameterIndex, Object x) throws SQLException {
        log.debug(() -> logEntry("setObject(%d, %s)", parameterIndex, x));
        checkOpen();
        setObjectX(parameterIndex, x, javaToSqlType(x));
        log.debug(() -> logExit("setObject"));
    }

    // Maps a Java object to its default JDBCType, based on JDBC spec Table B-4.
    private JDBCType javaToJDBCType(Object x) throws SQLException {
        // TODO support java.time.* and JDBCType.TIME_WITH_TIMEZONE/JDBCType.TIMESTAMP_WITH_TIMEZONE

        if (x instanceof String)
            return JDBCType.VARCHAR;
        else if (x instanceof Boolean)
            return JDBCType.BOOLEAN;
        else if (x instanceof Byte)
            return JDBCType.TINYINT;
        else if (x instanceof Short)
            return JDBCType.SMALLINT;
        else if (x instanceof Integer)
            return JDBCType.INTEGER;
        else if (x instanceof Long)
            return JDBCType.BIGINT;
        else if (x instanceof Float)
            return JDBCType.REAL;
        else if (x instanceof Double)
            return JDBCType.DOUBLE;
        else if (x instanceof byte[])
            return JDBCType.VARBINARY;
        else if (x instanceof java.sql.Date)
            return JDBCType.DATE;
        else if (x instanceof java.sql.Timestamp)
            return JDBCType.TIMESTAMP;
        else
            throw new SQLDataException("Objects of type " + x.getClass().getName() + " not supported.");
    }

    private int javaToSqlType(Object x) throws SQLException {
        return javaToJDBCType(x).getVendorTypeNumber();
    }

    @Override
    public boolean execute() throws SQLException {
        log.debug(() -> logEntry("execute()"));
        checkOpen();
        executeQueryX(getFetchSize());
        log.debug(() -> logExit("execute", true));
        return true;
    }

    @Override
    public void addBatch() throws SQLException {
        throw new SQLFeatureNotSupportedException("addBatch is not supported");
    }

    @Override
    public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setCharacterStream is not supported");
    }

    @Override
    public void setRef(int parameterIndex, Ref x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setRef is not supported");
    }

    @Override
    public void setBlob(int parameterIndex, Blob x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setBlob is not supported");
    }

    @Override
    public void setClob(int parameterIndex, Clob x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setClob is not supported");
    }

    @Override
    public void setArray(int parameterIndex, Array x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setArray is not supported");
    }

    @Override
    public ResultSetMetaData getMetaData() throws SQLException {
        // can only return metadata after query execution
        log.debug(() -> logEntry("getMetaData ()"));
        ResultSetMetaData rsmd = resultSet != null ? resultSet.getMetaData() : null;
        log.debug(() -> logExit("getMetaData", rsmd));
        return rsmd;
    }

    @Override
    public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
        log.debug(() -> logEntry("setDate (%d, %s, %s)", parameterIndex, x,
                cal == null ? "null" : "Calendar TZ= " + cal.getTimeZone()));
        checkOpen();
        Map<String, Object> conversionParams = new HashMap<>();
        conversionParams.put("calendar", cal);

        setObjectX(parameterIndex, x, Types.DATE, conversionParams);
        log.debug(() -> logExit("setDate"));
    }

    @Override
    public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
        // TODO - implement
        // FIXME(review): silent no-op — callers get no indication the value
        // was dropped; consider throwing SQLFeatureNotSupportedException.
    }

    @Override
    public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
        log.debug(() -> logEntry("setTimestamp (%d, %s, %s)", parameterIndex, x,
                cal == null ? "null" : "Calendar TZ= " + cal.getTimeZone()));
        checkOpen();
        Map<String, Object> conversionParams = new HashMap<>();
        conversionParams.put("calendar", cal);
        setObjectX(parameterIndex, x, Types.TIMESTAMP, conversionParams);
        log.debug(() -> logExit("setTimestamp"));
    }

    @Override
    public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
        // TODO - implement?
        // FIXME(review): silent no-op — see setTime(int, Time, Calendar).
    }

    @Override
    public void setURL(int parameterIndex, URL x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setURL not supported");
    }

    @Override
    public ParameterMetaData getParameterMetaData() throws SQLException {
        // can't determine parameterMetadata as we don't have a query
        // "prepare" phase that could return us this info from the server
        // where the query gets parsed
        return null;
    }

    @Override
    public void setRowId(int parameterIndex, RowId x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setRowId not supported");
    }

    @Override
    public void setNString(int parameterIndex, String value) throws SQLException {
        throw new SQLFeatureNotSupportedException("setNString not supported");
    }

    @Override
    public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setNCharacterStream not supported");
    }

    @Override
    public void setNClob(int parameterIndex, NClob value) throws SQLException {
        throw new SQLFeatureNotSupportedException("setNClob not supported");
    }

    @Override
    public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setClob not supported");
    }

    @Override
    public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setBlob not supported");
    }

    @Override
    public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setNClob not supported");
    }

    @Override
    public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
        throw new SQLFeatureNotSupportedException("setSQLXML not supported");
    }

    @Override
    public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
        // currently ignore scaleOrLength
        setObjectX(parameterIndex, x, targetSqlType);
    }

    @Override
    public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setAsciiStream not supported");
    }

    @Override
    public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setBinaryStream not supported");
    }

    @Override
    public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
        throw new SQLFeatureNotSupportedException("setCharacterStream not supported");
    }

    @Override
    public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setAsciiStream not supported");
    }

    @Override
    public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
        throw new SQLFeatureNotSupportedException("setBinaryStream not supported");
    }

    @Override
    public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
        throw new SQLFeatureNotSupportedException("setCharacterStream not supported");
    }

    @Override
    public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
        throw new SQLFeatureNotSupportedException("setNCharacterStream not supported");
    }

    @Override
    public void setClob(int parameterIndex, Reader reader) throws SQLException {
        throw new SQLFeatureNotSupportedException("setClob not supported");
    }

    @Override
    public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
        throw new SQLFeatureNotSupportedException("setBlob not supported");
    }

    @Override
    public void setNClob(int parameterIndex, Reader reader) throws SQLException {
        throw new SQLFeatureNotSupportedException("setNClob not supported");
    }

    // Throws when any parameter marker has not been given a value.
    private void checkParamsFilled() throws SQLException {
        int filled = 0;

        for (int i = 0; i < parameters.length; i++) {
            if (parameters[i] != null) {
                filled++;
            }
        }

        if (filled < parameters.length)
            logAndThrowSQLException(log, new SQLDataException(
                    String.format("Missing parameter values. The PreparedStatement " +
                                    "requires %d parameter values but only %d were found.",
                            parameters.length, filled)));
    }

    // Records a typed parameter value at the given 1-based index.
    protected void setParameter(int index, String type, Object value) throws SQLException {
        checkParamIndex(index);
        parameters[index - 1] = new JdbcQueryParam(type, value);
    }

    private void checkParamIndex(int index) throws SQLException {
        if (parameters == null || index < 1 || index > parameters.length)
            logAndThrowSQLException(log, new SQLDataException("Invalid parameter index " + index));
    }
}
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.ColumnMetaData; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Cursor; +import com.amazon.opendistroforelasticsearch.jdbc.internal.exceptions.ObjectClosedException; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Row; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Schema; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource; +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ColumnDescriptor; +import com.amazon.opendistroforelasticsearch.jdbc.internal.JdbcWrapper; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.InternalServerErrorException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JdbcCursorQueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonCursorHttpProtocol; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonCursorHttpProtocolFactory; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpTransport; +import com.amazon.opendistroforelasticsearch.jdbc.types.TypeConverter; +import com.amazon.opendistroforelasticsearch.jdbc.types.TypeConverters; +import com.amazon.opendistroforelasticsearch.jdbc.types.UnrecognizedElasticsearchTypeException; + +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import 
java.sql.SQLDataException; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLNonTransientException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Elasticsearch implementaion for a {@link ResultSet} + *

+ * Column names or labels received in APIs are treated in a + * case-sensitive manner since Elasticsearch field names are + * case-sensitive. + *

+ * The lookup + */ +public class ResultSetImpl implements ResultSet, JdbcWrapper, LoggingSource { + + private StatementImpl statement; + protected Cursor cursor; + private String cursorId; + private boolean open = false; + private boolean wasNull = false; + private boolean afterLast = false; + private boolean beforeFirst = true; + private Logger log; + + public ResultSetImpl(StatementImpl statement, QueryResponse queryResponse, Logger log) throws SQLException { + this(statement, queryResponse.getColumnDescriptors(), queryResponse.getDatarows(), queryResponse.getCursor(), log); + } + + public ResultSetImpl(StatementImpl statement, List columnDescriptors, + List> dataRows, Logger log) throws SQLException { + this(statement, columnDescriptors, dataRows, null, log); + } + + public ResultSetImpl(StatementImpl statement, List columnDescriptors, + List> dataRows, String cursorId, Logger log) throws SQLException { + this.statement = statement; + this.log = log; + + final Schema schema; + try { + schema = new Schema(columnDescriptors + .stream() + .map(ColumnMetaData::new) + .collect(Collectors.toList())); + + List rows = getRowsFromDataRows(dataRows); + + this.cursor = new Cursor(schema, rows); + this.cursorId = cursorId; + this.open = true; + + } catch (UnrecognizedElasticsearchTypeException ex) { + logAndThrowSQLException(log, new SQLException("Exception creating a ResultSet.", ex)); + } + + } + + @Override + public boolean next() throws SQLException { + log.debug(() -> logEntry("next()")); + checkOpen(); + boolean next = cursor.next(); + + if (!next && this.cursorId != null) { + log.debug(() -> logEntry("buildNextPageFromCursorId()")); + buildNextPageFromCursorId(); + log.debug(() -> logExit("buildNextPageFromCursorId()")); + next = cursor.next(); + } + + if (next) { + beforeFirst = false; + } else { + afterLast = true; + } + boolean finalNext = next; + log.debug(() -> logExit("next", finalNext)); + return next; + } + + /** + * TODO: Refactor as suggested 
https://github.com/opendistro-for-elasticsearch/sql-jdbc/pull/76#discussion_r421571383 + * + * This method has side effects. It creates a new Cursor to hold rows from new pages. + * Ideally fetching next set of rows using cursorId should be delegated to Cursor. + * In addition, the cursor should be final. + * + **/ + protected void buildNextPageFromCursorId() throws SQLException { + try { + JdbcCursorQueryRequest jdbcCursorQueryRequest = new JdbcCursorQueryRequest(this.cursorId); + JsonCursorHttpProtocolFactory protocolFactory = JsonCursorHttpProtocolFactory.INSTANCE; + ConnectionImpl connection = (ConnectionImpl) statement.getConnection(); + + JsonCursorHttpProtocol protocol = protocolFactory.getProtocol(null, (HttpTransport) connection.getTransport()); + QueryResponse queryResponse = protocol.execute(jdbcCursorQueryRequest); + + if (queryResponse.getError() != null) { + throw new InternalServerErrorException( + queryResponse.getError().getReason(), + queryResponse.getError().getType(), + queryResponse.getError().getDetails()); + } + + cursor = new Cursor(cursor.getSchema(), getRowsFromDataRows(queryResponse.getDatarows())); + cursorId = queryResponse.getCursor(); + + } catch (ResponseException | IOException ex) { + logAndThrowSQLException(log, new SQLException("Error executing cursor query", ex)); + } + } + + private List getRowsFromDataRows(List> dataRows) { + return dataRows + .parallelStream() + .map(Row::new) + .collect(Collectors.toList()); + } + + @Override + public void close() throws SQLException { + log.debug(() -> logEntry("close()")); + closeX(true); + log.debug(() -> logExit("close")); + } + + protected void closeX(boolean closeStatement) throws SQLException { + cursor = null; + open = false; + if (statement != null) { + statement.resultSetClosed(this, closeStatement); + } + } + + @Override + public boolean wasNull() throws SQLException { + return wasNull; + } + + @Override + public String getString(int columnIndex) throws SQLException { + 
log.debug(() -> logEntry("getString (%d)", columnIndex)); + checkCursorOperationPossible(); + String value = getStringX(columnIndex); + log.debug(() -> logExit("getString", value)); + return value; + } + + private String getStringX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, String.class); + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getBoolean (%d)", columnIndex)); + checkCursorOperationPossible(); + boolean value = getBooleanX(columnIndex); + log.debug(() -> logExit("getBoolean", value)); + return value; + } + + private boolean getBooleanX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, Boolean.class); + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getByte (%d)", columnIndex)); + checkCursorOperationPossible(); + byte value = getByteX(columnIndex); + log.debug(() -> logExit("getByte", value)); + return value; + } + + private byte getByteX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, Byte.class); + } + + @Override + public short getShort(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getShort (%d)", columnIndex)); + checkCursorOperationPossible(); + short value = getShortX(columnIndex); + log.debug(() -> logExit("getShort", value)); + return value; + } + + private short getShortX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, Short.class); + } + + @Override + public int getInt(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getInt (%d)", columnIndex)); + checkCursorOperationPossible(); + int value = getIntX(columnIndex); + log.debug(() -> logExit("getInt", value)); + return value; + } + + private int getIntX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, Integer.class); + } + + @Override + public long getLong(int columnIndex) throws SQLException { + log.debug(() -> 
logEntry("getLong (%d)", columnIndex)); + checkCursorOperationPossible(); + long value = getLongX(columnIndex); + log.debug(() -> logExit("getLong", value)); + return value; + } + + private long getLongX(int columnIndex) throws SQLException { + checkCursorOperationPossible(); + return getObjectX(columnIndex, Long.class); + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getFloat (%d)", columnIndex)); + checkCursorOperationPossible(); + float value = getFloatX(columnIndex); + log.debug(() -> logExit("getFloat", value)); + return value; + } + + private float getFloatX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, Float.class); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getDouble (%d)", columnIndex)); + checkCursorOperationPossible(); + double value = getDoubleX(columnIndex); + log.debug(() -> logExit("getDouble", value)); + return value; + } + + private double getDoubleX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, Double.class); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + log.debug(() -> logEntry("getBigDecimal (%d, %d)", columnIndex, scale)); + checkCursorOperationPossible(); + BigDecimal value = getBigDecimalX(columnIndex, scale); + log.debug(() -> logExit("getBigDecimal", value)); + return value; + } + + private BigDecimal getBigDecimalX(int columnIndex, int scale) throws SQLException { + checkOpen(); + // TODO - add support? + throw new SQLFeatureNotSupportedException("BigDecimal is not supported"); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getBytes (%d)", columnIndex)); + checkCursorOperationPossible(); + byte[] value = getBytesX(columnIndex); + log.debug(() -> logExit("getBytes", + String.format("%s, length(%s)", value, value != null ? 
value.length : 0))); + return value; + } + + private byte[] getBytesX(int columnIndex) throws SQLException { + // TODO - add ByteArrayType support + return getObjectX(columnIndex, byte[].class); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getDate (%d)", columnIndex)); + checkCursorOperationPossible(); + Date value = getDateX(columnIndex, null); + log.debug(() -> logExit("getDate", value)); + return value; + } + + private Date getDateX(int columnIndex, Calendar calendar) throws SQLException { + Map conversionParams = null; + if (calendar != null) { + conversionParams = new HashMap<>(); + conversionParams.put("calendar", calendar); + } + return getObjectX(columnIndex, Date.class, conversionParams); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getTime (%d)", columnIndex)); + checkCursorOperationPossible(); + Time value = getTimeX(columnIndex); + log.debug(() -> logExit("getTime", value)); + return value; + } + + private Time getTimeX(int columnIndex) throws SQLException { + // TODO - add/check support + return getObjectX(columnIndex, Time.class); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getTimestamp (%d)", columnIndex)); + checkCursorOperationPossible(); + Timestamp value = getTimestampX(columnIndex, null); + log.debug(() -> logExit("getTimestamp", value)); + return value; + } + + private Timestamp getTimestampX(int columnIndex, Calendar calendar) throws SQLException { + Map conversionParams = null; + if (calendar != null) { + conversionParams = new HashMap<>(); + conversionParams.put("calendar", calendar); + } + return getObjectX(columnIndex, Timestamp.class, conversionParams); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public 
InputStream getUnicodeStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public String getString(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getString (%s)", columnLabel)); + checkCursorOperationPossible(); + String value = getStringX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getString", value)); + return value; + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getBoolean (%s)", columnLabel)); + checkCursorOperationPossible(); + boolean value = getBooleanX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getBoolean", value)); + return value; + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getByte (%s)", columnLabel)); + checkCursorOperationPossible(); + byte value = getByteX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getByte", value)); + return value; + } + + @Override + public short getShort(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getShort (%s)", columnLabel)); + checkCursorOperationPossible(); + short value = getShortX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getShort", value)); + return value; + } + + @Override + public int getInt(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getInt (%s)", columnLabel)); + checkCursorOperationPossible(); + int value = getIntX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getInt", value)); + return value; + } + + @Override + public long getLong(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getLong (%s)", columnLabel)); + checkCursorOperationPossible(); + long value = 
getLongX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getLong", value)); + return value; + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getFloat (%s)", columnLabel)); + checkCursorOperationPossible(); + float value = getFloatX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getFloat", value)); + return value; + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getDouble (%s)", columnLabel)); + checkCursorOperationPossible(); + double value = getDoubleX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getDouble", value)); + return value; + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + log.debug(() -> logEntry("getBigDecimal (%s, %d)", columnLabel, scale)); + checkCursorOperationPossible(); + BigDecimal value = getBigDecimalX(getColumnIndex(columnLabel), scale); + log.debug(() -> logExit("getBigDecimal", value)); + return value; + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getBytes (%s)", columnLabel)); + checkCursorOperationPossible(); + byte[] value = getBytesX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getBytes", + String.format("%s, length(%s)", value, value != null ? 
value.length : 0))); + return value; + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getDate (%s)", columnLabel)); + checkCursorOperationPossible(); + Date value = getDateX(getColumnIndex(columnLabel), null); + log.debug(() -> logExit("getDate", value)); + return value; + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getTime (%s)", columnLabel)); + checkCursorOperationPossible(); + Time value = getTimeX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getTime", value)); + return value; + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getTimestamp (%s)", columnLabel)); + checkCursorOperationPossible(); + Timestamp value = getTimestampX(getColumnIndex(columnLabel), null); + log.debug(() -> logExit("getTimestamp", value)); + return value; + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkOpen(); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + checkOpen(); + } + + @Override + public String getCursorName() throws SQLException { + throw new SQLFeatureNotSupportedException("Cursor name is not supported"); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + checkOpen(); + return new ResultSetMetaDataImpl(this, cursor.getSchema()); + } + + @Override + public Object 
getObject(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getObject (%d)", columnIndex)); + checkCursorOperationPossible(); + Object value = getObjectX(columnIndex); + log.debug(() -> logExit("getObject", + value != null ? "(" + value.getClass().getName() + ") " + value : "null")); + return value; + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getObject (%s)", columnLabel)); + checkCursorOperationPossible(); + Object value = getObjectX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getObject", + value != null ? "(" + value.getClass().getName() + ") " + value : "null")); + return value; + } + + private Object getObjectX(int columnIndex) throws SQLException { + return getObjectX(columnIndex, (Class) null); + } + + protected T getObjectX(int columnIndex, Class javaClass) throws SQLException { + return getObjectX(columnIndex, javaClass, null); + } + + protected T getObjectX(int columnIndex, Class javaClass, Map conversionParams) throws SQLException { + Object value = getColumn(columnIndex); + TypeConverter tc = TypeConverters.getInstance(getColumnMetaData(columnIndex).getEsType().getJdbcType()); + return tc.convert(value, javaClass, conversionParams); + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + checkOpen(); + return getColumnIndex(columnLabel); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + // TODO - add support? 
+ checkOpen(); + throw new SQLFeatureNotSupportedException("BigDecimal is not supported"); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + // TODO - add support? + checkOpen(); + throw new SQLFeatureNotSupportedException("BigDecimal is not supported"); + } + + @Override + public boolean isBeforeFirst() throws SQLException { + checkOpen(); + return beforeFirst; + } + + @Override + public boolean isAfterLast() throws SQLException { + checkOpen(); + return afterLast; + } + + private boolean isBeforeFirstX() throws SQLException { + return beforeFirst; + } + + private boolean isAfterLastX() throws SQLException { + return afterLast; + } + + @Override + public boolean isFirst() throws SQLException { + return false; + } + + @Override + public boolean isLast() throws SQLException { + return false; + } + + @Override + public void beforeFirst() throws SQLException { + checkOpen(); + throw new SQLDataException("Illegal operation on ResultSet of type TYPE_FORWARD_ONLY"); + } + + @Override + public void afterLast() throws SQLException { + checkOpen(); + throw new SQLDataException("Illegal operation on ResultSet of type TYPE_FORWARD_ONLY"); + } + + @Override + public boolean first() throws SQLException { + checkOpen(); + throw new SQLDataException("Illegal operation on ResultSet of type TYPE_FORWARD_ONLY"); + } + + @Override + public boolean last() throws SQLException { + checkOpen(); + throw new SQLDataException("Illegal operation on ResultSet of type TYPE_FORWARD_ONLY"); + } + + @Override + public int getRow() throws SQLException { + // not supported yet + return 0; + } + + @Override + public boolean absolute(int row) throws SQLException { + checkOpen(); + throw new SQLDataException("Illegal operation on ResultSet of type TYPE_FORWARD_ONLY"); + } + + @Override + public boolean relative(int rows) throws SQLException { + checkOpen(); + throw new SQLDataException("Illegal operation on ResultSet of type TYPE_FORWARD_ONLY"); + } + + 
@Override + public boolean previous() throws SQLException { + checkOpen(); + throw new SQLDataException("Illegal operation on ResultSet of type TYPE_FORWARD_ONLY"); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + checkOpen(); + if (direction != ResultSet.FETCH_FORWARD) { + throw new SQLDataException("The ResultSet only supports FETCH_FORWARD direction"); + } + } + + @Override + public int getFetchDirection() throws SQLException { + checkOpen(); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + checkOpen(); + // no-op + } + + @Override + public int getFetchSize() throws SQLException { + checkOpen(); + return 0; + } + + @Override + public int getType() throws SQLException { + checkOpen(); + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public int getConcurrency() throws SQLException { + checkOpen(); + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public boolean rowUpdated() throws SQLException { + checkOpen(); + return false; + } + + @Override + public boolean rowInserted() throws SQLException { + checkOpen(); + return false; + } + + @Override + public boolean rowDeleted() throws SQLException { + checkOpen(); + return false; + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException 
{ + throw updatesNotSupportedException(); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + throw updatesNotSupportedException(); + 
} + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, 
InputStream x, int length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void insertRow() throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateRow() throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void deleteRow() throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void refreshRow() throws SQLException { + + } + + @Override + public void cancelRowUpdates() throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void moveToInsertRow() throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void moveToCurrentRow() throws SQLException { + + } + + @Override + public Statement getStatement() throws SQLException { + return statement; + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + log.debug(() -> logEntry("getObject (%d, %s)", columnIndex, map)); + + Object value = getObjectX(columnIndex, map); + + log.debug(() -> logExit("getObject", value)); + return value; + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref is not supported"); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob is not supported"); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + throw new 
SQLFeatureNotSupportedException("Clob is not supported"); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Array is not supported"); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + log.debug(() -> logEntry("getObject (%s, %s)", columnLabel, map)); + Object value = getObjectX(getColumnIndex(columnLabel), map); + log.debug(() -> logExit("getObject", value)); + return value; + } + + private Object getObjectX(int columnIndex, Map> map) throws SQLException { + String columnSQLTypeName = null; + Class targetClass = null; + if (map != null) { + columnSQLTypeName = getColumnMetaData(columnIndex).getEsType().getJdbcType().getName(); + targetClass = map.get(columnSQLTypeName); + } + + if (log.isDebugEnabled()) { + log.debug(logMessage("Column SQL Type is: %s. Target class retrieved from custom mapping: %s", + columnSQLTypeName, targetClass)); + } + return getObjectX(columnIndex, targetClass); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref is not supported"); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob is not supported"); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob is not supported"); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Array is not supported"); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + log.debug(() -> logEntry("getDate (%d, %s)", columnIndex, + cal == null ? 
"null" : "Calendar TZ= " + cal.getTimeZone())); + checkCursorOperationPossible(); + Date value = getDateX(columnIndex, cal); + log.debug(() -> logExit("getDate", value)); + return value; + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + log.debug(() -> logEntry("getDate (%s, %s)", columnLabel, + cal == null ? "null" : "Calendar TZ= " + cal.getTimeZone())); + checkCursorOperationPossible(); + Date value = getDateX(getColumnIndex(columnLabel), cal); + log.debug(() -> logExit("getDate", value)); + return value; + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + // TODO - implement? + return null; + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + // TODO - implement? + return null; + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + log.debug(() -> logEntry("getTimestamp (%d, %s)", columnIndex, + cal == null ? "null" : "Calendar TZ= " + cal.getTimeZone())); + checkCursorOperationPossible(); + Timestamp value = getTimestampX(columnIndex, cal); + log.debug(() -> logExit("getTimestamp", value)); + return value; + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + log.debug(() -> logEntry("getTimestamp (%s, %s)", columnLabel, + cal == null ? 
"null" : "Calendar TZ= " + cal.getTimeZone())); + checkCursorOperationPossible(); + Timestamp value = getTimestampX(getColumnIndex(columnLabel), cal); + log.debug(() -> logExit("getTimestamp", value)); + return value; + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + // TODO - implement + return null; + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + // TODO - implement + return null; + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId is not supported"); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId is not supported"); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateRowId(String 
columnLabel, RowId x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public int getHoldability() throws SQLException { + checkOpen(); + return HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public boolean isClosed() throws SQLException { + return !open; + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob is not supported"); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob is not supported"); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML is not supported"); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML is not supported"); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + log.debug(() -> logEntry("getNString (%d)", columnIndex)); + String value = getStringX(columnIndex); + log.debug(() -> logExit("getNString", value)); + return 
value; + } + + @Override + public String getNString(String columnLabel) throws SQLException { + log.debug(() -> logEntry("getNString (%s)", columnLabel)); + String value = getStringX(getColumnIndex(columnLabel)); + log.debug(() -> logExit("getNString", value)); + return value; + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Streams are not supported"); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws 
SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw updatesNotSupportedException(); 
+ } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + log.debug(() -> logEntry("getObject(%d, %s)", columnIndex, type)); + T value = getObjectX(columnIndex, type); + log.debug(() -> logExit("getObject", value)); + return value; + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + log.debug(() -> logEntry("getObject(%d, %s)", columnLabel, type)); + T value = getObjectX(getColumnIndex(columnLabel), type); + log.debug(() -> logExit("getObject", value)); + return value; + } + + private int getColumnIndex(String columnLabel) throws SQLException { + Integer index = cursor.findColumn(columnLabel); + + if (index == null) + logAndThrowSQLException(log, new SQLDataException("Column '" + columnLabel + "' not found.")); + + // +1 to adjust for JDBC indices that start from 1 + return index + 1; + } + + protected Object getColumn(int columnIndex) throws SQLException { + checkColumnIndex(columnIndex); + Object columnData = getColumnFromCursor(columnIndex); + + wasNull = (columnData == null); + return columnData; + } + + protected Object getColumnFromCursor(int 
columnIndex) { + return cursor.getColumn(columnIndex - 1); + } + + private ColumnMetaData getColumnMetaData(int columnIndex) throws SQLException { + checkColumnIndex(columnIndex); + return cursor.getSchema().getColumnMetaData(columnIndex - 1); + } + + protected void checkColumnIndex(int columnIndex) throws SQLException { + if (columnIndex < 1 || columnIndex > cursor.getColumnCount()) + logAndThrowSQLException(log, new SQLDataException("Column index out of range.")); + } + + protected void checkCursorOperationPossible() throws SQLException { + checkOpen(); + checkValidCursorPosition(); + } + + protected void checkOpen() throws SQLException { + if (isClosed()) { + logAndThrowSQLException(log, new ObjectClosedException("ResultSet closed.")); + } + } + + private void checkValidCursorPosition() throws SQLException { + if (isBeforeFirstX()) + logAndThrowSQLException(log, new SQLNonTransientException("Illegal operation before start of ResultSet.")); + else if (isAfterLastX()) + logAndThrowSQLException(log, new SQLNonTransientException("Illegal operation after end of ResultSet.")); + } + + private SQLException updatesNotSupportedException() { + return new SQLFeatureNotSupportedException("Updates are not supported"); + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetMetaDataImpl.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetMetaDataImpl.java new file mode 100644 index 0000000000..ec2dc9698d --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetMetaDataImpl.java @@ -0,0 +1,172 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.JdbcWrapper; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Schema; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +public class ResultSetMetaDataImpl implements ResultSetMetaData, JdbcWrapper { + + private ResultSetImpl resultSet; + private Schema schema; + + public ResultSetMetaDataImpl(ResultSetImpl resultSet, Schema schema) { + this.resultSet = resultSet; + this.schema = schema; + } + + @Override + public int getColumnCount() throws SQLException { + return schema.getNumberOfColumns(); + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + checkAccessible(column); + return false; + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + checkColumnIndex(column); + return true; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + checkAccessible(column); + return true; + } + + @Override + public boolean isCurrency(int column) throws SQLException { + checkAccessible(column); + return false; + } + + @Override + public int isNullable(int column) throws SQLException { + checkAccessible(column); + return columnNullableUnknown; + } + + @Override + public boolean isSigned(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getEsType().isSigned(); + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getEsType().getDisplaySize(); + } + + @Override + public String getColumnLabel(int column) throws SQLException { + 
checkAccessible(column); + return schema.getColumnMetaData(column-1).getLabel(); + } + + @Override + public String getColumnName(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getName(); + } + + @Override + public String getSchemaName(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getTableSchemaName(); + } + + @Override + public int getPrecision(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getPrecision(); + } + + @Override + public int getScale(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getScale(); + } + + @Override + public String getTableName(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getTableName(); + } + + @Override + public String getCatalogName(int column) throws SQLException { + checkAccessible(column); + return schema.getColumnMetaData(column-1).getCatalogName(); + } + + @Override + public int getColumnType(int column) throws SQLException { + checkAccessible(column); + return schema.getElasticsearchType(column-1).getJdbcType().getVendorTypeNumber(); + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + checkAccessible(column); + return schema.getElasticsearchType(column-1).getJdbcType().getName(); + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + checkAccessible(column); + return true; + } + + @Override + public boolean isWritable(int column) throws SQLException { + checkAccessible(column); + return false; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + checkAccessible(column); + return false; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + checkAccessible(column); + return 
schema.getElasticsearchType(column-1).getJavaClassName(); + } + + private void checkAccessible(int columnIndex) throws SQLException { + checkOpen(); + checkColumnIndex(columnIndex); + } + + private void checkOpen() throws SQLException { + resultSet.checkOpen(); + } + + private void checkColumnIndex(int columnIndex) throws SQLException { + resultSet.checkColumnIndex(columnIndex); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/StatementImpl.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/StatementImpl.java new file mode 100644 index 0000000000..dae35b71f2 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/StatementImpl.java @@ -0,0 +1,352 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.exceptions.ObjectClosedException; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource; +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.JdbcQueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.internal.JdbcWrapper; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.InternalServerErrorException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLNonTransientException; +import java.sql.SQLWarning; +import java.sql.Statement; + +public class StatementImpl implements Statement, JdbcWrapper, LoggingSource { + + protected ConnectionImpl connection; + protected boolean open = false; + protected int fetchSize; + protected ResultSetImpl resultSet; + protected Logger log; + private boolean closeOnCompletion; + + public StatementImpl(ConnectionImpl connection, Logger log) { + this.connection = connection; + this.open = true; + this.fetchSize = connection.getFetchSize(); + this.log = log; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + log.debug(()-> logEntry("executeQuery (%s)", sql)); + ResultSet rs = executeQueryX(sql, fetchSize); + log.debug(()-> logExit("executeQuery", rs)); + return rs; + } + + protected ResultSet executeQueryX(String sql, int fetchSize) throws SQLException { + JdbcQueryRequest jdbcQueryRequest = new JdbcQueryRequest(sql, fetchSize); + return executeQueryRequest(jdbcQueryRequest); + } + + protected ResultSet executeQueryRequest(JdbcQueryRequest jdbcQueryRequest) 
throws SQLException { + + // JDBC Spec: A ResultSet object is automatically closed when the Statement + // object that generated it is closed, re-executed, or used to retrieve the + // next result from a sequence of multiple results. + closeResultSet(false); + + try { + QueryResponse queryResponse = connection.getProtocol().execute(jdbcQueryRequest); + + if (queryResponse.getError() != null) { + throw new InternalServerErrorException( + queryResponse.getError().getReason(), + queryResponse.getError().getType(), + queryResponse.getError().getDetails()); + } + + resultSet = buildResultSet(queryResponse); + + } catch (ResponseException | IOException ex) { + logAndThrowSQLException(log, new SQLException("Error executing query", ex)); + } + return resultSet; + } + + protected ResultSetImpl buildResultSet(QueryResponse queryResponse) throws SQLException { + return new ResultSetImpl(this, queryResponse, log); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Updates are not supported."); + } + + @Override + public void close() throws SQLException { + log.debug(()->logEntry("close ()")); + open = false; + log.debug(()->logExit("close")); + } + + private void closeX() throws SQLException { + open = false; + } + + @Override + public int getMaxFieldSize() throws SQLException { + return 0; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + + } + + @Override + public int getMaxRows() throws SQLException { + return 0; + } + + @Override + public void setMaxRows(int max) throws SQLException { + + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + checkOpen(); + } + + @Override + public int getQueryTimeout() throws SQLException { + return 0; + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + // no-op + } + + @Override + public void cancel() throws SQLException { + throw new 
SQLFeatureNotSupportedException("cancel not supported"); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } + + @Override + public void clearWarnings() throws SQLException { + + } + + @Override + public void setCursorName(String name) throws SQLException { + checkOpen(); + // no-op + } + + @Override + public boolean execute(String sql) throws SQLException { + log.debug(()->logEntry("execute (%s)", sql)); + checkOpen(); + executeQueryX(sql, fetchSize); + log.debug(() -> logExit("execute", true)); + return true; + } + + @Override + public ResultSet getResultSet() throws SQLException { + log.debug(() -> logEntry("getResultSet ()")); + checkOpen(); + log.debug(() -> logExit("getResultSet", resultSet)); + return resultSet; + } + + @Override + public int getUpdateCount() throws SQLException { + checkOpen(); + return -1; + } + + @Override + public boolean getMoreResults() throws SQLException { + checkOpen(); + closeResultSet(true); + return false; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + + } + + @Override + public int getFetchDirection() throws SQLException { + return 0; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + return fetchSize; + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public int getResultSetType() throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public void addBatch(String sql) throws SQLException { + throw new SQLFeatureNotSupportedException("Batch execution is not supported"); + } + + @Override + public void clearBatch() throws SQLException { + throw new SQLFeatureNotSupportedException("Batch execution is not supported"); + } + + @Override + public int[] executeBatch() throws SQLException { + throw new 
SQLFeatureNotSupportedException("Batch execution is not supported"); + } + + @Override + public Connection getConnection() throws SQLException { + return connection; + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return false; + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return null; + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + throw updatesNotSupportedException(); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + log.debug(()->logEntry("execute (%s, %d)", sql, autoGeneratedKeys)); + checkOpen(); + if (autoGeneratedKeys != Statement.NO_GENERATED_KEYS) { + throw new SQLNonTransientException("Auto generated keys are not supported"); + } + executeQueryX(sql, fetchSize); + log.debug(() -> logExit("execute", true)); + return true; + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + throw new SQLNonTransientException("Auto generated keys are not supported"); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + throw new SQLNonTransientException("Auto generated keys are not supported"); + } + + @Override + public int getResultSetHoldability() throws SQLException { + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public boolean isClosed() throws SQLException { + return isClosedX(); + } + + protected boolean isClosedX() throws SQLException { + return !open; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + checkOpen(); + // no-op + } + + @Override + public boolean 
isPoolable() throws SQLException { + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + closeOnCompletion = true; + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return closeOnCompletion; + } + + protected void checkOpen() throws SQLException { + if (isClosedX()) { + throw new ObjectClosedException("Statement closed."); + } + } + + protected void closeResultSet(boolean closeStatement) throws SQLException { + if (resultSet != null) { + resultSet.closeX(closeStatement); + } + } + + void resultSetClosed(ResultSet rs, boolean closeStatement) throws SQLException { + if (closeOnCompletion && closeStatement) { + log.debug(() -> logMessage("Child ResultSet closed and closeOnCompletion is enabled. Closing statement.")); + closeX(); + } + } + + private SQLException updatesNotSupportedException() { + return new SQLFeatureNotSupportedException("Updates are not supported"); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/auth/AuthenticationType.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/auth/AuthenticationType.java new file mode 100644 index 0000000000..9a143f3bc2 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/auth/AuthenticationType.java @@ -0,0 +1,39 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.auth; + +/** + * Enum representing supported authentication methods + * + */ +public enum AuthenticationType { + + /** + * No authentication + */ + NONE, + + /** + * HTTP Basic authentication + */ + BASIC, + + /** + * AWS Signature V4 + */ + AWS_SIGV4; +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/AuthConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/AuthConnectionProperty.java new file mode 100644 index 0000000000..ca6e447d35 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/AuthConnectionProperty.java @@ -0,0 +1,56 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +import com.amazon.opendistroforelasticsearch.jdbc.auth.AuthenticationType; + +import java.util.Locale; + +public class AuthConnectionProperty extends ConnectionProperty { + + public static final String KEY = "auth"; + + public AuthConnectionProperty() { + super(KEY); + } + + @Override + protected AuthenticationType parseValue(Object rawValue) throws ConnectionPropertyException { + if (rawValue == null) { + return getDefault(); + } else if (rawValue instanceof String) { + String stringValue = (String) rawValue; + try { + return AuthenticationType.valueOf(stringValue.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException iae) { + throw new ConnectionPropertyException(getKey(), + String.format("Invalid value specified for the property \"%s\". " + + "Unknown authentication type \"%s\".", getKey(), stringValue)); + } + } + + throw new ConnectionPropertyException(getKey(), + String.format("Property \"%s\" requires a valid String matching a known authentication type. 
" + + "Invalid value of type: %s specified.", getKey(), rawValue.getClass().getName())); + + } + + @Override + public AuthenticationType getDefault() { + return AuthenticationType.NONE; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/AwsCredentialsProviderProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/AwsCredentialsProviderProperty.java new file mode 100644 index 0000000000..08a178b0f4 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/AwsCredentialsProviderProperty.java @@ -0,0 +1,31 @@ +package com.amazon.opendistroforelasticsearch.jdbc.config; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; + +public class AwsCredentialsProviderProperty extends ConnectionProperty { + + public static final String KEY = "awsCredentialsProvider"; + + public AwsCredentialsProviderProperty() { + super(KEY); + } + + @Override + public AWSCredentialsProvider getDefault() { + return new DefaultAWSCredentialsProviderChain(); + } + + @Override + protected AWSCredentialsProvider parseValue(Object rawValue) throws ConnectionPropertyException { + if (null == rawValue) { + return null; + } else if (rawValue instanceof AWSCredentialsProvider) { + return (AWSCredentialsProvider) rawValue; + } + + throw new ConnectionPropertyException(getKey(), + String.format("Property \"%s\" requires a valid AWSCredentialsProvider instance. 
" + + "Invalid value of type: %s specified.", getKey(), rawValue.getClass().getName())); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/BoolConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/BoolConnectionProperty.java new file mode 100644 index 0000000000..89f7eaf349 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/BoolConnectionProperty.java @@ -0,0 +1,45 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class BoolConnectionProperty extends ConnectionProperty { + + public BoolConnectionProperty(String key) { + super(key); + } + + @Override + protected Boolean parseValue(Object value) throws ConnectionPropertyException { + + if (value == null) { + return getDefault(); + } else if (value instanceof Boolean) { + return (Boolean) value; + } else if (value instanceof String) { + return Boolean.parseBoolean((String) value); + } + + throw new ConnectionPropertyException(getKey(), + String.format("Property %s requires a valid boolean. Invalid property value of type %s. 
", + getKey(), value.getClass().getName())); + } + + @Override + public Boolean getDefault() { + return false; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionConfig.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionConfig.java new file mode 100644 index 0000000000..ecd813331f --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionConfig.java @@ -0,0 +1,580 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +import com.amazon.opendistroforelasticsearch.jdbc.auth.AuthenticationType; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LogLevel; +import com.amazon.opendistroforelasticsearch.jdbc.internal.util.AwsHostNameUtil; +import com.amazon.opendistroforelasticsearch.jdbc.internal.util.UrlParser; +import com.amazonaws.auth.AWSCredentialsProvider; + +import java.io.PrintWriter; +import java.net.URISyntaxException; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +public class ConnectionConfig { + + private String url; + private String host; + private int port; + private int fetchSize; + private String path; + private boolean useSSL; + private int loginTimeout; + private String logOutput; + private PrintWriter logWriter; + private String user; + private String password; + private boolean requestCompression; + private AuthenticationType authenticationType; + private AWSCredentialsProvider awsCredentialsProvider; + private String region; + private LogLevel logLevel; + + private String keyStoreLocation; + private String keyStorePassword; + private String keyStoreType; + private String trustStoreLocation; + private String trustStorePassword; + private String trustStoreType; + private boolean trustSelfSigned; + private boolean hostnameVerification; + + private ConnectionConfig(Builder builder) { + this.url = builder.getUrl(); + this.host = builder.getHostProperty().getValue(); + this.port = builder.getPortProperty().getValue(); + this.fetchSize = builder.getFetchSizeProperty().getValue(); + this.path = builder.getPathProperty().getValue(); + this.useSSL = builder.getUseSSLProperty().getValue(); + + this.logOutput = builder.getLogOutputProperty().getValue(); + this.logLevel = builder.getLogLevelConnectionProperty().getValue(); + this.logWriter = builder.getLogWriter(); + + this.loginTimeout = 
builder.getLoginTimeoutProperty().getValue(); + + this.user = builder.getUserProperty().getValue(); + this.password = builder.getPasswordProperty().getValue(); + + this.requestCompression = builder.getRequestCompressionProperty().getValue(); + this.authenticationType = builder.getAuthConnectionProperty().getValue(); + this.awsCredentialsProvider = builder.getAwsCredentialProvider().getValue(); + this.region = builder.getRegionConnectionProperty().getValue(); + + this.keyStoreLocation = builder.getKeyStoreLocationConnectionProperty().getValue(); + this.keyStorePassword = builder.getKeyStorePasswordConnectionProperty().getValue(); + this.keyStoreType = builder.getKeyStoreTypeConnectionProperty().getValue(); + + this.trustStoreLocation = builder.getTrustStoreLocationConnectionProperty().getValue(); + this.trustStorePassword = builder.getTrustStorePasswordConnectionProperty().getValue(); + this.trustStoreType = builder.getTrustStoreTypeConnectionProperty().getValue(); + + this.trustSelfSigned = builder.getTrustSelfSignedConnectionProperty().getValue(); + + this.hostnameVerification = builder.getHostnameVerificationConnectionProperty().getValue(); + } + + public static Builder builder() { + return new Builder(); + } + + public String getUrl() { + return url; + } + + public String getHost() { + return host; + } + + public int getPort() { + return port; + } + + public int getFetchSize() { + return fetchSize; + } + + public String getPath() { + return path; + } + + public boolean isUseSSL() { + return useSSL; + } + + public boolean requestCompression() { + return requestCompression; + } + + public int getLoginTimeout() { + return loginTimeout; + } + + public String getLogOutput() { + return logOutput; + } + + public LogLevel getLogLevel() { + return logLevel; + } + + public String getUser() { + return user; + } + + public String getPassword() { + return password; + } + + public AuthenticationType getAuthenticationType() { + return authenticationType; + } + + public 
AWSCredentialsProvider getAwsCredentialsProvider() { + return awsCredentialsProvider; + } + + public String getRegion() { + return region; + } + + public PrintWriter getLogWriter() { + return logWriter; + } + + public String getKeyStoreLocation() { + return keyStoreLocation; + } + + public String getKeyStorePassword() { + return keyStorePassword; + } + + public String getKeyStoreType() { + return keyStoreType; + } + + public String getTrustStoreLocation() { + return trustStoreLocation; + } + + public String getTrustStorePassword() { + return trustStorePassword; + } + + public String getTrustStoreType() { + return trustStoreType; + } + + public boolean trustSelfSigned() { + return trustSelfSigned; + } + + public boolean hostnameVerification() { + return hostnameVerification; + } + + @Override + public String toString() { + return "ConnectionConfig{" + + "url='" + url + '\'' + + ", host='" + host + '\'' + + ", port=" + port + + ", fetchSize=" + fetchSize + + ", path='" + path + '\'' + + ", useSSL=" + useSSL + + ", loginTimeout=" + loginTimeout + + ", logOutput='" + logOutput + '\'' + + ", logWriter=" + logWriter + + ", user='" + user + '\'' + + ", password='" + mask(password) + '\'' + + ", requestCompression=" + requestCompression + + ", authenticationType=" + authenticationType + + ", awsCredentialsProvider=" + awsCredentialsProvider + + ", region='" + region + '\'' + + ", logLevel=" + logLevel + + ", keyStoreLocation='" + keyStoreLocation + '\'' + + ", keyStorePassword='" + mask(keyStorePassword) + '\'' + + ", keyStoreType='" + keyStoreType + '\'' + + ", trustStoreLocation='" + trustStoreLocation + '\'' + + ", trustStorePassword='" + mask(trustStorePassword) + '\'' + + ", trustStoreType='" + trustStoreType + '\'' + + ", trustSelfSigned='" + trustSelfSigned + '\'' + + ", hostnameVerification='" + hostnameVerification + '\'' + + '}'; + } + + private String mask(String string) { + return string == null || string.length() == 0 ? 
"" : ""; + } + + public static class Builder { + + private HostConnectionProperty hostProperty = new HostConnectionProperty(); + private PortConnectionProperty portProperty = new PortConnectionProperty(); + private FetchSizeProperty fetchSizeProperty = new FetchSizeProperty(); + private LoginTimeoutConnectionProperty loginTimeoutProperty = new LoginTimeoutConnectionProperty(); + private UseSSLConnectionProperty useSSLProperty = new UseSSLConnectionProperty(); + private PathConnectionProperty pathProperty = new PathConnectionProperty(); + private LogOutputConnectionProperty logOutputProperty = new LogOutputConnectionProperty(); + private UserConnectionProperty userProperty = new UserConnectionProperty(); + private PasswordConnectionProperty passwordProperty = new PasswordConnectionProperty(); + private RequestCompressionConnectionProperty requestCompressionProperty = + new RequestCompressionConnectionProperty(); + private AuthConnectionProperty authConnectionProperty = new AuthConnectionProperty(); + private RegionConnectionProperty regionConnectionProperty = new RegionConnectionProperty(); + private LogLevelConnectionProperty logLevelConnectionProperty = new LogLevelConnectionProperty(); + + private KeyStoreLocationConnectionProperty keyStoreLocationConnectionProperty + = new KeyStoreLocationConnectionProperty(); + private KeyStorePasswordConnectionProperty keyStorePasswordConnectionProperty + = new KeyStorePasswordConnectionProperty(); + private KeyStoreTypeConnectionProperty keyStoreTypeConnectionProperty + = new KeyStoreTypeConnectionProperty(); + + private TrustStoreLocationConnectionProperty trustStoreLocationConnectionProperty + = new TrustStoreLocationConnectionProperty(); + private TrustStorePasswordConnectionProperty trustStorePasswordConnectionProperty + = new TrustStorePasswordConnectionProperty(); + private TrustStoreTypeConnectionProperty trustStoreTypeConnectionProperty + = new TrustStoreTypeConnectionProperty(); + + private 
TrustSelfSignedConnectionProperty trustSelfSignedConnectionProperty + = new TrustSelfSignedConnectionProperty(); + + private AwsCredentialsProviderProperty awsCredentialsProviderProperty + = new AwsCredentialsProviderProperty(); + + private HostnameVerificationConnectionProperty hostnameVerificationConnectionProperty + = new HostnameVerificationConnectionProperty(); + + ConnectionProperty[] connectionProperties = new ConnectionProperty[]{ + hostProperty, + portProperty, + fetchSizeProperty, + loginTimeoutProperty, + useSSLProperty, + pathProperty, + logOutputProperty, + logLevelConnectionProperty, + userProperty, + passwordProperty, + requestCompressionProperty, + authConnectionProperty, + awsCredentialsProviderProperty, + regionConnectionProperty, + keyStoreLocationConnectionProperty, + keyStorePasswordConnectionProperty, + keyStoreTypeConnectionProperty, + trustStoreLocationConnectionProperty, + trustStorePasswordConnectionProperty, + trustStoreTypeConnectionProperty, + trustSelfSignedConnectionProperty, + hostnameVerificationConnectionProperty + }; + + private String url = null; + private PrintWriter logWriter = null; + private Map propertyMap; + private Map overrideMap; + private Properties urlProperties; + private Properties properties; + + + public ConnectionProperty[] getConnectionProperties() { + return connectionProperties; + } + + public HostConnectionProperty getHostProperty() { + return hostProperty; + } + + public PortConnectionProperty getPortProperty() { + return portProperty; + } + + public FetchSizeProperty getFetchSizeProperty() { + return fetchSizeProperty; + } + + public LoginTimeoutConnectionProperty getLoginTimeoutProperty() { + return loginTimeoutProperty; + } + + public UseSSLConnectionProperty getUseSSLProperty() { + return useSSLProperty; + } + + public PathConnectionProperty getPathProperty() { + return pathProperty; + } + + public LogOutputConnectionProperty getLogOutputProperty() { + return logOutputProperty; + } + + public 
UserConnectionProperty getUserProperty() { + return userProperty; + } + + public PasswordConnectionProperty getPasswordProperty() { + return passwordProperty; + } + + public RequestCompressionConnectionProperty getRequestCompressionProperty() { + return requestCompressionProperty; + } + + public AuthConnectionProperty getAuthConnectionProperty() { + return authConnectionProperty; + } + + public AwsCredentialsProviderProperty getAwsCredentialProvider() { + return awsCredentialsProviderProperty; + } + + public RegionConnectionProperty getRegionConnectionProperty() { + return regionConnectionProperty; + } + + public LogLevelConnectionProperty getLogLevelConnectionProperty() { + return logLevelConnectionProperty; + } + + public PrintWriter getLogWriter() { + return logWriter; + } + + public KeyStoreLocationConnectionProperty getKeyStoreLocationConnectionProperty() { + return keyStoreLocationConnectionProperty; + } + + public KeyStorePasswordConnectionProperty getKeyStorePasswordConnectionProperty() { + return keyStorePasswordConnectionProperty; + } + + public KeyStoreTypeConnectionProperty getKeyStoreTypeConnectionProperty() { + return keyStoreTypeConnectionProperty; + } + + public TrustStoreLocationConnectionProperty getTrustStoreLocationConnectionProperty() { + return trustStoreLocationConnectionProperty; + } + + public TrustStorePasswordConnectionProperty getTrustStorePasswordConnectionProperty() { + return trustStorePasswordConnectionProperty; + } + + public TrustStoreTypeConnectionProperty getTrustStoreTypeConnectionProperty() { + return trustStoreTypeConnectionProperty; + } + + public TrustSelfSignedConnectionProperty getTrustSelfSignedConnectionProperty() { + return trustSelfSignedConnectionProperty; + } + + public HostnameVerificationConnectionProperty getHostnameVerificationConnectionProperty() { + return hostnameVerificationConnectionProperty; + } + + public Builder setLogWriter(PrintWriter printWriter) { + this.logWriter = printWriter; + return this; + } + + 
public String getUrl() { + return url; + } + + public Builder setUrl(String url) { + this.url = url; + return this; + } + + public Builder setPropertyMap(Map map) { + if (map != null) { + propertyMap = new HashMap<>(); + propertyMap.putAll(map); + } + return this; + } + + /** + * Accumulates property values to override. Successive calls + * are applied in the order they are made i.e. a property value + * supplied in the most recent invocation overrides any value + * supplied in a prior invocations. + * + * @param map map containing property key-value pairs + * + * @return + */ + public Builder overrideProperties(Map map) { + if (map != null) { + if (overrideMap == null) { + overrideMap = new HashMap<>(); + } + this.overrideMap.putAll(map); + } + return this; + } + + public Builder setProperties(Properties properties) { + if (properties != null) { + this.properties = new Properties(); + + Enumeration enumeration = properties.propertyNames(); + + while (enumeration.hasMoreElements()) { + String key = (String) enumeration.nextElement(); + this.properties.setProperty(key, properties.getProperty(key)); + } + } + return this; + } + + public ConnectionConfig build() throws ConnectionPropertyException { + if (url != null) { + try { + urlProperties = UrlParser.parseProperties(url); + } catch (URISyntaxException e) { + throw new ConnectionPropertyException("Invalid connection URL", e); + } + } + + for (ConnectionProperty connectionProperty : connectionProperties) { + setRawValue(connectionProperty); + } + + validateConfig(); + + return new ConnectionConfig(this); + } + +// public DriverPropertyInfo[] buildDriverPropertyInfo() throws ConnectionPropertyException { +// // Return connection properties that need more +// try { +// build(); +// } catch (ConnectionPropertyException cpe) { +// +// } +// validateConfig(); +// +// return new ConnectionConfig(this); +// } + + private void setRawValue(ConnectionProperty connectionProperty) throws ConnectionPropertyException { + Object 
value = getPropertyValueToSet(connectionProperty.getKey()); + connectionProperty.setRawValue(value); + } + + /** + * Validate the overall configuration to be applied. + * + * @throws ConnectionPropertyException if the configuration attempted + * fails validation checks + */ + private void validateConfig() throws ConnectionPropertyException { + AuthenticationType authenticationType = authConnectionProperty.getValue(); + + if (authenticationType == AuthenticationType.NONE) { + + // Use Basic auth if it a username is provided but an + // explicit auth type is not set + + if (userProperty.getValue() != null) { + authConnectionProperty.setRawValue(AuthenticationType.BASIC.name()); + } + + } else if (authenticationType == AuthenticationType.BASIC && + userProperty.getValue() == null) { + + throw new ConnectionPropertyException(authConnectionProperty.getKey(), + "Basic authentication requires a valid username but none was provided."); + + } else if (authenticationType == AuthenticationType.AWS_SIGV4 && + regionConnectionProperty.getValue() == null) { + + // aws sdk auto-detection does not work for AWS ES endpoints + String region = AwsHostNameUtil.parseRegion(hostProperty.getValue()); + + if (region == null) { + throw new ConnectionPropertyException(authConnectionProperty.getKey(), + String.format("AWS Signature V4 authentication requires a region to be used, but " + + "a valid value could not be determined from the specified hostname. " + + "Provide an explicit region value (e.g. 
us-east-1) " + + "via the \"%s\" connection property.", regionConnectionProperty.getKey())); + } else { + regionConnectionProperty.setRawValue(region); + } + } + + if (portProperty.getRawValue() == null && useSSLProperty.getValue() == true) { + // port is not explicitly specified, but SSL is enabled + // change the default port to use to 443 + portProperty.setRawValue(443); + } + + if (fetchSizeProperty.getValue() < 0) { + throw new ConnectionPropertyException(fetchSizeProperty.getKey(), + "Cursor fetch size value should be greater or equal to zero"); + } + } + + /** + * Computes the effective value for a connection property + * as per the necessary precedence order. + *
<p>
+ * Properties specified via overrideProperties have the + * highest precedence, followed by properties specified via + * setPropertyMap, followed by properties specified via + * setProperties and finally property values specified via + * the connection URL. + * + * @param key name of the property + * + * @return effective value + */ + private Object getPropertyValueToSet(String key) { + if (overrideMap != null && overrideMap.containsKey(key)) { + return overrideMap.get(key); + } + + if (propertyMap != null && propertyMap.containsKey(key)) { + return propertyMap.get(key); + } + + if (properties != null) { + Object value = properties.getProperty(key); + if (value != null) + return value; + } + + if (urlProperties != null) { + Object value = urlProperties.getProperty(key); + if (value != null) + return value; + } + + return null; + } + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionProperty.java new file mode 100644 index 0000000000..f6209bf8c9 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionProperty.java @@ -0,0 +1,172 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +/** + * Represents a Connection configuration property. + *
<p>
+ * A property has an associated raw value and a parsed value. + * The raw value is any Object that is provided by a user when setting + * the property, while the parsed value is the effective value computed + * from the raw value. + *
<p>
+ * The raw value and parsed value need not have similar instance types + * since properties may accept a varied set of input raw values to compute + * the parsed value e.g a String "true" or "false" may be accepted to + * compute a Boolean property. + *
<p>
+ * During Connection initialization, all defined connection properties + * are expected to be initialized with the value provided by the user or + * a null if no value was provided by the user. Each property defines + * its own behavior of how it gets initialized. + * + * @param <T> The type of the parsed value of the property. + */ +public abstract class ConnectionProperty<T> { + + private final String key; + private Object rawValue; + private T parsedValue; + private boolean parsed = false; + + public ConnectionProperty(final String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + /** + * @return the raw value provided to set the property + */ + public Object getRawValue() { + return rawValue; + } + + /** + * Set the property using a specified raw value. + * + * @param rawValue the raw value to use + * + * @throws ConnectionPropertyException if the raw input value can not + * be parsed or fails validation constraints applicable on the + * property value. + */ + public void setRawValue(Object rawValue) throws ConnectionPropertyException { + this.rawValue = rawValue; + this.parsed = false; + parse(); + } + + /** + * Returns the computed value of the property after parsing the + * raw value provided for the property. + * + * @return the effective value of the property + * + * @throws IllegalStateException if a valid value has not been + * set for this property + */ + public T getValue() { + verifyParsed(); + return parsedValue; + } + + /** + * @return true if the raw property value has been successfully parsed + * to compute the parsed value, false otherwise + */ + public boolean isParsed() { + return parsed; + } + + /** + * The default value the property should be set to in the absence of + * an explicitly configured value. 
+ * + * @return default value for the property + */ + public abstract T getDefault(); + + /** + * Method that pre-processes a supplied raw value for a + * property prior to it being passed into the property's + * parseValue function. + *
<p>
+ * This function is meant to offload any value pre-processing + * like trimming of String input values prior to the value being + * used in the parseValue function. + *
<p>
+ * Currently, the only pre-processing applied is whitespace trimming of + * the input in case the raw input is a String. Subclass properties may + * override this method to modify or extend the default pre-processing + * of their raw input values. + * + * @param value - The raw value provided for the property + * + * @return The value that should be used by the parseValue function + */ + protected Object preProcess(Object value) { + if (value instanceof String) { + return ((String) value).trim(); + } else { + return value; + } + } + + /** + * Given a raw value for a property, the method returns the actual + * value that the property should be set to. + * + * @param rawValue raw property input value + * + * @return the actual value the property should be set to + * + * @throws ConnectionPropertyException if the raw input value can not + * be parsed or fails validation constraints applicable on the + * property value. + */ + protected abstract T parseValue(Object rawValue) throws ConnectionPropertyException; + + /** + * Execute parsing of the raw value + * + * @throws ConnectionPropertyException if the raw input value can not + * be parsed or fails validation constraints applicable on the + * property value. + */ + private void parse() throws ConnectionPropertyException { + if (!parsed) { + this.parsedValue = parseValue(preProcess(rawValue)); + parsed = true; + } + } + + /** + * Verify if the supplied value for this property was successfully + * parsed. + * + * @throws {@link IllegalStateException} if a valid property value + * has not been provided. 
+ */ + private void verifyParsed() { + if (!isParsed()) { + throw new IllegalStateException(String.format("Property %s is not yet successfully parsed.", getKey())); + } + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionPropertyException.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionPropertyException.java new file mode 100644 index 0000000000..06450ec3e7 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionPropertyException.java @@ -0,0 +1,48 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +import java.sql.SQLException; + +public class ConnectionPropertyException extends SQLException { + + String propertyKey; + + public ConnectionPropertyException(String key) { + super(); + this.propertyKey = key; + } + + public ConnectionPropertyException(String key, String message) { + super(message); + this.propertyKey = key; + } + + public ConnectionPropertyException(String key, String message, Throwable cause) { + super(message, cause); + this.propertyKey = key; + } + + public ConnectionPropertyException(String key, Throwable cause) { + super(cause); + this.propertyKey = key; + } + + public String getPropertyKey() { + return propertyKey; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/FetchSizeProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/FetchSizeProperty.java new file mode 100644 index 0000000000..baf75d8fa5 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/FetchSizeProperty.java @@ -0,0 +1,10 @@ +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class FetchSizeProperty extends IntConnectionProperty { + + public static final String KEY = "fetchSize"; + + public FetchSizeProperty() { + super(KEY); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/HostConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/HostConnectionProperty.java new file mode 100644 index 0000000000..ed25fe6930 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/HostConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class HostConnectionProperty extends StringConnectionProperty { + public static final String KEY = "host"; + + public HostConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return "localhost"; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/HostnameVerificationConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/HostnameVerificationConnectionProperty.java new file mode 100644 index 0000000000..4a41e5b207 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/HostnameVerificationConnectionProperty.java @@ -0,0 +1,31 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class HostnameVerificationConnectionProperty extends BoolConnectionProperty { + + public static final String KEY = "hostnameVerification"; + + public HostnameVerificationConnectionProperty() { + super(KEY); + } + + @Override + public Boolean getDefault() { + return true; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/IntConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/IntConnectionProperty.java new file mode 100644 index 0000000000..ee71dd0d95 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/IntConnectionProperty.java @@ -0,0 +1,48 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class IntConnectionProperty extends ConnectionProperty<Integer> { + + public IntConnectionProperty(String key) { + super(key); + } + + @Override + protected Integer parseValue(Object value) throws ConnectionPropertyException { + + if (value == null) { + return getDefault(); + } else if (value instanceof Integer) { + return (Integer) value; + } else if (value instanceof String) { + try { + return Integer.parseInt((String) value); + } catch (NumberFormatException nfe) { + // invalid value + } + } + + throw new ConnectionPropertyException(getKey(), + String.format("Property %s requires a valid integer. Invalid property value %s. ", getKey(), value)); + } + + @Override + public Integer getDefault() { + return 0; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStoreLocationConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStoreLocationConnectionProperty.java new file mode 100644 index 0000000000..d21bbbb2ea --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStoreLocationConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class KeyStoreLocationConnectionProperty extends StringConnectionProperty { + public static final String KEY = "keyStoreLocation"; + + public KeyStoreLocationConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return null; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStorePasswordConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStorePasswordConnectionProperty.java new file mode 100644 index 0000000000..f1637a6193 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStorePasswordConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class KeyStorePasswordConnectionProperty extends StringConnectionProperty { + public static final String KEY = "keyStorePassword"; + + public KeyStorePasswordConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return null; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStoreTypeConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStoreTypeConnectionProperty.java new file mode 100644 index 0000000000..f1dd0f7aaa --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/KeyStoreTypeConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class KeyStoreTypeConnectionProperty extends StringConnectionProperty { + public static final String KEY = "keyStoreType"; + + public KeyStoreTypeConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return "JKS"; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LogLevelConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LogLevelConnectionProperty.java new file mode 100644 index 0000000000..fc34efbd17 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LogLevelConnectionProperty.java @@ -0,0 +1,55 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +import com.amazon.opendistroforelasticsearch.jdbc.logging.LogLevel; + +import java.util.Locale; + +public class LogLevelConnectionProperty extends ConnectionProperty<LogLevel> { + + public static final String KEY = "logLevel"; + + public LogLevelConnectionProperty() { + super(KEY); + } + + @Override + protected LogLevel parseValue(Object rawValue) throws ConnectionPropertyException { + if (rawValue == null) { + return getDefault(); + } else if (rawValue instanceof String) { + String stringValue = (String) rawValue; + try { + return LogLevel.valueOf(stringValue.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException iae) { + throw new ConnectionPropertyException(getKey(), + String.format("Invalid value specified for the property \"%s\". " + + "Unknown log level \"%s\".", getKey(), stringValue)); + } + } + + throw new ConnectionPropertyException(getKey(), + String.format("Property \"%s\" requires a valid String matching a known log level. " + + "Invalid value of type: %s specified.", getKey(), rawValue.getClass().getName())); + } + + @Override + public LogLevel getDefault() { + return LogLevel.OFF; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LogOutputConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LogOutputConnectionProperty.java new file mode 100644 index 0000000000..122b58a96d --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LogOutputConnectionProperty.java @@ -0,0 +1,27 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class LogOutputConnectionProperty extends StringConnectionProperty { + + public static final String KEY = "logOutput"; + + public LogOutputConnectionProperty() { + super(KEY); + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LoginTimeoutConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LoginTimeoutConnectionProperty.java new file mode 100644 index 0000000000..96c003a341 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/LoginTimeoutConnectionProperty.java @@ -0,0 +1,41 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +/** + * Login / Read timeout in seconds + */ +public class LoginTimeoutConnectionProperty extends IntConnectionProperty { + + public static final String KEY = "loginTimeout"; + + public LoginTimeoutConnectionProperty() { + super(KEY); + } + + @Override + protected Integer parseValue(Object value) throws ConnectionPropertyException { + int intValue = super.parseValue(value); + + if (intValue < 0) { + throw new ConnectionPropertyException(getKey(), + String.format("Login timeout property requires a valid integer >=0. Invalid value: %d", intValue)); + } + return intValue; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PasswordConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PasswordConnectionProperty.java new file mode 100644 index 0000000000..7d9f491755 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PasswordConnectionProperty.java @@ -0,0 +1,27 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class PasswordConnectionProperty extends StringConnectionProperty { + + public static final String KEY = "password"; + + public PasswordConnectionProperty() { + super(KEY); + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PathConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PathConnectionProperty.java new file mode 100644 index 0000000000..9b37448428 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PathConnectionProperty.java @@ -0,0 +1,50 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +/** + * The Path connection property. + * + * A trailing '/' is not expected or required in the + * input value but is ignored if present. + * + */ +public class PathConnectionProperty extends StringConnectionProperty { + + public static final String KEY = "path"; + + public PathConnectionProperty() { + super(KEY); + } + + @Override + protected String parseValue(Object value) throws ConnectionPropertyException { + String stringValue = super.parseValue(value); + + // Remove the trailing '/' as all internal calls + // will implicitly apply this. 
+ if (stringValue.length() > 1 && stringValue.endsWith("/")) { + stringValue = stringValue.substring(0, stringValue.length()-1); + } + return stringValue; + } + + @Override + public String getDefault() { + return ""; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PortConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PortConnectionProperty.java new file mode 100644 index 0000000000..c067e3bfca --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/PortConnectionProperty.java @@ -0,0 +1,43 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class PortConnectionProperty extends IntConnectionProperty { + + public static final String KEY = "port"; + + public PortConnectionProperty() { + super(KEY); + } + + @Override + protected Integer parseValue(Object value) throws ConnectionPropertyException { + int intValue = super.parseValue(value); + + if (intValue < 0 || intValue > 65535) { + throw new ConnectionPropertyException(getKey(), + String.format("Port number property requires a valid integer (0-65535). 
Invalid value: %d", intValue)); + } + + return intValue; + } + + @Override + public Integer getDefault() { + return 9200; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/RegionConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/RegionConnectionProperty.java new file mode 100644 index 0000000000..5df4e510c6 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/RegionConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class RegionConnectionProperty extends StringConnectionProperty { + + public static final String KEY = "region"; + + public RegionConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return null; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/RequestCompressionConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/RequestCompressionConnectionProperty.java new file mode 100644 index 0000000000..d687e894dc --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/RequestCompressionConnectionProperty.java @@ -0,0 +1,26 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class RequestCompressionConnectionProperty extends BoolConnectionProperty { + + public static final String KEY = "requestCompression"; + + public RequestCompressionConnectionProperty() { + super(KEY); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/StringConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/StringConnectionProperty.java new file mode 100644 index 0000000000..991a701ce6 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/StringConnectionProperty.java @@ -0,0 +1,44 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class StringConnectionProperty extends ConnectionProperty { + + public StringConnectionProperty(String key) { + super(key); + } + + @Override + protected String parseValue(Object value) throws ConnectionPropertyException { + + if (value == null) { + return getDefault(); + } else if (value instanceof String) { + return (String) value; + } + + throw new ConnectionPropertyException(getKey(), + String.format("Property %s requires a valid string. " + + "Invalid value of type: %s specified.", getKey(), value.getClass().getName())); + + } + + @Override + public String getDefault() { + return null; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustSelfSignedConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustSelfSignedConnectionProperty.java new file mode 100644 index 0000000000..13d3c7b615 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustSelfSignedConnectionProperty.java @@ -0,0 +1,26 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class TrustSelfSignedConnectionProperty extends BoolConnectionProperty { + + public static final String KEY = "trustSelfSigned"; + + public TrustSelfSignedConnectionProperty() { + super(KEY); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStoreLocationConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStoreLocationConnectionProperty.java new file mode 100644 index 0000000000..f10bb5e8cb --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStoreLocationConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class TrustStoreLocationConnectionProperty extends StringConnectionProperty { + public static final String KEY = "trustStoreLocation"; + + public TrustStoreLocationConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return null; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStorePasswordConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStorePasswordConnectionProperty.java new file mode 100644 index 0000000000..67cf341524 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStorePasswordConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class TrustStorePasswordConnectionProperty extends StringConnectionProperty { + public static final String KEY = "trustStorePassword"; + + public TrustStorePasswordConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return null; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStoreTypeConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStoreTypeConnectionProperty.java new file mode 100644 index 0000000000..0cbad549cb --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/TrustStoreTypeConnectionProperty.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class TrustStoreTypeConnectionProperty extends StringConnectionProperty { + public static final String KEY = "trustStoreType"; + + public TrustStoreTypeConnectionProperty() { + super(KEY); + } + + public String getDefault() { + return "JKS"; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/UseSSLConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/UseSSLConnectionProperty.java new file mode 100644 index 0000000000..fdcb33cfb4 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/UseSSLConnectionProperty.java @@ -0,0 +1,26 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class UseSSLConnectionProperty extends BoolConnectionProperty { + + public static final String KEY = "useSSL"; + + public UseSSLConnectionProperty() { + super(KEY); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/UserConnectionProperty.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/UserConnectionProperty.java new file mode 100644 index 0000000000..6a31a78486 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/config/UserConnectionProperty.java @@ -0,0 +1,27 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +public class UserConnectionProperty extends StringConnectionProperty { + + public static final String KEY = "user"; + + public UserConnectionProperty() { + super(KEY); + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/JdbcWrapper.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/JdbcWrapper.java new file mode 100644 index 0000000000..90276e8d5f --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/JdbcWrapper.java @@ -0,0 +1,37 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
package com.amazon.opendistroforelasticsearch.jdbc.internal;

import java.sql.SQLException;
import java.sql.Wrapper;

/**
 * Shared default implementation of {@link java.sql.Wrapper} for
 * driver classes that are never proxies: an object "wraps" an
 * interface exactly when it is an instance of it.
 *
 * Restores the generic signatures required by {@link Wrapper}
 * ({@code Class<?>} / {@code <T> T unwrap(Class<T>)}), which were
 * missing (raw types) before.
 */
public interface JdbcWrapper extends Wrapper {

    /**
     * @param iface interface to test; null is treated as "not wrapped"
     *
     * @return true if this object is an instance of iface
     */
    @Override
    default boolean isWrapperFor(Class<?> iface) throws SQLException {
        return iface != null && iface.isInstance(this);
    }

    /**
     * @param iface interface to unwrap to
     *
     * @return this object cast to iface
     *
     * @throws SQLException if this object is not an instance of iface
     */
    @Override
    default <T> T unwrap(Class<T> iface) throws SQLException {
        try {
            return iface.cast(this);
        } catch (ClassCastException cce) {
            throw new SQLException("Unable to unwrap to " + iface.toString(), cce);
        }
    }
}
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal; + +public enum Version { + + // keep this in sync with the gradle version + Current(1, 9, 0, 0); + + private int major; + private int minor; + private int build; + private int revision; + + private String fullVersion; + + Version(int major, int minor, int build, int revision) { + this.major = major; + this.minor = minor; + this.build = build; + this.revision = revision; + this.fullVersion = String.format("%d.%d.%d.%d", major, minor, build, revision); + } + + public int getMajor() { + return this.major; + } + + public int getMinor() { + return this.minor; + } + + public int getBuild() { + return this.build; + } + + public int getRevision() { + return this.revision; + } + + public String getFullVersion() { + return this.fullVersion; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/exceptions/ObjectClosedException.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/exceptions/ObjectClosedException.java new file mode 100644 index 0000000000..d5b36af7b1 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/exceptions/ObjectClosedException.java @@ -0,0 +1,46 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
package com.amazon.opendistroforelasticsearch.jdbc.internal.exceptions;

import java.sql.SQLException;
import java.sql.SQLNonTransientException;

/**
 * Exception indicating a JDBC operation can not occur because the
 * target object (connection, statement, result set, ...) is closed.
 *
 * Non-transient: retrying on a closed object can never succeed.
 */
public class ObjectClosedException extends SQLNonTransientException {

    public ObjectClosedException() {
    }

    public ObjectClosedException(String reason) {
        super(reason);
    }

    public ObjectClosedException(Throwable cause) {
        super(cause);
    }

    public ObjectClosedException(String reason, String SQLState) {
        super(reason, SQLState);
    }

    public ObjectClosedException(String reason, String SQLState, int vendorCode) {
        super(reason, SQLState, vendorCode);
    }
}
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.results; + +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ColumnDescriptor; + +public class ColumnMetaData { + private String name; + private String label; + private String tableSchemaName; + private int precision = -1; + private int scale = -1; + private String tableName; + private String catalogName; + private String esTypeName; + private ElasticsearchType esType; + + public ColumnMetaData(ColumnDescriptor descriptor) { + this.name = descriptor.getName(); + + // if a label isn't specified, the name is the label + this.label = descriptor.getLabel() == null ? this.name : descriptor.getLabel(); + + this.esTypeName = descriptor.getType(); + this.esType = ElasticsearchType.fromTypeName(esTypeName); + + // use canned values until server can return this + this.precision = this.esType.getPrecision(); + this.scale = 0; + + // JDBC has these, but our protocol does not yet convey these + this.tableName = ""; + this.catalogName = ""; + this.tableSchemaName = ""; + } + + public String getName() { + return name; + } + + public String getLabel() { + return label; + } + + public String getTableSchemaName() { + return tableSchemaName; + } + + public int getPrecision() { + return precision; + } + + public int getScale() { + return scale; + } + + public String getTableName() { + return tableName; + } + + public String getCatalogName() { + return catalogName; + } + + public ElasticsearchType getEsType() { + return esType; + } + + public String getEsTypeName() { + return esTypeName; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/results/Cursor.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/results/Cursor.java new file mode 100644 index 0000000000..9988b6a319 --- /dev/null +++ 
package com.amazon.opendistroforelasticsearch.jdbc.internal.results;


import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Forward-only cursor over an in-memory result: a {@link Schema}
 * plus the fetched {@link Row}s.
 *
 * Restores the element types ({@code List<Row>},
 * {@code Map<String, Integer>}) that were missing (raw types):
 * rows are read via {@code Row.get(int)} and the label map is keyed
 * by column label.
 */
public class Cursor {
    private Schema schema;
    private List<Row> rows;
    // -1 means "before first row"; valid only after next() returns true
    private int currentRow = -1;
    private Map<String, Integer> labelToIndexMap;

    public Cursor(Schema schema, List<Row> rows) {
        this.schema = schema;
        this.rows = rows;
        initLabelToIndexMap();
    }

    public Schema getSchema() {
        return schema;
    }

    /**
     * Returns the value of a column in the current row.
     *
     * @param index 0-based column index
     *
     * @throws IllegalArgumentException if index is out of range
     */
    public Object getColumn(int index) {
        if (index < 0 || index >= getColumnCount())
            throw new IllegalArgumentException("Column Index out of range: " + index);
        return rows.get(currentRow).get(index);
    }

    public int getColumnCount() {
        return schema.getNumberOfColumns();
    }

    /**
     * Advances to the next row.
     *
     * @return true if a row is available, false once exhausted
     */
    public boolean next() {
        if (currentRow < rows.size() - 1) {
            currentRow++;
            return true;
        } else {
            return false;
        }
    }

    /**
     * @param label column label to look up
     *
     * @return the 0-based column index, or null if the label is unknown
     */
    public Integer findColumn(String label) {
        return labelToIndexMap.get(label);
    }

    // Builds the label -> index lookup once; on duplicate labels the
    // later column wins (HashMap.put overwrites).
    private void initLabelToIndexMap() {
        labelToIndexMap = new HashMap<>();
        for (int i = 0; i < schema.getNumberOfColumns(); i++) {
            ColumnMetaData columnMetaData = schema.getColumnMetaData(i);
            labelToIndexMap.put(columnMetaData.getLabel(), i);
        }
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.internal.results;

import java.util.List;

/**
 * A single result row: an ordered list of column values.
 *
 * Restores the {@code List<Object>} element type that was missing
 * (raw type); {@link #get(int)} already returned {@code Object}.
 */
public class Row {
    private List<Object> columnData;

    /**
     * @param columnData column values in result-schema order;
     *        the list is referenced, not copied
     */
    public Row(List<Object> columnData) {
        this.columnData = columnData;
    }

    /**
     * @param index 0-based column index
     *
     * @return the value of the column, possibly null
     */
    public Object get(int index) {
        return columnData.get(index);
    }
}
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.results; + +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; + +import java.util.List; + +/** + * Represents the schema for a query result + */ +public class Schema { + private final List columnMetaDataList; + private final int numberOfColumns; + + public Schema(List columnMetaDataList) { + this.columnMetaDataList = columnMetaDataList; + this.numberOfColumns = columnMetaDataList != null ? columnMetaDataList.size() : 0; + } + + /** + * @return Number of columns in result + */ + public int getNumberOfColumns() { + return this.numberOfColumns; + } + + /** + * Returns {@link ColumnMetaData} for a specific column in the result + * + * @param index the index of the column to return metadata for + * + * @return {@link ColumnMetaData} for the specified column + */ + public ColumnMetaData getColumnMetaData(int index) { + return columnMetaDataList.get(index); + } + + /** + * Returns the {@link ElasticsearchType} corresponding to a specific + * column in the result. 
+ * + * @param index the index of the column to return the type for + * + * @return {@link ElasticsearchType} for the specified column + */ + public ElasticsearchType getElasticsearchType(int index) { + return columnMetaDataList.get(index).getEsType(); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/AwsHostNameUtil.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/AwsHostNameUtil.java new file mode 100644 index 0000000000..6c8244d0bc --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/AwsHostNameUtil.java @@ -0,0 +1,82 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.util; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Utility methods to work with AWS format hostnames + */ +public class AwsHostNameUtil { + + private static final Pattern REGION_PATTERN = + Pattern.compile("^(?:.+\\.)?([a-z0-9-]+)$"); + + private static final Pattern KNOWN_HOSTNAME_PATTERN = + Pattern.compile("^(?:.+)?(\\.es\\.[a-z0-9]+\\.com)$"); + + /** + * Returns the region name contained in a specified endpoint based + * on known conventions for endpoint formats. + * + * @param hostname the hostname to parse + * + * @return the region parsed from the hostname, or + * null if region could not be determined. 
+ */ + public static String parseRegion(final String hostname) { + if (hostname == null) { + throw new IllegalArgumentException("hostname cannot be null"); + } + + String region = null; + int knownSuffixLength = 0; + + Matcher matcher = KNOWN_HOSTNAME_PATTERN.matcher(hostname); + if (matcher.matches()) { + knownSuffixLength = matcher.group(1).length(); + } + + if (knownSuffixLength > 0) { + // hostname has the format 'ABC.es.XYZ.com' + int index = hostname.length() - knownSuffixLength; + region = parseStandardRegionName(hostname.substring(0, index)); + } + + return region; + } + + /** + * Parses the region name from an endpoint fragment. + * + * @param fragment the portion of the endpoint up to the region name + * + * @return the parsed region name (or null if we can't tell for sure) + */ + private static String parseStandardRegionName(final String fragment) { + Matcher matcher = REGION_PATTERN.matcher(fragment); + if (matcher.matches()) { + // fragment is of the form 'domain-name.region' + // return the region component + return matcher.group(1); + } else { + return null; + } + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/JavaUtil.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/JavaUtil.java new file mode 100644 index 0000000000..3ee03cfc94 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/JavaUtil.java @@ -0,0 +1,24 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.util; + +public class JavaUtil { + + public static String getJavaVersion() { + return System.getProperty("java.version"); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/SqlParser.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/SqlParser.java new file mode 100644 index 0000000000..07fa760963 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/SqlParser.java @@ -0,0 +1,98 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.util; + + +/** + * Rudimentary SQL parser to help with very basic + * driver side validations. 
+ */ +public class SqlParser { + + public static int countParameterMarkers(String sql) { + if (sql == null) + return 0; + + int count = 0; + + for (int i=0; i < sql.length(); i++) { + + char ch = sql.charAt(i); + + switch (ch) { + case '\'': + case '\"': + i = locateQuoteEnd(sql, ch, i+1); + break; + case '?': + count++; + break; + case '-': + case '/': + i = locateCommentEnd(sql, ch, i); + } + + } + return count; + } + + private static int locateCommentEnd(String s, char commentStartChar, int commentStartIndex) { + if (commentStartIndex + 1 > s.length()) + return commentStartIndex; + + int idx; + + if (commentStartChar == '-' && s.charAt(commentStartIndex + 1) == '-') { + // single line comment + idx = locateLineEnd(s, commentStartIndex + 2); + + } else if (commentStartChar == '/' && s.charAt(commentStartIndex + 1) == '*') { + // multi line comment + idx = s.indexOf("*/", commentStartIndex + 2); + + } else { + // not on a comment + return commentStartIndex; + } + + if (idx == -1) + throw new IllegalArgumentException("SQL text contains an unterminated comment."); + else + return idx; + } + + private static int locateQuoteEnd(String s, char ch, int fromIndex) { + int idx = s.indexOf(ch, fromIndex); + if (idx == -1) + throw new IllegalArgumentException("SQL text contains an unterminated string. 
" + + "This could possibly be due to mismatched quotes in the statement."); + return idx; + } + + + private static int locateLineEnd(String s, int fromIndex) { + int idx; + + for (idx=fromIndex; idx < s.length(); idx++) { + char ch = s.charAt(idx); + + if (ch == '\r' || ch == '\n') + break; + } + return idx; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/UrlParser.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/UrlParser.java new file mode 100644 index 0000000000..5454eecb12 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/UrlParser.java @@ -0,0 +1,143 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.util; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.HostConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PathConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PortConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.UseSSLConnectionProperty; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Properties; +import java.util.StringTokenizer; + +public class UrlParser { + + public static final String URL_PREFIX = "jdbc:elasticsearch://"; + private static final int URL_PREFIX_LEN = URL_PREFIX.length(); + + private static final String SCHEME_DELIM = "://"; + + public static boolean isAcceptable(String url) { + return url != null && url.startsWith(URL_PREFIX); + } + + /** + * Parses a JDBC url and returns the url's components as a set of + * properties. + * + * URL format expected is: + * + * [driver prefix]://[scheme]://[host:[port]]/[path][?[propertyKey=value]&] + * + * scheme, host, port and path are extracted from the url and returned + * against the same property keys as their corresponding + * {@link ConnectionProperty} + * instances. + * + * Properties specified in the url query string are returned against + * the exact property key as is used in the url. + * + * If any property that is derived from the url format - such as scheme, + * host, port etc is also specified in url query string, then the value in + * the query string overrides the derived value. + * + * + * @param inputUrl + * @return scheme, host, port, path are returned using the same + * property key as corresponding ConnectionProperty instance. 
+ */ + public static Properties parseProperties(final String inputUrl) throws URISyntaxException { + + // TODO - support percent encoding for URL reserved characters + if (inputUrl == null || inputUrl.indexOf(URL_PREFIX) != 0) { + throw new URISyntaxException(inputUrl, + String.format("URL does not begin with the mandatory prefix %s.", URL_PREFIX)); + } + + final String trimmedUrl = inputUrl.trim(); + int schemeDelimIdx = trimmedUrl.indexOf(SCHEME_DELIM, URL_PREFIX_LEN); + + URI uri = null; + + if (schemeDelimIdx != -1){ + // user provided a scheme + uri = toURI(extractTargetUrl(trimmedUrl)); + } else if (URL_PREFIX_LEN < trimmedUrl.length()) { + // no scheme provided, but URL has more than just URL_PREFIX, + // so assume http:// scheme. + uri = toURI("http://"+extractTargetUrl(trimmedUrl)); + } + + Properties props = new Properties(); + + if (uri != null) { + String scheme = uri.getScheme(); + String host = uri.getHost(); + int port = uri.getPort(); + String path = uri.getPath(); + + if (host != null) + props.setProperty(HostConnectionProperty.KEY, host); + + if (port != -1) + props.setProperty(PortConnectionProperty.KEY, Integer.toString(port)); + + if (path != null && path.length() > 0) + props.setProperty(PathConnectionProperty.KEY, path); + + if ("https".equalsIgnoreCase(scheme)) { + props.setProperty(UseSSLConnectionProperty.KEY, "true"); + } else if ("http".equalsIgnoreCase(scheme)) { + props.setProperty(UseSSLConnectionProperty.KEY, "false"); + } else { + throw new URISyntaxException(inputUrl, "Invalid scheme:"+scheme+". Only http and https are supported."); + } + + String query = uri.getRawQuery(); + if (query != null) { + StringTokenizer tokenizer = new StringTokenizer(query, "&"); + while(tokenizer.hasMoreElements()) { + String kvp = tokenizer.nextToken(); + + String[] kv = kvp.split("="); + + if (kv.length != 2) { + throw new URISyntaxException(inputUrl, + "QueryString format of URL invalid. Found unexpected format at " + + kv[0] + + ". 
Expected key=value pairs"); + } else { + props.setProperty(kv[0], kv[1]); + } + } + } + } + + return props; + } + + private static String extractTargetUrl(final String url) { + return url.substring(URL_PREFIX_LEN); + } + + private static URI toURI(final String str) throws URISyntaxException { + return new URI(str); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/FilePrintWriterLogger.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/FilePrintWriterLogger.java new file mode 100644 index 0000000000..8391f799e7 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/FilePrintWriterLogger.java @@ -0,0 +1,42 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; + +public class FilePrintWriterLogger extends PrintWriterLogger { + + public FilePrintWriterLogger(String filePath, LogLevel logLevel, Layout layout) throws IOException { + super(new PrintWriter( + Files.newBufferedWriter( + Paths.get("").resolve(filePath), + StandardCharsets.UTF_8, + StandardOpenOption.CREATE, + StandardOpenOption.APPEND), true), logLevel, layout); + } + + @Override + public void close() { + super.close(); + printWriter.close(); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/Layout.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/Layout.java new file mode 100644 index 0000000000..12868d3728 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/Layout.java @@ -0,0 +1,21 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +public interface Layout { + String formatLogEntry(LogLevel severity, String message); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LogLevel.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LogLevel.java new file mode 100644 index 0000000000..9715096060 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LogLevel.java @@ -0,0 +1,93 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +/** + * Enumeration of possible Logging levels + */ +public enum LogLevel { + + /** + * Log nothing + */ + OFF(0), + + /** + * Log only fatal errors + */ + FATAL(10), + + /** + * Log all errors + */ + ERROR(20), + + /** + * Log all errors and warnings + */ + WARN(30), + + /** + * Log all errors, warnings and info messages + */ + INFO(40), + + /** + * Log everything up to INFO and any debug logs + */ + DEBUG(50), + + /** + * Log everything up to DEBUG and any additional fine grained + * trace logs + */ + TRACE(60), + + /** + * Log everything + */ + ALL(100); + + /** + * Internal severity level indicator for the log level + */ + private int severity; + + LogLevel(final int severity) { + this.severity = severity; + } + + /** + * @return internal severity level associated with the log level + */ + private int severity() { + return severity; + } + + /** + * Checks if this LogLevel corresponds to a severity level + * matching or exceeding the severity level of a specified LogLevel. + * + * @param level the logging level to compare this log level with + * + * @return true, if the severity of this log level matches or + * exceeds the severity of the specified log level, false otherwise. + */ + public boolean isGreaterThanOrEqualTo(LogLevel level) { + return this.severity() >= level.severity(); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/Logger.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/Logger.java new file mode 100644 index 0000000000..c9deaac1bc --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/Logger.java @@ -0,0 +1,150 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +import java.util.function.Supplier; + +public interface Logger { + + LogLevel getLevel(); + + void fatal(String message); + + void fatal(String message, Throwable t); + + default void fatal(Supplier messageSupplier) { + if (isFatalEnabled()) { + fatal(messageSupplier.get()); + } + } + + default void fatal(Supplier messageSupplier, Throwable t) { + if (isFatalEnabled()) { + fatal(messageSupplier.get(), t); + } + } + + void error(String message); + + void error(String message, Throwable t); + + default void error(Supplier messageSupplier) { + if (isErrorEnabled()) { + error(messageSupplier.get()); + } + } + + default void error(Supplier messageSupplier, Throwable t) { + if (isErrorEnabled()) { + error(messageSupplier.get(), t); + } + } + + void warn(String message); + + void warn(String message, Throwable t); + + default void warn(Supplier messageSupplier) { + if (isWarnEnabled()) { + warn(messageSupplier.get()); + } + } + + default void warn(Supplier messageSupplier, Throwable t) { + if (isWarnEnabled()) { + warn(messageSupplier.get(), t); + } + } + + void info(String message); + + void info(String message, Throwable t); + + default void info(Supplier messageSupplier) { + if (isInfoEnabled()) { + info(messageSupplier.get()); + } + } + + default void info(Supplier messageSupplier, Throwable t) { + if (isInfoEnabled()) { + info(messageSupplier.get(), t); + } + } + + void debug(String message); + + void debug(String message, Throwable t); + + default void debug(Supplier messageSupplier) { + if (isDebugEnabled()) { + 
debug(messageSupplier.get()); + } + } + + default void debug(Supplier messageSupplier, Throwable t) { + if (isDebugEnabled()) { + debug(messageSupplier.get(), t); + } + } + + void trace(String message); + + void trace(String message, Throwable t); + + default void trace(Supplier messageSupplier) { + if (isTraceEnabled()) { + trace(messageSupplier.get()); + } + } + + default void trace(Supplier messageSupplier, Throwable t) { + if (isTraceEnabled()) { + trace(messageSupplier.get(), t); + } + } + + default boolean isDebugEnabled() { + return getLevel().isGreaterThanOrEqualTo(LogLevel.DEBUG); + } + + default boolean isErrorEnabled() { + return isLevelEnabled(LogLevel.ERROR); + } + + default boolean isFatalEnabled() { + return isLevelEnabled(LogLevel.FATAL); + } + + default boolean isInfoEnabled() { + return isLevelEnabled(LogLevel.INFO); + } + + default boolean isTraceEnabled() { + return isLevelEnabled(LogLevel.TRACE); + } + + default boolean isWarnEnabled() { + return isLevelEnabled(LogLevel.WARN); + } + + default boolean isLevelEnabled(LogLevel level) { + return getLevel().isGreaterThanOrEqualTo(level); + } + + void close(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LoggerFactory.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LoggerFactory.java new file mode 100644 index 0000000000..fcca13f679 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LoggerFactory.java @@ -0,0 +1,43 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +import java.io.IOException; +import java.io.PrintWriter; + +public class LoggerFactory { + + public static Logger getLogger(String filePath, LogLevel logLevel) { + return getLogger(filePath, logLevel, StandardLayout.INSTANCE); + } + + public static Logger getLogger(String filePath, LogLevel logLevel, Layout layout) { + try { + return new FilePrintWriterLogger(filePath, logLevel, layout); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + + public static Logger getLogger(PrintWriter printWriter, LogLevel logLevel) { + return getLogger(printWriter, logLevel, StandardLayout.INSTANCE); + } + + public static Logger getLogger(PrintWriter printWriter, LogLevel logLevel, Layout layout) { + return new PrintWriterLogger(printWriter, logLevel, layout); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LoggingSource.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LoggingSource.java new file mode 100644 index 0000000000..74f7da5984 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/LoggingSource.java @@ -0,0 +1,96 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +import java.sql.SQLException; + +/** + * An entity that generates log messages containing an identifier for + * the source of the log message. + */ +public interface LoggingSource { + + default String logMessage(final String format, final Object... args) { + return logMessage(String.format(format, args)); + } + + default String logMessage(final String message) { + return buildMessage(message); + } + + default String logEntry(final String format, final Object... args) { + return logMessage(String.format(format, args) +" called"); + } + + default String logExit(final String message, final Object returnValue) { + return logMessage(message +" returning: "+returnValue); + } + + default String logExit(final String message) { + return logMessage(message +" returned"); + } + + default String getSource() { + return this.getClass().getSimpleName() + "@" + Integer.toHexString(this.hashCode()); + } + + default void logAndThrowSQLException(Logger log, SQLException sqlex) throws SQLException { + logAndThrowSQLException(log, LogLevel.ERROR, sqlex); + } + + default void logAndThrowSQLException(Logger log, LogLevel severity, SQLException sqlex) throws SQLException { + logAndThrowSQLException(log, severity, sqlex.getMessage(), sqlex); + } + + default void logAndThrowSQLException(Logger log, LogLevel severity, String message, SQLException sqlex) throws SQLException { + if (log.isLevelEnabled(severity)) { + + String logMessage = buildMessage(message); + + switch (severity) { + case OFF: + break; + case INFO: + log.info(logMessage, sqlex); + break; + case WARN: + log.warn(logMessage, sqlex); + break; + case DEBUG: + log.debug(logMessage, sqlex); + break; + case ERROR: + log.error(logMessage, sqlex); + break; + case FATAL: + log.fatal(logMessage, sqlex); + break; + case TRACE: + log.trace(logMessage, sqlex); + 
break; + case ALL: + log.error(logMessage, sqlex); + break; + } + } + throw sqlex; + } + + default String buildMessage(final String message) { + return "["+ getSource()+"] "+message; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/NoOpLogger.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/NoOpLogger.java new file mode 100644 index 0000000000..aa6a8eec76 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/NoOpLogger.java @@ -0,0 +1,138 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +/** + * An No-Op logger that indicates to a caller that all log levels are + * disabled and does nothing for any other logging method invocations. + * + * Classes that use a Logger would typically need to do a null + * check on the handle if the handle were to be null when logging is + * disabled. To avoid having such checks on every use of a logger, + * this no-op logger can be set as the logger when logging is disabled. + * + * The consumer of this logger sees all log levels as disabled and any + * inadvertent calls to log messages are a no-op. 
+ */ +public class NoOpLogger implements Logger { + + public static final NoOpLogger INSTANCE = new NoOpLogger(); + + private NoOpLogger() { + // singleton + } + + @Override + public LogLevel getLevel() { + return LogLevel.OFF; + } + + @Override + public void debug(String message) { + + } + + @Override + public void debug(String message, Throwable t) { + + } + + @Override + public void error(String message) { + + } + + @Override + public void error(String message, Throwable t) { + + } + + @Override + public void fatal(String message) { + + } + + @Override + public void fatal(String message, Throwable t) { + + } + + @Override + public void info(String message) { + + } + + @Override + public void info(String message, Throwable t) { + + } + + @Override + public boolean isDebugEnabled() { + return false; + } + + @Override + public boolean isErrorEnabled() { + return false; + } + + @Override + public boolean isFatalEnabled() { + return false; + } + + @Override + public boolean isInfoEnabled() { + return false; + } + + @Override + public boolean isTraceEnabled() { + return false; + } + + @Override + public boolean isWarnEnabled() { + return false; + } + + @Override + public void trace(String message) { + + } + + @Override + public void trace(String message, Throwable t) { + + } + + @Override + public void warn(String message) { + + } + + @Override + public void warn(String message, Throwable t) { + + } + + @Override + public void close() { + + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/PrintWriterLogger.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/PrintWriterLogger.java new file mode 100644 index 0000000000..1cb8ed0594 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/PrintWriterLogger.java @@ -0,0 +1,125 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +import java.io.PrintWriter; +import java.io.StringWriter; + +public class PrintWriterLogger implements Logger { + + protected final PrintWriter printWriter; + private LogLevel logLevel; + private Layout layout; + + public PrintWriterLogger(PrintWriter printWriter, LogLevel logLevel, Layout layout) { + this.printWriter = printWriter; + this.logLevel = logLevel == null ? LogLevel.OFF : logLevel; + this.layout = layout; + } + + @Override + public void fatal(String message) { + printMessage(LogLevel.FATAL, message); + } + + @Override + public void fatal(String message, Throwable t) { + printMessage(LogLevel.FATAL, message, t); + } + + @Override + public void error(String message) { + printMessage(LogLevel.ERROR, message); + } + + @Override + public void error(String message, Throwable t) { + printMessage(LogLevel.ERROR, message, t); + } + + @Override + public void warn(String message) { + printMessage(LogLevel.WARN, message); + } + + @Override + public void warn(String message, Throwable t) { + printMessage(LogLevel.WARN, message, t); + } + + private void printMessage(LogLevel severity, String message) { + printWriter.println(layout.formatLogEntry(severity, message)); + } + + private void printMessage(LogLevel severity, String message, Throwable t) { + String logMessage = buildMessageWithThrowable(message, t); + printWriter.println(layout.formatLogEntry(severity, logMessage)); + } + + 
private String buildMessageWithThrowable(String message, Throwable t) { + StringWriter stringWriter = new StringWriter(); + PrintWriter pw = new PrintWriter(stringWriter); + pw.println(message); + t.printStackTrace(pw); + pw.close(); + return stringWriter.toString(); + } + @Override + public void info(String message) { + printMessage(LogLevel.INFO, message); + } + + @Override + public void info(String message, Throwable t) { + printMessage(LogLevel.INFO, message, t); + } + + @Override + public void debug(String message) { + printMessage(LogLevel.DEBUG, message); + } + + @Override + public void debug(String message, Throwable t) { + printMessage(LogLevel.DEBUG, message, t); + } + + @Override + public void trace(String message) { + printMessage(LogLevel.TRACE, message); + } + + @Override + public void trace(String message, Throwable t) { + printMessage(LogLevel.TRACE, message, t); + } + + @Override + public boolean isDebugEnabled() { + return logLevel.isGreaterThanOrEqualTo(LogLevel.DEBUG); + } + + @Override + public LogLevel getLevel() { + return logLevel; + } + + @Override + public void close() { + printWriter.flush(); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/StandardLayout.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/StandardLayout.java new file mode 100644 index 0000000000..1cec15b6a4 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/logging/StandardLayout.java @@ -0,0 +1,40 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.logging; + +/** + * The default log entry layout for driver emitted logs + * + * Formats log entries with [timestamp][severity][thread-name] message + * + * Timestamp uses ISO format date and a 24 hour clock value up to + * milliseconds: [YYYY-mm-dd HH:MM:SS.mmm] + */ +public class StandardLayout implements Layout { + public static final StandardLayout INSTANCE = new StandardLayout(); + + private StandardLayout() { + // singleton + } + + @Override + public String formatLogEntry(LogLevel severity, String message) { + long time = System.currentTimeMillis(); + return String.format("[%tF %tT.%tL][%-5s][Thread-%s]%s", + time, time, time, severity, Thread.currentThread().getName(), message); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ClusterMetadata.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ClusterMetadata.java new file mode 100644 index 0000000000..a3bca481b3 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ClusterMetadata.java @@ -0,0 +1,27 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchVersion; + +public interface ClusterMetadata { + String getClusterName(); + + String getClusterUUID(); + + ElasticsearchVersion getVersion(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ColumnDescriptor.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ColumnDescriptor.java new file mode 100644 index 0000000000..e1d8891e82 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ColumnDescriptor.java @@ -0,0 +1,36 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +public interface ColumnDescriptor { + /** + * Column name + * @return + */ + String getName(); + + /** + * Label + */ + String getLabel(); + + /** + * Column data type + * @return + */ + String getType(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ConnectionResponse.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ConnectionResponse.java new file mode 100644 index 0000000000..f2796cdbba --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ConnectionResponse.java @@ -0,0 +1,21 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +public interface ConnectionResponse { + ClusterMetadata getClusterMetadata(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcDateTimeFormatter.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcDateTimeFormatter.java new file mode 100644 index 0000000000..8710ee8902 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcDateTimeFormatter.java @@ -0,0 +1,42 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import java.sql.Timestamp; +import java.time.format.DateTimeFormatter; +import java.sql.Date; + +public enum JdbcDateTimeFormatter { + + JDBC_FORMAT("yyyy-MM-dd", "yyyy-MM-dd HH:mm:ss"); + + private DateTimeFormatter dateFormatter; + private DateTimeFormatter timestampFormatter; + + JdbcDateTimeFormatter(String dateFormat, String timestampFormat) { + this.dateFormatter = DateTimeFormatter.ofPattern(dateFormat); + this.timestampFormatter = DateTimeFormatter.ofPattern(timestampFormat); + } + + public String format(Date date) { + return date.toLocalDate().format(dateFormatter); + } + + public String format(Timestamp date) { + return date.toLocalDateTime().format(timestampFormatter); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcQueryParam.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcQueryParam.java new file mode 100644 index 0000000000..819979d832 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcQueryParam.java @@ -0,0 +1,38 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +public class JdbcQueryParam implements Parameter { + private Object value; + + private String type; + + public JdbcQueryParam(String type, Object value) { + this.type = type; + this.value = value; + } + + @Override + public Object getValue() { + return value; + } + + @Override + public String getType() { + return type; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcQueryRequest.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcQueryRequest.java new file mode 100644 index 0000000000..cdd3a0f2c6 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JdbcQueryRequest.java @@ -0,0 +1,79 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import java.util.List; +import java.util.Objects; + +public class JdbcQueryRequest implements QueryRequest { + + private String statement; + private int fetchSize; + List parameters; + + public JdbcQueryRequest(String sql) { + this.statement = sql; + } + + public JdbcQueryRequest(String sql, int fetchSize) { + this.statement = sql; + this.fetchSize = fetchSize; + } + + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof JdbcQueryRequest)) return false; + JdbcQueryRequest that = (JdbcQueryRequest) o; + return Objects.equals(statement, that.statement) && + Objects.equals(getParameters(), that.getParameters()); + } + + @Override + public int hashCode() { + return Objects.hash(statement, getParameters()); + } + + @Override + public String getQuery() { + return statement; + } + + @Override + public List getParameters() { + return parameters; + } + + public void setParameters(List parameters) { + this.parameters = parameters; + } + + @Override + public int getFetchSize() { + return fetchSize; + } + + @Override + public String toString() { + return "JdbcQueryRequest{" + + "statement='" + statement + '\'' + + ", fetchSize='" + fetchSize + '\'' + + ", parameters=" + parameters + + '}'; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/Parameter.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/Parameter.java new file mode 100644 index 0000000000..8a607fc611 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/Parameter.java @@ -0,0 +1,24 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +public interface Parameter { + + Object getValue(); + + String getType(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/Protocol.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/Protocol.java new file mode 100644 index 0000000000..1d531a58b7 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/Protocol.java @@ -0,0 +1,30 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; + +import java.io.IOException; + +public interface Protocol extends AutoCloseable { + + ConnectionResponse connect(int timeout) throws ResponseException, IOException; + + QueryResponse execute(QueryRequest request) throws ResponseException, IOException; + + void close() throws IOException; +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ProtocolFactory.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ProtocolFactory.java new file mode 100644 index 0000000000..b4b402af2d --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/ProtocolFactory.java @@ -0,0 +1,24 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.transport.Transport; + +public interface ProtocolFactory

{ + P getProtocol(ConnectionConfig config, T transport); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/QueryRequest.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/QueryRequest.java new file mode 100644 index 0000000000..e247f6e410 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/QueryRequest.java @@ -0,0 +1,29 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import java.util.List; + +public interface QueryRequest { + + String getQuery(); + + List getParameters(); + + public int getFetchSize(); + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/QueryResponse.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/QueryResponse.java new file mode 100644 index 0000000000..c39e36c0a2 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/QueryResponse.java @@ -0,0 +1,36 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import java.util.List; + +public interface QueryResponse { + + List getColumnDescriptors(); + + List> getDatarows(); + + long getTotal(); + + long getSize(); + + int getStatus(); + + String getCursor(); + + RequestError getError(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/RequestError.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/RequestError.java new file mode 100644 index 0000000000..b7476601ef --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/RequestError.java @@ -0,0 +1,25 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +public interface RequestError { + String getReason(); + + String getDetails(); + + String getType(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/InternalServerErrorException.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/InternalServerErrorException.java new file mode 100644 index 0000000000..3cd98bd4fd --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/InternalServerErrorException.java @@ -0,0 +1,50 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions; + +import java.sql.SQLNonTransientException; + +public class InternalServerErrorException extends SQLNonTransientException { + + String reason; + String type; + String details; + + public InternalServerErrorException(String reason, String type, String details) { + this.reason = reason; + this.type = type; + this.details = details; + } + + public String getReason() { + return reason; + } + + public String getType() { + return type; + } + + public String getDetails() { + return details; + } + + @Override + public String toString() { + return "Internal Server Error. Reason: "+ reason+". " + + "Type: "+ type+". 
Details: "+ details; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/MalformedResponseException.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/MalformedResponseException.java new file mode 100644 index 0000000000..844696ee2c --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/MalformedResponseException.java @@ -0,0 +1,40 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions; + +/** + * Exception thrown when a malformed response is received from the + * server.
+ */ +public class MalformedResponseException extends ResponseException { + + public MalformedResponseException() { + } + + public MalformedResponseException(String message) { + super(message); + } + + public MalformedResponseException(String message, Throwable cause) { + super(message, cause); + } + + public MalformedResponseException(Throwable cause) { + super(cause); + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/ResponseException.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/ResponseException.java new file mode 100644 index 0000000000..cbe0154192 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/exceptions/ResponseException.java @@ -0,0 +1,63 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions; + +import java.io.IOException; +import java.sql.SQLException; + +/** + * Exception thrown when an unexpected server response is received. 
+ */ +public class ResponseException extends Exception { + private String responsePayload = null; + + public ResponseException() { + } + + public ResponseException(String message) { + super(message); + } + + public ResponseException(String message, Throwable cause, String responsePayload) { + super(message, cause); + this.responsePayload = responsePayload; + } + + public ResponseException(String message, Throwable cause) { + super(message, cause); + } + + public ResponseException(Throwable cause) { + super(cause); + } + + public String getResponsePayload() { + return responsePayload; + } + + @Override + public String getLocalizedMessage() { + String localizedMessage = super.getLocalizedMessage(); + + if (responsePayload != null) { + localizedMessage = (localizedMessage == null ? "" : localizedMessage) + + " Raw response received: " + getResponsePayload(); + } + + return localizedMessage; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/HttpException.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/HttpException.java new file mode 100644 index 0000000000..5603243da2 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/HttpException.java @@ -0,0 +1,66 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; + +/** + * Exception thrown when an unexpected HTTP response code is + * received from the server. + */ +public class HttpException extends ResponseException { + + private int statusCode; + + /** + * @param statusCode HTTP Status code due to which this exception is raised. + * @param message Message associated with the exception - can be the HTTP + * reason phrase corresponding to the status code. + */ + public HttpException(int statusCode, String message) { + super(message); + this.statusCode = statusCode; + } + + public HttpException(int statusCode, String message, Throwable cause, String responsePayload) { + super(message, cause, responsePayload); + this.statusCode = statusCode; + } + + /** + * Returns the HTTP response status code that resulted in + * this exception. + * + * @return HTTP status code + */ + public int getStatusCode() { + return statusCode; + } + + @Override + public String getLocalizedMessage() { + String message = getMessage(); + String localizedMessage = "HTTP Code: " + statusCode + + ". Message: " + (message == null ? "None" : message) + "."; + + if (this.getResponsePayload() != null) { + localizedMessage += " Raw response received: " + getResponsePayload(); + } + + return localizedMessage; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/HttpResponseHandler.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/HttpResponseHandler.java new file mode 100644 index 0000000000..6540cacc18 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/HttpResponseHandler.java @@ -0,0 +1,25 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import org.apache.http.HttpResponse; + +public interface HttpResponseHandler { + + T handleResponse(HttpResponse response) throws ResponseException; +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JdbcCursorQueryRequest.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JdbcCursorQueryRequest.java new file mode 100644 index 0000000000..048c4a3768 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JdbcCursorQueryRequest.java @@ -0,0 +1,75 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.JdbcQueryParam;; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryRequest; + + +import java.util.List; +import java.util.Objects; + +/** + * Bean to encapsulate cursor ID + * + * @author abbas hussain + * @since 07.05.20 + **/ +public class JdbcCursorQueryRequest implements QueryRequest { + + String cursor; + + public JdbcCursorQueryRequest(String cursor) { + this.cursor = cursor; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof JdbcCursorQueryRequest)) return false; + JdbcCursorQueryRequest that = (JdbcCursorQueryRequest) o; + return Objects.equals(cursor, that.cursor) && + Objects.equals(getParameters(), that.getParameters()); + } + + @Override + public int hashCode() { + return Objects.hash(cursor, getParameters()); + } + + @Override + public String getQuery() { + return cursor; + } + + @Override + public List getParameters() { + return null; + } + + @Override + public int getFetchSize() { + return 0; + } + + @Override + public String toString() { + return "JdbcQueryRequest{" + + "cursor='" + cursor + '\'' + + '}'; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonClusterMetadata.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonClusterMetadata.java new file mode 100644 index 0000000000..865b82120b --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonClusterMetadata.java @@ -0,0 +1,61 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ClusterMetadata; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class JsonClusterMetadata implements ClusterMetadata { + + @JsonProperty("cluster_name") + private String clusterName; + + @JsonProperty("cluster_uuid") + private String clusterUUID; + + @JsonProperty("version") + private JsonElasticsearchVersion version; + + @Override + public String getClusterName() { + return clusterName; + } + + @Override + public String getClusterUUID() { + return clusterUUID; + } + + @Override + public JsonElasticsearchVersion getVersion() { + return version; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public void setClusterUUID(String clusterUUID) { + this.clusterUUID = clusterUUID; + } + + public void setVersion(JsonElasticsearchVersion version) { + this.version = version; + } +} \ No newline at end of file diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonConnectionResponse.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonConnectionResponse.java new file mode 100644 index 0000000000..b5ebd300f4 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonConnectionResponse.java @@ -0,0 +1,33 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ConnectionResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ClusterMetadata; + +public class JsonConnectionResponse implements ConnectionResponse { + private ClusterMetadata clusterMetadata; + + public JsonConnectionResponse(ClusterMetadata clusterMetadata) { + this.clusterMetadata = clusterMetadata; + } + + @Override + public ClusterMetadata getClusterMetadata() { + return clusterMetadata; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorHttpProtocol.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorHttpProtocol.java new file mode 100644 index 0000000000..882528f1dc --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorHttpProtocol.java @@ -0,0 +1,67 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpTransport; +import org.apache.http.client.methods.CloseableHttpResponse; + +import java.io.IOException; +import java.io.InputStream; + +/** + * Http protocol for cursor request and response + * + * @author abbas hussain + * @since 07.05.20 + **/ +public class JsonCursorHttpProtocol extends JsonHttpProtocol { + + public JsonCursorHttpProtocol(HttpTransport transport) { + this(transport, DEFAULT_SQL_CONTEXT_PATH); + } + + public JsonCursorHttpProtocol(HttpTransport transport, String sqlContextPath) { + super(transport, sqlContextPath); + } + + @Override + public QueryResponse execute(QueryRequest request) throws ResponseException, IOException { + try (CloseableHttpResponse response = getTransport().doPost( + getSqlContextPath(), + defaultJsonHeaders, + defaultJdbcParams, + buildQueryRequestBody(request), 0)) { + + return getJsonHttpResponseHandler().handleResponse(response, this::processQueryResponse); + + } + } + + private String buildQueryRequestBody(QueryRequest queryRequest) throws IOException { + JsonCursorQueryRequest jsonQueryRequest = new JsonCursorQueryRequest(queryRequest); + String requestBody = mapper.writeValueAsString(jsonQueryRequest); + return requestBody; + } + + private JsonQueryResponse processQueryResponse(InputStream contentStream) throws IOException { + return mapper.readValue(contentStream, JsonQueryResponse.class); + } + +} diff --git 
a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorHttpProtocolFactory.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorHttpProtocolFactory.java new file mode 100644 index 0000000000..5c2cf77599 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorHttpProtocolFactory.java @@ -0,0 +1,41 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ *
+ */
+
+package com.amazon.opendistroforelasticsearch.jdbc.protocol.http;
+
+import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig;
+import com.amazon.opendistroforelasticsearch.jdbc.protocol.ProtocolFactory;
+import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpTransport;
+
+/**
+ * Factory to create JsonCursorHttpProtocol objects
+ *
+ * @author abbas hussain
+ * @since 07.05.20
+ */
+public class JsonCursorHttpProtocolFactory implements ProtocolFactory {
+
+    public static final JsonCursorHttpProtocolFactory INSTANCE = new JsonCursorHttpProtocolFactory();
+
+    private JsonCursorHttpProtocolFactory() {
+
+    }
+
+    @Override
+    public JsonCursorHttpProtocol getProtocol(ConnectionConfig connectionConfig, HttpTransport transport) {
+        return new JsonCursorHttpProtocol(transport);
+    }
+}
diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorQueryRequest.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorQueryRequest.java
new file mode 100644
index 0000000000..b2b3c0653a
--- /dev/null
+++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorQueryRequest.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.Parameter; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryRequest; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.List; + +/** + * Definition of json cursor request + * + * @author abbas hussain + * @since 07.05.20 + **/ +public class JsonCursorQueryRequest implements QueryRequest { + + private final String cursor; + + public JsonCursorQueryRequest(QueryRequest queryRequest) { + this.cursor = queryRequest.getQuery(); + } + + @JsonProperty("cursor") + @Override + public String getQuery() { + return cursor; + } + + @JsonIgnore + @Override + public List getParameters() { + return null; + } + + @JsonIgnore + @Override + public int getFetchSize() { + return 0; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonElasticsearchVersion.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonElasticsearchVersion.java new file mode 100644 index 0000000000..faa6eb1a7d --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonElasticsearchVersion.java @@ -0,0 +1,70 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchVersion; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class JsonElasticsearchVersion implements ElasticsearchVersion { + + private String fullVersion; + private int[] version = new int[3]; + + public JsonElasticsearchVersion(@JsonProperty("number") String fullVersion) { + if (fullVersion == null) + return; + + this.fullVersion = fullVersion; + String[] versionTokens = fullVersion.split("[.-]"); + + for (int i=0; i Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ClusterMetadata; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ConnectionResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.Protocol; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpParam; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpTransport; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.http.Header; +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.message.BasicHeader; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; + +public class JsonHttpProtocol implements Protocol { + + // the value is based on the API endpoint the sql plugin sets up, + // but this could be made configurable if required + public static final String DEFAULT_SQL_CONTEXT_PATH = "/_opendistro/_sql"; + + private static final Header acceptJson = new BasicHeader(HttpHeaders.ACCEPT, "application/json"); + private static final Header contentTypeJson = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"); + private static final HttpParam requestJdbcFormatParam = new HttpParam("format", "jdbc"); + protected static final Header[] defaultJsonHeaders = new Header[]{acceptJson, contentTypeJson}; + private static final Header[] defaultEmptyRequestBodyJsonHeaders = new Header[]{acceptJson}; + protected static final HttpParam[] defaultJdbcParams = new HttpParam[]{requestJdbcFormatParam}; + + protected static final ObjectMapper mapper = new ObjectMapper(); + private String sqlContextPath; + private HttpTransport 
transport; + private JsonHttpResponseHandler jsonHttpResponseHandler; + + public JsonHttpProtocol(HttpTransport transport) { + this(transport, DEFAULT_SQL_CONTEXT_PATH); + } + + public JsonHttpProtocol(HttpTransport transport, String sqlContextPath) { + this.transport = transport; + this.sqlContextPath = sqlContextPath; + this.jsonHttpResponseHandler = new JsonHttpResponseHandler(this); + } + + public String getSqlContextPath() { + return sqlContextPath; + } + + public HttpTransport getTransport() { + return this.transport; + } + + public JsonHttpResponseHandler getJsonHttpResponseHandler() { + return this.jsonHttpResponseHandler; + } + + @Override + public ConnectionResponse connect(int timeout) throws ResponseException, IOException { + try (CloseableHttpResponse response = transport.doGet( + "/", + defaultEmptyRequestBodyJsonHeaders, + null, timeout)) { + + return jsonHttpResponseHandler.handleResponse(response, this::processConnectionResponse); + + } + } + + @Override + public QueryResponse execute(QueryRequest request) throws ResponseException, IOException { + try (CloseableHttpResponse response = transport.doPost( + sqlContextPath, + defaultJsonHeaders, + defaultJdbcParams, + buildQueryRequestBody(request), 0)) { + + return jsonHttpResponseHandler.handleResponse(response, this::processQueryResponse); + + } + } + + private String buildQueryRequestBody(QueryRequest queryRequest) throws IOException { + JsonQueryRequest jsonQueryRequest = new JsonQueryRequest(queryRequest); + String requestBody = mapper.writeValueAsString(jsonQueryRequest); + return requestBody; + } + + @Override + public void close() throws IOException { + this.transport.close(); + } + + private JsonConnectionResponse processConnectionResponse(InputStream contentStream) throws IOException { + ClusterMetadata clusterMetadata = mapper.readValue(contentStream, JsonClusterMetadata.class); + return new JsonConnectionResponse(clusterMetadata); + } + + private JsonQueryResponse 
processQueryResponse(InputStream contentStream) throws IOException { + return mapper.readValue(contentStream, JsonQueryResponse.class); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonHttpProtocolFactory.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonHttpProtocolFactory.java new file mode 100644 index 0000000000..255251e1db --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonHttpProtocolFactory.java @@ -0,0 +1,36 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ *
+ */
+
+package com.amazon.opendistroforelasticsearch.jdbc.protocol.http;
+
+import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig;
+import com.amazon.opendistroforelasticsearch.jdbc.protocol.ProtocolFactory;
+import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpTransport;
+
+
+public class JsonHttpProtocolFactory implements ProtocolFactory {
+
+    public static final JsonHttpProtocolFactory INSTANCE = new JsonHttpProtocolFactory();
+
+    private JsonHttpProtocolFactory() {
+
+    }
+
+    @Override
+    public JsonHttpProtocol getProtocol(ConnectionConfig connectionConfig, HttpTransport transport) {
+        return new JsonHttpProtocol(transport);
+    }
+}
diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonHttpResponseHandler.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonHttpResponseHandler.java
new file mode 100644
index 0000000000..92e89a48d3
--- /dev/null
+++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonHttpResponseHandler.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.MalformedResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.fasterxml.jackson.core.JsonProcessingException; +import org.apache.http.HttpResponse; +import org.apache.http.HttpStatus; +import org.apache.http.util.EntityUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class JsonHttpResponseHandler { + + protected JsonHttpProtocol protocol; + + public static final Set DEFAULT_ACCEPTABLE_HTTP_CODES = + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(HttpStatus.SC_OK))); + + public JsonHttpResponseHandler(JsonHttpProtocol protocol) { + this.protocol = protocol; + } + + public R handleResponse(HttpResponse response, JsonContentParser contentParser) throws ResponseException { + return handleResponse(response, null, true, contentParser); + } + + public R handleResponse(HttpResponse response, Set acceptableHttpStatusCodes, + boolean expectResponseBody, JsonContentParser contentParser) throws ResponseException { + + try { + checkResponseForErrors(response, acceptableHttpStatusCodes, expectResponseBody); + + try (InputStream responseContent = response.getEntity().getContent()) { + return contentParser.apply(responseContent); + } + + } catch (JsonProcessingException jpe) { + throw new MalformedResponseException("Could not process server response", jpe); + } catch (IOException ioe) { + throw new ResponseException("Error reading server response", ioe); + } + } + + /** + * Checks if an HttpResponse meets the requirements to be accepted. 
+     *
+     * @param response HttpResponse to check
+     *
+     * @throws ResponseException if the HttpResponse fails any of the checks
+     * @throws IOException if there is an I/O exception when reading the
+     *         HttpResponse
+     */
+    protected void checkResponseForErrors(HttpResponse response, Set acceptableHttpStatusCodes,
+                                          boolean expectResponseBodyContent) throws ResponseException, IOException {
+        int statusCode = response.getStatusLine().getStatusCode();
+
+        if (!isHttpStatusCodeAcceptable(statusCode, acceptableHttpStatusCodes)) {
+
+            String responseBody = extractResponseBody(response);
+
+            HttpException httpException = new HttpException(statusCode,
+                    response.getStatusLine().getReasonPhrase(), null, responseBody);
+
+            if (statusCode == HttpStatus.SC_METHOD_NOT_ALLOWED) {
+                throw new ResponseException("Could not submit SQL request to the target server. " +
+                        "Make sure the SQL plugin is installed on the server and responding on the " +
+                        "\"" + protocol.getSqlContextPath() + "\" context path.", httpException);
+            } else {
+                throw httpException;
+            }
+        }
+
+        if (expectResponseBodyContent &&
+                (response.getEntity() == null || response.getEntity().getContent() == null)) {
+            throw new MalformedResponseException("Empty response.");
+        }
+    }
+
+
+    /**
+     * Reads and returns the entire response body content present in an
+     * HttpResponse as a String.
+     *

+ * This is meant to be used only in cases where a request to the + * server has failed. In such cases we check the response + * body contains any text or json content which could potentially + * be helpful for a user to understand why the request failed. + *

+ * We expect such content to only contain error messages of a limited + * size, so it's ok to read in the entire response body as a String. + * + * @param response HttpResponse to extract the response body from + * + * @return the response body as a String or null if no response body + * is present + */ + protected String extractResponseBody(HttpResponse response) { + // TODO - limit the amount read from response.getEntity() ? + String responseBody = null; + try { + if (response.getEntity() != null && response.getEntity().getContent() != null && + response.getEntity().getContentType() != null && + response.getEntity().getContentType().getValue() != null && + (response.getEntity().getContentType().getValue().contains("application/json") || + response.getEntity().getContentType().getValue().contains("text/plain"))) { + responseBody = EntityUtils.toString(response.getEntity()); + } + + } catch (IOException ioe) { + // ignore + } + return responseBody; + } + + /** + * HTTP Status codes that indicate success for this response + * handler + * + * @return Set of HTTP Status codes that indicate successful requests. 
+ */ + protected Set getAcceptableHttpStatusCodes() { + return DEFAULT_ACCEPTABLE_HTTP_CODES; + } + + private boolean isHttpStatusCodeAcceptable(int statusCode, Set acceptableHttpStatusCodes) { + + if (acceptableHttpStatusCodes == null) + acceptableHttpStatusCodes = DEFAULT_ACCEPTABLE_HTTP_CODES; + + return acceptableHttpStatusCodes.contains(statusCode); + } + + @FunctionalInterface + public interface JsonContentParser { + R apply(T t) throws IOException; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonQueryRequest.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonQueryRequest.java new file mode 100644 index 0000000000..b79eb5cb1b --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonQueryRequest.java @@ -0,0 +1,56 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.Parameter; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryRequest; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.List; + +public class JsonQueryRequest implements QueryRequest { + + private String query; + private int fetchSize; + private List parameters; + + public JsonQueryRequest(QueryRequest queryRequest) { + this.query = queryRequest.getQuery(); + this.parameters = queryRequest.getParameters(); + this.fetchSize = queryRequest.getFetchSize(); + + } + + @Override + public String getQuery() { + return query; + } + + @JsonInclude(Include.NON_NULL) + @Override + public List getParameters() { + return parameters; + } + + @JsonProperty("fetch_size") + @Override + public int getFetchSize() { + return fetchSize; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonQueryResponse.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonQueryResponse.java new file mode 100644 index 0000000000..f12823ce95 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonQueryResponse.java @@ -0,0 +1,256 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ColumnDescriptor; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.RequestError; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.List; +import java.util.Objects; + +/** + * {@link QueryResponse} implementation for the JSON protocol + */ +public class JsonQueryResponse implements QueryResponse { + + private List schema; + + private List> datarows; + + private int size; + + private int total; + + private int status; + + private String cursor; + + private JsonRequestError error; + + @Override + public List getColumnDescriptors() { + return schema; + } + + @Override + public List> getDatarows() { + return datarows; + } + + public void setSchema(List schema) { + this.schema = schema; + } + + public void setDatarows(List> datarows) { + this.datarows = datarows; + } + + public void setSize(int size) { + this.size = size; + } + + public void setTotal(int total) { + this.total = total; + } + + public void setStatus(int status) { + this.status = status; + } + + public void setCursor(String cursor) { + this.cursor = cursor; + } + + public void setError(JsonRequestError error) { + this.error = error; + } + + @Override + public long getTotal() { + return total; + } + + @Override + public long getSize() { + return size; + } + + @Override + public int getStatus() { + return status; + } + + @Override + public String getCursor() { + return cursor; + } + + @Override + public RequestError getError() { + return error; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof 
JsonQueryResponse)) return false;
+        JsonQueryResponse response = (JsonQueryResponse) o;
+        return getSize() == response.getSize() &&
+                getTotal() == response.getTotal() &&
+                getStatus() == response.getStatus() &&
+                Objects.equals(getCursor(), response.getCursor()) &&
+                Objects.equals(schema, response.schema) &&
+                Objects.equals(getDatarows(), response.getDatarows()) &&
+                Objects.equals(getError(), response.getError());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(schema, getDatarows(), getSize(), getTotal(), getStatus(), getCursor(), getError());
+    }
+
+    @Override
+    public String toString() {
+        return "JsonQueryResponse{" +
+                "schema=" + schema +
+                ", cursor=" + cursor +
+                ", datarows=" + datarows +
+                ", size=" + size +
+                ", total=" + total +
+                ", status=" + status +
+                ", error=" + error +
+                '}';
+    }
+
+
+    @JsonIgnoreProperties(ignoreUnknown = true)
+    public static class SchemaEntry implements ColumnDescriptor {
+        private String name;
+        private String type;
+        private String label;
+
+        @JsonCreator
+        public SchemaEntry(@JsonProperty("name") String name, @JsonProperty("type") String type,
+                           @JsonProperty("alias") String label) {
+            this.name = name;
+            this.type = type;
+            this.label = label;
+        }
+
+        @Override
+        public String getName() {
+            return name;
+        }
+
+        @Override
+        public String getType() {
+            return type;
+        }
+
+        @Override
+        public String getLabel() {
+            return label;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (!(o instanceof SchemaEntry)) return false;
+            SchemaEntry that = (SchemaEntry) o;
+            return Objects.equals(getName(), that.getName()) &&
+                    Objects.equals(getType(), that.getType()) &&
+                    Objects.equals(getLabel(), that.getLabel());
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(getName(), getType(), getLabel());
+        }
+
+        @Override
+        public String toString() {
+            return "SchemaEntry{" +
+                    "name='" + name + '\'' +
+                    ", type='" + type + '\'' +
+                    ", label='" + label + '\'' +
+                    '}';
+        }
+    }
+
+    @JsonIgnoreProperties(ignoreUnknown = true)
+    public static class JsonRequestError implements RequestError {
+
+        private String reason;
+        private String details;
+        private String type;
+
+        public void setReason(String reason) {
+            this.reason = reason;
+        }
+
+        public void setDetails(String details) {
+            this.details = details;
+        }
+
+        public void setType(String type) {
+            this.type = type;
+        }
+
+        @Override
+        public String getReason() {
+            return reason;
+        }
+
+        @Override
+        public String getDetails() {
+            return details;
+        }
+
+        @Override
+        public String getType() {
+            return type;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (!(o instanceof JsonRequestError)) return false;
+            JsonRequestError that = (JsonRequestError) o;
+            return Objects.equals(getReason(), that.getReason()) &&
+                    Objects.equals(getDetails(), that.getDetails()) &&
+                    Objects.equals(getType(), that.getType());
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(getReason(), getDetails(), getType());
+        }
+
+        @Override
+        public String toString() {
+            return "JsonRequestError{" +
+                    "reason='" + reason + '\'' +
+                    ", details='" + Objects.hashCode(details) + '\'' +
+                    ", type='" + type + '\'' +
+                    '}';
+        }
+    }
+}
diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/Transport.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/Transport.java
new file mode 100644
index 0000000000..fe1e99b1dd
--- /dev/null
+++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/Transport.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * or in the "license" file accompanying this file.
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport; + +public interface Transport { + + void close() throws TransportException; + + void setReadTimeout(int timeout); + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/TransportException.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/TransportException.java new file mode 100644 index 0000000000..4ad576e313 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/TransportException.java @@ -0,0 +1,39 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
/**
 * Signals a failure in the transport layer. Extends {@link IOException} so
 * callers that treat transport problems as I/O failures keep working.
 */
public class TransportException extends IOException {

    /** Creates an exception with no detail message or cause. */
    public TransportException() {
        super();
    }

    /**
     * @param message detail message describing the failure
     */
    public TransportException(String message) {
        super(message);
    }

    /**
     * @param message detail message describing the failure
     * @param cause underlying exception that triggered this one
     */
    public TransportException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * @param cause underlying exception that triggered this one
     */
    public TransportException(Throwable cause) {
        super(cause);
    }
}
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; + +public interface TransportFactory { + + T getTransport(ConnectionConfig config, Logger log, String userAgent) throws TransportException; +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpClientConnectionFactory.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpClientConnectionFactory.java new file mode 100644 index 0000000000..3fa6a54d11 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpClientConnectionFactory.java @@ -0,0 +1,160 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +/* + * ==================================================================== + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import org.apache.commons.logging.Log; +import org.apache.http.HttpRequest; +import org.apache.http.HttpResponse; +import org.apache.http.annotation.Contract; +import org.apache.http.annotation.ThreadingBehavior; +import org.apache.http.config.ConnectionConfig; +import org.apache.http.conn.HttpConnectionFactory; +import org.apache.http.conn.ManagedHttpClientConnection; +import org.apache.http.conn.routing.HttpRoute; +import org.apache.http.entity.ContentLengthStrategy; +import org.apache.http.impl.conn.DefaultHttpResponseParserFactory; +import org.apache.http.impl.entity.LaxContentLengthStrategy; +import org.apache.http.impl.entity.StrictContentLengthStrategy; +import org.apache.http.impl.io.DefaultHttpRequestWriterFactory; +import org.apache.http.io.HttpMessageParserFactory; +import org.apache.http.io.HttpMessageWriterFactory; + +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CodingErrorAction; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Adapted from Apache HttpClient to serve as a Factory for + * {@link ManagedHttpClientConnection} instances that offer per + * connection HTTP wire 
logging. + * + */ +@Contract(threading = ThreadingBehavior.IMMUTABLE_CONDITIONAL) +public class ApacheHttpClientConnectionFactory + implements HttpConnectionFactory { + + private static final AtomicLong COUNTER = new AtomicLong(); + + private Log log; + + private final HttpMessageWriterFactory requestWriterFactory; + private final HttpMessageParserFactory responseParserFactory; + private final ContentLengthStrategy incomingContentStrategy; + private final ContentLengthStrategy outgoingContentStrategy; + + /** + * @since 4.4 + */ + public ApacheHttpClientConnectionFactory( + final HttpMessageWriterFactory requestWriterFactory, + final HttpMessageParserFactory responseParserFactory, + final ContentLengthStrategy incomingContentStrategy, + final ContentLengthStrategy outgoingContentStrategy) { + super(); + this.requestWriterFactory = requestWriterFactory != null ? requestWriterFactory : + DefaultHttpRequestWriterFactory.INSTANCE; + this.responseParserFactory = responseParserFactory != null ? responseParserFactory : + DefaultHttpResponseParserFactory.INSTANCE; + this.incomingContentStrategy = incomingContentStrategy != null ? incomingContentStrategy : + LaxContentLengthStrategy.INSTANCE; + this.outgoingContentStrategy = outgoingContentStrategy != null ? 
outgoingContentStrategy : + StrictContentLengthStrategy.INSTANCE; + } + + public ApacheHttpClientConnectionFactory( + final HttpMessageWriterFactory requestWriterFactory, + final HttpMessageParserFactory responseParserFactory) { + this(requestWriterFactory, responseParserFactory, null, null); + } + + public ApacheHttpClientConnectionFactory( + final HttpMessageParserFactory responseParserFactory) { + this(null, responseParserFactory); + } + + public ApacheHttpClientConnectionFactory() { + this(null, null); + } + + public ApacheHttpClientConnectionFactory(Log log) { + this(null, null); + this.log = log; + } + + @Override + public ManagedHttpClientConnection create(final HttpRoute route, final ConnectionConfig config) { + final ConnectionConfig cconfig = config != null ? config : ConnectionConfig.DEFAULT; + CharsetDecoder chardecoder = null; + CharsetEncoder charencoder = null; + final Charset charset = cconfig.getCharset(); + final CodingErrorAction malformedInputAction = cconfig.getMalformedInputAction() != null ? + cconfig.getMalformedInputAction() : CodingErrorAction.REPORT; + final CodingErrorAction unmappableInputAction = cconfig.getUnmappableInputAction() != null ? 
+ cconfig.getUnmappableInputAction() : CodingErrorAction.REPORT; + if (charset != null) { + chardecoder = charset.newDecoder(); + chardecoder.onMalformedInput(malformedInputAction); + chardecoder.onUnmappableCharacter(unmappableInputAction); + charencoder = charset.newEncoder(); + charencoder.onMalformedInput(malformedInputAction); + charencoder.onUnmappableCharacter(unmappableInputAction); + } + final String id = "http-outgoing-" + Long.toString(COUNTER.getAndIncrement()); + return new LoggingManagedHttpClientConnection( + id, + log, + cconfig.getBufferSize(), + cconfig.getFragmentSizeHint(), + chardecoder, + charencoder, + cconfig.getMessageConstraints(), + incomingContentStrategy, + outgoingContentStrategy, + requestWriterFactory, + responseParserFactory); + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpTransport.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpTransport.java new file mode 100644 index 0000000000..4a33e5a58f --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpTransport.java @@ -0,0 +1,271 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import com.amazon.opendistroforelasticsearch.jdbc.auth.AuthenticationType; +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportException; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.auth.aws.AWSRequestSigningApacheInterceptor; +import com.amazonaws.auth.AWS4Signer; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import org.apache.http.Header; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.UsernamePasswordCredentials; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.config.SocketConfig; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.conn.socket.PlainConnectionSocketFactory; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustSelfSignedStrategy; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.BasicHttpClientConnectionManager; +import 
org.apache.http.ssl.SSLContextBuilder; +import org.apache.http.ssl.SSLContexts; +import org.apache.http.ssl.TrustStrategy; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; + +public class ApacheHttpTransport implements HttpTransport, LoggingSource { + String scheme; + String host; + int port; + String path; + int readTimeout; + + private RequestConfig requestConfig; + private CloseableHttpClient httpClient; + + public ApacheHttpTransport(ConnectionConfig connectionConfig, Logger log, String userAgent) throws TransportException { + this.host = connectionConfig.getHost(); + this.port = connectionConfig.getPort(); + this.scheme = connectionConfig.isUseSSL() ? 
"https" : "http"; + this.path = connectionConfig.getPath(); + + updateRequestConfig(); + + ConnectionSocketFactory sslConnectionSocketFactory = null; + + try { + sslConnectionSocketFactory = getSslConnectionSocketFactory(connectionConfig); + } catch (Exception e) { + throw new TransportException("Exception building SSL/TLS socket factory " + e, e); + } + + Registry socketFactoryRegistry = RegistryBuilder.create() + .register("http", PlainConnectionSocketFactory.getSocketFactory()) + .register("https", sslConnectionSocketFactory) + .build(); + + ApacheHttpClientConnectionFactory connectionFactory = + new ApacheHttpClientConnectionFactory(new JclLoggerAdapter(log, getSource())); + + HttpClientBuilder httpClientBuilder = HttpClients.custom() + .setConnectionManager( + new BasicHttpClientConnectionManager(socketFactoryRegistry, connectionFactory)) + .setDefaultSocketConfig(buildDefaultSocketConfig()) + .setDefaultRequestConfig(getRequestConfig()) + .setUserAgent(userAgent); + + // request compression + if (!connectionConfig.requestCompression()) + httpClientBuilder.disableContentCompression(); + + // setup authentication + if (connectionConfig.getAuthenticationType() == AuthenticationType.BASIC) { + CredentialsProvider basicCredsProvider = new BasicCredentialsProvider(); + basicCredsProvider.setCredentials( + AuthScope.ANY, + new UsernamePasswordCredentials(connectionConfig.getUser(), connectionConfig.getPassword())); + httpClientBuilder.setDefaultCredentialsProvider(basicCredsProvider); + + } else if (connectionConfig.getAuthenticationType() == AuthenticationType.AWS_SIGV4) { + AWS4Signer signer = new AWS4Signer(); + signer.setServiceName("es"); + signer.setRegionName(connectionConfig.getRegion()); + + AWSCredentialsProvider provider = connectionConfig.getAwsCredentialsProvider() != null ? 
+ connectionConfig.getAwsCredentialsProvider() : new DefaultAWSCredentialsProviderChain(); + httpClientBuilder.addInterceptorLast( + new AWSRequestSigningApacheInterceptor( + "es", + signer, + provider)); + } + + // TODO - can apply settings retry & backoff + this.httpClient = httpClientBuilder.build(); + } + + @Override + public CloseableHttpResponse doGet(String path, Header[] headers, HttpParam[] params, int timeout) throws TransportException { + return doGet(buildRequestURI(path, params), headers, timeout); + } + + @Override + public CloseableHttpResponse doPost(String path, Header[] headers, HttpParam[] params, String body, int timeout) throws TransportException { + return doPost(buildRequestURI(path, params), headers, body, timeout); + } + + @Override + public void close() throws TransportException { + try { + this.httpClient.close(); + } catch (IOException e) { + throw new TransportException(e); + } + } + + private ConnectionSocketFactory getSslConnectionSocketFactory(ConnectionConfig connectionConfig) + throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException, + UnrecoverableKeyException, KeyManagementException { + + TrustStrategy trustStrategy = connectionConfig.trustSelfSigned() ? new TrustSelfSignedStrategy() : null; + + SSLContextBuilder builder = SSLContexts.custom(); + + if (connectionConfig.getKeyStoreLocation() != null || connectionConfig.getTrustStoreLocation() != null) { + // trust material + if (connectionConfig.getTrustStoreLocation() != null) { + String trustStorePassword = connectionConfig.getTrustStorePassword(); + char[] password = trustStorePassword == null ? 
"".toCharArray() : trustStorePassword.toCharArray(); + + builder.loadTrustMaterial( + new File(connectionConfig.getTrustStoreLocation()), + password, trustStrategy); + } + + // key material + if (connectionConfig.getKeyStoreLocation() != null) { + String keyStorePassword = connectionConfig.getKeyStorePassword(); + char[] password = keyStorePassword == null ? "".toCharArray() : keyStorePassword.toCharArray(); + + // TODO - can add alias selection strategy + // TODO - can add support for a separate property for key password + builder.loadKeyMaterial(new File(connectionConfig.getKeyStoreLocation()), password, password).build(); + } + + } else { + + builder.loadTrustMaterial(null, trustStrategy); + } + + HostnameVerifier hostnameVerifier = connectionConfig.hostnameVerification() ? + SSLConnectionSocketFactory.getDefaultHostnameVerifier() : new NoopHostnameVerifier(); + + SSLContext sslContext = builder.build(); + return new SSLConnectionSocketFactory(sslContext, hostnameVerifier); + } + + private SocketConfig buildDefaultSocketConfig() { + return SocketConfig.custom() + .setSoKeepAlive(true) + .setSoTimeout(this.readTimeout) + .build(); + } + + private void updateRequestConfig() { + this.requestConfig = RequestConfig.custom() + .setSocketTimeout(this.readTimeout) + .build(); + } + + private RequestConfig getRequestConfig() { + return this.requestConfig; + } + + public void setReadTimeout(int readTimeout) { + if (readTimeout != this.readTimeout) { + this.readTimeout = readTimeout; + updateRequestConfig(); + } + } + + private URIBuilder getUriBuilder(String path) { + return new URIBuilder() + .setScheme(this.scheme) + .setHost(this.host) + .setPort(this.port) + .setPath(this.path + path); + } + + + private URI buildRequestURI(String path, HttpParam... 
params) throws TransportException { + try { + URIBuilder uriBuilder = getUriBuilder(path); + + if (params != null) { + for (HttpParam param : params) + uriBuilder.setParameter(param.getName(), param.getValue()); + } + return uriBuilder.build(); + } catch (URISyntaxException e) { + throw new TransportException(e); + } + } + + private CloseableHttpResponse doGet(URI uri, Header[] headers, int readTimeout) throws TransportException { + try { + setReadTimeout(readTimeout); + HttpGet request = new HttpGet(uri); + request.setHeaders(headers); + request.setConfig(getRequestConfig()); + return httpClient.execute(request); + } catch (IOException e) { + throw new TransportException(e); + } + } + + private CloseableHttpResponse doPost(URI uri, Header[] headers, String body, int readTimeout) throws TransportException { + try { + setReadTimeout(readTimeout); + HttpPost request = new HttpPost(uri); + request.setHeaders(headers); + request.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON)); + request.setConfig(getRequestConfig()); + return httpClient.execute(request); + } catch (IOException e) { + throw new TransportException(e); + } + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpTransportFactory.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpTransportFactory.java new file mode 100644 index 0000000000..ab6d5968c2 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/ApacheHttpTransportFactory.java @@ -0,0 +1,36 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportException; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportFactory; + +public class ApacheHttpTransportFactory implements TransportFactory { + + public static ApacheHttpTransportFactory INSTANCE = new ApacheHttpTransportFactory(); + + private ApacheHttpTransportFactory() { + + } + + @Override + public ApacheHttpTransport getTransport(ConnectionConfig config, Logger log, String userAgent) throws TransportException { + return new ApacheHttpTransport(config, log, userAgent); + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/HttpParam.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/HttpParam.java new file mode 100644 index 0000000000..2969c592cd --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/HttpParam.java @@ -0,0 +1,37 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
/**
 * An immutable name/value pair representing a single HTTP query parameter.
 */
public class HttpParam {

    // fix: fields made final — the class exposes no setters, so enforcing
    // immutability is behavior-compatible and documents intent
    private final String name;

    private final String value;

    /**
     * @param name parameter name
     * @param value parameter value
     */
    public HttpParam(String name, String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() {
        return name;
    }

    public String getValue() {
        return value;
    }
}
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import com.amazon.opendistroforelasticsearch.jdbc.transport.Transport; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportException; +import org.apache.http.Header; +import org.apache.http.client.methods.CloseableHttpResponse; + +public interface HttpTransport extends Transport { + + CloseableHttpResponse doGet(String path, Header[] headers, HttpParam[] params, int timeout) + throws TransportException; + + CloseableHttpResponse doPost(String path, Header[] headers, HttpParam[] params, String body, int timeout) + throws TransportException; +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/JclLoggerAdapter.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/JclLoggerAdapter.java new file mode 100644 index 0000000000..03b54cc1ae --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/JclLoggerAdapter.java @@ -0,0 +1,126 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import com.amazon.opendistroforelasticsearch.jdbc.logging.Logger; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LoggingSource; +import org.apache.commons.logging.Log; + +public class JclLoggerAdapter implements Log, LoggingSource { + private final Logger logger; + private String source; + + public JclLoggerAdapter(Logger logger, String source) { + this.logger = logger; + this.source = source; + } + + @Override + public void debug(Object message) { + logger.debug(() -> logMessage(String.valueOf(message))); + } + + @Override + public void debug(Object message, Throwable t) { + logger.debug(String.valueOf(message), t); + } + + @Override + public void error(Object message) { + logger.error(String.valueOf(message)); + } + + @Override + public void error(Object message, Throwable t) { + logger.error(String.valueOf(message), t); + } + + @Override + public void fatal(Object message) { + logger.fatal(String.valueOf(message)); + } + + @Override + public void fatal(Object message, Throwable t) { + logger.fatal(String.valueOf(message), t); + } + + @Override + public void info(Object message) { + logger.info(String.valueOf(message)); + } + + @Override + public void info(Object message, Throwable t) { + logger.info(String.valueOf(message), t); + } + + @Override + public boolean isDebugEnabled() { + return logger.isDebugEnabled(); + } + + @Override + public boolean isErrorEnabled() { + return false; + } + + @Override + public boolean isFatalEnabled() { + return logger.isFatalEnabled(); + } + + @Override + public boolean isInfoEnabled() { + return logger.isInfoEnabled(); + } + + @Override + public boolean isTraceEnabled() { + return logger.isTraceEnabled(); + } + + @Override + public boolean isWarnEnabled() { + return logger.isWarnEnabled(); + } + + @Override + public void trace(Object message) { + logger.trace(() -> logMessage(String.valueOf(message))); + } + + @Override + public void 
trace(Object message, Throwable t) { + logger.trace(String.valueOf(message), t); + } + + @Override + public void warn(Object message) { + logger.warn(String.valueOf(message)); + } + + @Override + public void warn(Object message, Throwable t) { + logger.warn(String.valueOf(message), t); + } + + @Override + public String getSource() { + return source; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingInputStream.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingInputStream.java new file mode 100644 index 0000000000..de71cf1764 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingInputStream.java @@ -0,0 +1,159 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +/* + * ==================================================================== + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + * + */ +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import org.apache.http.impl.conn.Wire; + +import java.io.IOException; +import java.io.InputStream; + +/** + * Adapted from Apache HttpClient to offer per connection HTTP wire + * logging. + */ +class LoggingInputStream extends InputStream { + + private final InputStream in; + private final Wire wire; + + public LoggingInputStream(final InputStream in, final Wire wire) { + super(); + this.in = in; + this.wire = wire; + } + + @Override + public int read() throws IOException { + try { + final int b = in.read(); + if (b == -1) { + wire.input("end of stream"); + } else { + wire.input(b); + } + return b; + } catch (final IOException ex) { + wire.input("[read] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public int read(final byte[] b) throws IOException { + try { + final int bytesRead = in.read(b); + if (bytesRead == -1) { + wire.input("end of stream"); + } else if (bytesRead > 0) { + wire.input(b, 0, bytesRead); + } + return bytesRead; + } catch (final IOException ex) { + wire.input("[read] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public int read(final byte[] b, final int off, final int len) throws IOException { + try { + final int bytesRead = 
in.read(b, off, len); + if (bytesRead == -1) { + wire.input("end of stream"); + } else if (bytesRead > 0) { + wire.input(b, off, bytesRead); + } + return bytesRead; + } catch (final IOException ex) { + wire.input("[read] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public long skip(final long n) throws IOException { + try { + return super.skip(n); + } catch (final IOException ex) { + wire.input("[skip] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public int available() throws IOException { + try { + return in.available(); + } catch (final IOException ex) { + wire.input("[available] I/O error : " + ex.getMessage()); + throw ex; + } + } + + @Override + public void mark(final int readlimit) { + super.mark(readlimit); + } + + @Override + public void reset() throws IOException { + super.reset(); + } + + @Override + public boolean markSupported() { + return false; + } + + @Override + public void close() throws IOException { + try { + in.close(); + } catch (final IOException ex) { + wire.input("[close] I/O error: " + ex.getMessage()); + throw ex; + } + } + +} + diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingManagedHttpClientConnection.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingManagedHttpClientConnection.java new file mode 100644 index 0000000000..8dafa330ba --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingManagedHttpClientConnection.java @@ -0,0 +1,136 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +/* + * ==================================================================== + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import org.apache.commons.logging.Log; +import org.apache.http.HttpRequest; +import org.apache.http.HttpResponse; +import org.apache.http.config.MessageConstraints; +import org.apache.http.entity.ContentLengthStrategy; +import org.apache.http.impl.conn.DefaultManagedHttpClientConnection; +import org.apache.http.impl.conn.Wire; +import org.apache.http.io.HttpMessageParserFactory; +import org.apache.http.io.HttpMessageWriterFactory; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.Socket; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; + +/** + * Adapted from Apache HttpClient to offer per connection HTTP wire + * logging. + */ +public class LoggingManagedHttpClientConnection extends DefaultManagedHttpClientConnection { + + private final Log log; + private final Wire wire; + + public LoggingManagedHttpClientConnection( + final String id, + final Log log, + final int buffersize, + final int fragmentSizeHint, + final CharsetDecoder chardecoder, + final CharsetEncoder charencoder, + final MessageConstraints constraints, + final ContentLengthStrategy incomingContentStrategy, + final ContentLengthStrategy outgoingContentStrategy, + final HttpMessageWriterFactory requestWriterFactory, + final HttpMessageParserFactory responseParserFactory) { + super(id, buffersize, fragmentSizeHint, chardecoder, charencoder, + constraints, incomingContentStrategy, outgoingContentStrategy, + requestWriterFactory, responseParserFactory); + this.log = log; + this.wire = new Wire(log, id); + } + + @Override + public void close() throws IOException { + + if (super.isOpen()) { + if (this.log.isDebugEnabled()) { + this.log.debug(getId() + ": Close connection"); + } + super.close(); + } + } + + @Override + public void setSocketTimeout(final int timeout) { + if (this.log.isDebugEnabled()) { + this.log.debug(getId() + ": set 
socket timeout to " + timeout); + } + super.setSocketTimeout(timeout); + } + + @Override + public void shutdown() throws IOException { + if (this.log.isDebugEnabled()) { + this.log.debug(getId() + ": Shutdown connection"); + } + super.shutdown(); + } + + @Override + protected InputStream getSocketInputStream(final Socket socket) throws IOException { + InputStream in = super.getSocketInputStream(socket); + if (this.wire.enabled()) { + in = new LoggingInputStream(in, this.wire); + } + return in; + } + + @Override + protected OutputStream getSocketOutputStream(final Socket socket) throws IOException { + OutputStream out = super.getSocketOutputStream(socket); + if (this.wire.enabled()) { + out = new LoggingOutputStream(out, this.wire); + } + return out; + } + + } diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingOutputStream.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingOutputStream.java new file mode 100644 index 0000000000..f52dd82f0f --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/LoggingOutputStream.java @@ -0,0 +1,118 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +/* + * ==================================================================== + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http; + +import org.apache.http.impl.conn.Wire; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Adapted from Apache HttpClient to offer per connection HTTP wire + * logging. 
+ */ +class LoggingOutputStream extends OutputStream { + + private final OutputStream out; + private final Wire wire; + + public LoggingOutputStream(final OutputStream out, final Wire wire) { + super(); + this.out = out; + this.wire = wire; + } + + @Override + public void write(final int b) throws IOException { + try { + wire.output(b); + } catch (final IOException ex) { + wire.output("[write] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public void write(final byte[] b) throws IOException { + try { + wire.output(b); + out.write(b); + } catch (final IOException ex) { + wire.output("[write] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public void write(final byte[] b, final int off, final int len) throws IOException { + try { + wire.output(b, off, len); + out.write(b, off, len); + } catch (final IOException ex) { + wire.output("[write] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public void flush() throws IOException { + try { + out.flush(); + } catch (final IOException ex) { + wire.output("[flush] I/O error: " + ex.getMessage()); + throw ex; + } + } + + @Override + public void close() throws IOException { + try { + out.close(); + } catch (final IOException ex) { + wire.output("[close] I/O error: " + ex.getMessage()); + throw ex; + } + } + +} \ No newline at end of file diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/auth/aws/AWSRequestSigningApacheInterceptor.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/auth/aws/AWSRequestSigningApacheInterceptor.java new file mode 100644 index 0000000000..97212a83ae --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/auth/aws/AWSRequestSigningApacheInterceptor.java @@ -0,0 +1,186 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http.auth.aws; + +import com.amazonaws.DefaultRequest; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.Signer; +import com.amazonaws.http.HttpMethodName; +import org.apache.http.Header; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpException; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.HttpRequestInterceptor; +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.entity.BasicHttpEntity; +import org.apache.http.message.BasicHeader; +import org.apache.http.protocol.HttpContext; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.apache.http.protocol.HttpCoreContext.HTTP_TARGET_HOST; + +/** + * An {@link HttpRequestInterceptor} that signs requests using any AWS {@link Signer} + * and {@link AWSCredentialsProvider}. + */ +public class AWSRequestSigningApacheInterceptor implements HttpRequestInterceptor { + /** + * The service that we're connecting to. Technically not necessary. + * Could be used by a future Signer, though. + */ + private final String service; + + /** + * The particular signer implementation. 
+ */ + private final Signer signer; + + /** + * The source of AWS credentials for signing. + */ + private final AWSCredentialsProvider awsCredentialsProvider; + + /** + * + * @param service service that we're connecting to + * @param signer particular signer implementation + * @param awsCredentialsProvider source of AWS credentials for signing + */ + public AWSRequestSigningApacheInterceptor(final String service, + final Signer signer, + final AWSCredentialsProvider awsCredentialsProvider) { + this.service = service; + this.signer = signer; + this.awsCredentialsProvider = awsCredentialsProvider; + } + + /** + * {@inheritDoc} + */ + @Override + public void process(final HttpRequest request, final HttpContext context) + throws HttpException, IOException { + URIBuilder uriBuilder; + try { + uriBuilder = new URIBuilder(request.getRequestLine().getUri()); + } catch (URISyntaxException e) { + throw new IOException("Invalid URI" , e); + } + + // Copy Apache HttpRequest to AWS DefaultRequest + DefaultRequest signableRequest = new DefaultRequest<>(service); + + HttpHost host = (HttpHost) context.getAttribute(HTTP_TARGET_HOST); + if (host != null) { + signableRequest.setEndpoint(URI.create(host.toURI())); + } + final HttpMethodName httpMethod = + HttpMethodName.fromValue(request.getRequestLine().getMethod()); + signableRequest.setHttpMethod(httpMethod); + try { + signableRequest.setResourcePath(uriBuilder.build().getRawPath()); + } catch (URISyntaxException e) { + throw new IOException("Invalid URI" , e); + } + + if (request instanceof HttpEntityEnclosingRequest) { + HttpEntityEnclosingRequest httpEntityEnclosingRequest = + (HttpEntityEnclosingRequest) request; + if (httpEntityEnclosingRequest.getEntity() != null) { + signableRequest.setContent(httpEntityEnclosingRequest.getEntity().getContent()); + } + } + signableRequest.setParameters(nvpToMapParams(uriBuilder.getQueryParams())); + signableRequest.setHeaders(headerArrayToMap(request.getAllHeaders())); + + // Sign it + 
signer.sign(signableRequest, awsCredentialsProvider.getCredentials()); + + // Now copy everything back + request.setHeaders(mapToHeaderArray(signableRequest.getHeaders())); + if (request instanceof HttpEntityEnclosingRequest) { + HttpEntityEnclosingRequest httpEntityEnclosingRequest = + (HttpEntityEnclosingRequest) request; + if (httpEntityEnclosingRequest.getEntity() != null) { + BasicHttpEntity basicHttpEntity = new BasicHttpEntity(); + basicHttpEntity.setContent(signableRequest.getContent()); + httpEntityEnclosingRequest.setEntity(basicHttpEntity); + } + } + } + + /** + * + * @param params list of HTTP query params as NameValuePairs + * @return a multimap of HTTP query params + */ + private static Map> nvpToMapParams(final List params) { + Map> parameterMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + for (NameValuePair nvp : params) { + List argsList = + parameterMap.computeIfAbsent(nvp.getName(), k -> new ArrayList<>()); + argsList.add(nvp.getValue()); + } + return parameterMap; + } + + /** + * @param headers modeled Header objects + * @return a Map of header entries + */ + private static Map headerArrayToMap(final Header[] headers) { + Map headersMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + for (Header header : headers) { + if (!skipHeader(header)) { + headersMap.put(header.getName(), header.getValue()); + } + } + return headersMap; + } + + /** + * @param header header line to check + * @return true if the given header should be excluded when signing + */ + private static boolean skipHeader(final Header header) { + return ("content-length".equalsIgnoreCase(header.getName()) + && "0".equals(header.getValue())) // Strip Content-Length: 0 + || "host".equalsIgnoreCase(header.getName()); // Host comes from endpoint + } + + /** + * @param mapHeaders Map of header entries + * @return modeled Header objects + */ + private static Header[] mapToHeaderArray(final Map mapHeaders) { + Header[] headers = new Header[mapHeaders.size()]; + int i = 0; + for 
(Map.Entry headerEntry : mapHeaders.entrySet()) { + headers[i++] = new BasicHeader(headerEntry.getKey(), headerEntry.getValue()); + } + return headers; + } +} \ No newline at end of file diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/BaseTypeConverter.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/BaseTypeConverter.java new file mode 100644 index 0000000000..15a88006f3 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/BaseTypeConverter.java @@ -0,0 +1,71 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import java.sql.Date; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +public abstract class BaseTypeConverter implements TypeConverter { + + static final Map typeHandlerMap = new HashMap<>(); + + static { + typeHandlerMap.put(String.class, StringType.INSTANCE); + + typeHandlerMap.put(Byte.class, ByteType.INSTANCE); + typeHandlerMap.put(Short.class, ShortType.INSTANCE); + typeHandlerMap.put(Integer.class, IntegerType.INSTANCE); + typeHandlerMap.put(Long.class, LongType.INSTANCE); + + typeHandlerMap.put(Float.class, FloatType.INSTANCE); + typeHandlerMap.put(Double.class, DoubleType.INSTANCE); + + typeHandlerMap.put(Boolean.class, BooleanType.INSTANCE); + + typeHandlerMap.put(Timestamp.class, TimestampType.INSTANCE); + typeHandlerMap.put(Date.class, DateType.INSTANCE); + + } + + @Override + public T convert(Object value, Class clazz, Map conversionParams) throws SQLException { + if (clazz == null) { + clazz = getDefaultJavaClass(); + } + + if (getSupportedJavaClasses() != null && getSupportedJavaClasses().contains(clazz)) { + TypeHelper typeHelper = getTypeHelper(clazz); + + if (typeHelper != null) { + return typeHelper.fromValue(value, conversionParams); + } + } + throw objectConversionException(value, clazz); + } + + private TypeHelper getTypeHelper(Class clazz) { + return typeHandlerMap.get(clazz); + } + + public abstract Class getDefaultJavaClass(); + + public abstract Set getSupportedJavaClasses(); +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/BooleanType.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/BooleanType.java new file mode 100644 index 0000000000..6349753c13 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/BooleanType.java @@ -0,0 +1,58 @@ +/* + * Copyright <2019> Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import java.sql.SQLException; +import java.util.Map; + +public class BooleanType implements TypeHelper { + + public static final BooleanType INSTANCE = new BooleanType(); + + private BooleanType() { + + } + + @Override + public Boolean fromValue(Object value, Map conversionParams) throws SQLException { + if (value == null) { + return false; + } + + if (value instanceof Boolean) { + return (Boolean) value; + } else if (value instanceof String) { + return asBoolean((String) value); + } else { + throw objectConversionException(value); + } + } + + private Boolean asBoolean(String value) throws SQLException { + try { + return Boolean.valueOf(value); + } catch (NumberFormatException nfe) { + throw stringConversionException(value, nfe); + } + } + + + @Override + public String getTypeName() { + return "Boolean"; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/ByteType.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/ByteType.java new file mode 100644 index 0000000000..d27c4b3d6a --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/ByteType.java @@ -0,0 +1,60 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import java.sql.SQLException; +import java.util.Map; + +public class ByteType extends NumberType { + public static final ByteType INSTANCE = new ByteType(); + + private ByteType() { + + } + + public Byte fromValue(Object value, Map conversionParams) throws SQLException { + if (value == null) { + return (byte) 0; + } + if (value instanceof Byte) { + return (Byte) value; + } else if (value instanceof String) { + return asByte((String) value); + } else if (value instanceof Number) { + return asByte((Number) value); + } else { + throw objectConversionException(value); + } + } + + private Byte asByte(String value) throws SQLException { + try { + return asByte(Double.valueOf(value)); + } catch (NumberFormatException nfe) { + throw stringConversionException(value, nfe); + } + } + + private Byte asByte(Number value) throws SQLException { + return (byte) getDoubleValueWithinBounds(value, Byte.MIN_VALUE, Byte.MAX_VALUE); + } + + @Override + public String getTypeName() { + return "Byte"; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/DateType.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/DateType.java new file mode 100644 index 0000000000..5d98b38600 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/DateType.java @@ -0,0 +1,98 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import java.sql.Date; +import java.sql.SQLException; +import java.time.LocalDate; +import java.time.format.DateTimeParseException; +import java.util.Calendar; +import java.util.Map; + +/** + * Supports returning a java.sql.Date from a String (starting with yyyy-mm-dd) + * or a Number value indicating epoch time in millis. + */ +public class DateType implements TypeHelper { + + public static final DateType INSTANCE = new DateType(); + + private DateType() { + + } + + @Override + public Date fromValue(Object value, Map conversionParams) throws SQLException { + if (value == null) { + return null; + } + Calendar calendar = conversionParams != null ? 
(Calendar) conversionParams.get("calendar") : null; + if (value instanceof Date) { + return asDate((Date) value, calendar); + } else if (value instanceof String) { + return asDate((String) value, calendar); + } else if (value instanceof Number) { + return asDate((Number) value); + } else { + throw objectConversionException(value); + } + } + + public java.sql.Date asDate(Date value, Calendar calendar) throws SQLException { + if (calendar == null) { + return value; + } else { + return localDatetoSqlDate(value.toLocalDate(), calendar); + } + } + + public java.sql.Date asDate(String value, Calendar calendar) throws SQLException { + try { + if (calendar == null) { + return java.sql.Date.valueOf(toLocalDate(value)); + } else { + return localDatetoSqlDate(toLocalDate(value), calendar); + } + } catch (DateTimeParseException dpe) { + throw stringConversionException(value, dpe); + } + } + + private Date localDatetoSqlDate(LocalDate localDate, Calendar calendar) { + calendar.set(localDate.getYear(), + localDate.getMonthValue() - 1, + localDate.getDayOfMonth(), 0, 0, 0); + calendar.set(Calendar.MILLISECOND, 0); + return new java.sql.Date(calendar.getTimeInMillis()); + } + + public java.sql.Date asDate(Number value) { + return new java.sql.Date(value.longValue()); + } + + private LocalDate toLocalDate(String value) throws SQLException { + if (value == null || value.length() < 10) + throw stringConversionException(value, null); + return LocalDate.parse(value.substring(0, 10)); + } + + @Override + public String getTypeName() { + return "Date"; + } + +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/DoubleType.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/DoubleType.java new file mode 100644 index 0000000000..ef25fbe360 --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/DoubleType.java @@ -0,0 +1,62 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import java.sql.SQLException; +import java.util.Map; + +public class DoubleType implements TypeHelper { + + public static final DoubleType INSTANCE = new DoubleType(); + + private DoubleType() { + + } + + @Override + public Double fromValue(Object value, Map conversionParams) throws SQLException { + if (value == null) { + return (double) 0; + } + if (value instanceof Double) { + return (Double) value; + } else if (value instanceof String) { + return asDouble((String) value); + } else if (value instanceof Number) { + return asDouble((Number) value); + } else { + throw objectConversionException(value); + } + } + + private Double asDouble(String value) throws SQLException { + try { + return Double.valueOf(value); + } catch (NumberFormatException nfe) { + throw stringConversionException(value, nfe); + } + } + + private Double asDouble(Number value) throws SQLException { + return value.doubleValue(); + } + + @Override + public String getTypeName() { + return "Double"; + } +} diff --git a/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/ElasticsearchType.java b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/ElasticsearchType.java new file mode 100644 index 0000000000..eebaa1914e --- /dev/null +++ b/sql-jdbc/src/main/java/com/amazon/opendistroforelasticsearch/jdbc/types/ElasticsearchType.java @@ -0,0 +1,221 @@ +/* + * 
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.JDBCType;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

/**
 * Enum of Elasticsearch data types.
 * <p>
 * Each constant captures the mapping from an Elasticsearch type to its
 * corresponding {@link JDBCType} together with associated properties
 * (Java class, precision, display size, signedness).
 * <p>
 * Where required, an Elasticsearch data type is mapped to the least
 * precise {@link JDBCType} that can accurately represent it, based on:
 * <ol>
 * <li>Precision of a {@link JDBCType} taken to be the precision of the
 * corresponding Java type mentioned in the JDBC Spec, Table B-1:
 * JDBC Types Mapped to Java Types</li>
 * <li>Precision of Elasticsearch types based on the Elasticsearch
 * Reference &gt; Mapping &gt; Field datatypes</li>
 * </ol>
 */
public enum ElasticsearchType {

    // Precision values based on number of decimal digits supported by Java types.
    // Display size values based on precision plus additional buffer for visual representation.
    // - Java long is a 64-bit integral value ~ 19 decimal digits
    // - Java double has 53-bit precision ~ 15 decimal digits
    // - Java float has 24-bit precision ~ 7 decimal digits
    // - scaled_float is internally an Elasticsearch long, but treated as a Java Double here
    // - ISO8601 representation of DateTime values as yyyy-mm-ddThh:mm:ss.mmmZ ~ 24 chars
    // Some types not fully supported yet: VARBINARY, GEO_POINT, NESTED

    BOOLEAN(JDBCType.BOOLEAN, Boolean.class, 1, 1, false),
    BYTE(JDBCType.TINYINT, Byte.class, 3, 5, true),
    SHORT(JDBCType.SMALLINT, Short.class, 5, 6, true),
    INTEGER(JDBCType.INTEGER, Integer.class, 10, 11, true),
    LONG(JDBCType.BIGINT, Long.class, 19, 20, true),
    HALF_FLOAT(JDBCType.REAL, Float.class, 7, 15, true),
    FLOAT(JDBCType.REAL, Float.class, 7, 15, true),
    DOUBLE(JDBCType.DOUBLE, Double.class, 15, 25, true),
    SCALED_FLOAT(JDBCType.DOUBLE, Double.class, 15, 25, true),
    KEYWORD(JDBCType.VARCHAR, String.class, 256, 0, false),
    TEXT(JDBCType.VARCHAR, String.class, Integer.MAX_VALUE, 0, false),
    IP(JDBCType.VARCHAR, String.class, 15, 0, false),
    NESTED(JDBCType.STRUCT, null, 0, 0, false),
    OBJECT(JDBCType.STRUCT, null, 0, 0, false),
    DATE(JDBCType.TIMESTAMP, Timestamp.class, 24, 24, false),
    NULL(JDBCType.NULL, null, 0, 0, false),
    UNSUPPORTED(JDBCType.OTHER, null, 0, 0, false);

    /** Reverse lookup: JDBCType -> corresponding ElasticsearchType. */
    private static final Map<JDBCType, ElasticsearchType> jdbcTypeToESTypeMap;

    static {
        jdbcTypeToESTypeMap = new HashMap<>();
        jdbcTypeToESTypeMap.put(JDBCType.NULL, NULL);
        jdbcTypeToESTypeMap.put(JDBCType.BOOLEAN, BOOLEAN);
        jdbcTypeToESTypeMap.put(JDBCType.TINYINT, BYTE);
        jdbcTypeToESTypeMap.put(JDBCType.SMALLINT, SHORT);
        jdbcTypeToESTypeMap.put(JDBCType.INTEGER, INTEGER);
        jdbcTypeToESTypeMap.put(JDBCType.BIGINT, LONG);
        jdbcTypeToESTypeMap.put(JDBCType.DOUBLE, DOUBLE);
        jdbcTypeToESTypeMap.put(JDBCType.REAL, FLOAT);
        // JDBC FLOAT is double precision per the JDBC spec, hence DOUBLE here
        jdbcTypeToESTypeMap.put(JDBCType.FLOAT, DOUBLE);
        jdbcTypeToESTypeMap.put(JDBCType.VARCHAR, KEYWORD);
        jdbcTypeToESTypeMap.put(JDBCType.TIMESTAMP, DATE);
        jdbcTypeToESTypeMap.put(JDBCType.DATE, DATE);
    }

    /** Elasticsearch designated type name (enum name lower-cased). */
    private final String typeName;

    /** {@link JDBCType} that this type maps to. */
    private final JDBCType jdbcType;

    /** Fully-qualified name of the Java class the type maps to; null if none. */
    private final String javaClassName;

    /**
     * Maximum number of characters that may be needed to represent values of
     * this type, as described in {@link java.sql.ResultSetMetaData#getPrecision(int)}:
     * <ul>
     * <li>numeric types: max number of decimal digits possible with the
     * corresponding Java type</li>
     * <li>character types: length of potential character data</li>
     * <li>date-time types: characters needed to represent the value in zero
     * offset (UTC) ISO8601 format with millisecond fractional time, i.e.
     * yyyy-mm-ddThh:mm:ss.mmmZ</li>
     * </ul>
     */
    private final int precision;

    /** Display size per {@link java.sql.ResultSetMetaData#getColumnDisplaySize(int)}. */
    private final int displaySize;

    /** True if the type holds signed numerical values. */
    private final boolean isSigned;

    ElasticsearchType(JDBCType jdbcType, Class javaClass, int precision,
                      int displaySize, boolean isSigned) {
        this.typeName = name().toLowerCase(Locale.ROOT);
        this.jdbcType = jdbcType;
        this.javaClassName = javaClass == null ? null : javaClass.getName();
        this.precision = precision;
        this.displaySize = displaySize;
        this.isSigned = isSigned;
    }

    /**
     * Returns the {@link ElasticsearchType} mapped to the given JDBCType.
     *
     * @throws IllegalArgumentException if no mapping exists for the JDBCType
     */
    public static ElasticsearchType fromJdbcType(JDBCType jdbcType) {
        if (!jdbcTypeToESTypeMap.containsKey(jdbcType)) {
            throw new IllegalArgumentException("Unsupported JDBC type \"" + jdbcType + "\"");
        }
        return jdbcTypeToESTypeMap.get(jdbcType);
    }

    /**
     * Returns the {@link ElasticsearchType} for the specified Elasticsearch
     * data type name, yielding {@link #UNSUPPORTED} for unknown names.
     */
    public static ElasticsearchType fromTypeName(String typeName) {
        return fromTypeName(typeName, false);
    }

    /**
     * Parses a specified Elasticsearch type name to determine the
     * corresponding {@link ElasticsearchType}.
     *
     * @param typeName the Elasticsearch type name to parse
     * @param errorOnUnknownType if true, throws
     *         {@link UnrecognizedElasticsearchTypeException} when the type
     *         name is not recognized; otherwise returns {@link #UNSUPPORTED}
     *
     * @return the {@link ElasticsearchType} corresponding to the type name
     */
    public static ElasticsearchType fromTypeName(String typeName, boolean errorOnUnknownType) {
        try {
            return ElasticsearchType.valueOf(typeName.toUpperCase(Locale.ROOT));
        } catch (IllegalArgumentException iae) {
            if (errorOnUnknownType) {
                throw new UnrecognizedElasticsearchTypeException("Unknown Type: \"" + typeName + "\"", iae);
            }
            return UNSUPPORTED;
        }
    }

    public int sqlTypeNumber() {
        return jdbcType.getVendorTypeNumber();
    }

    public boolean isSigned() {
        return isSigned;
    }

    public String getTypeName() {
        return typeName;
    }

    public JDBCType getJdbcType() {
        return jdbcType;
    }

    public String getJavaClassName() {
        return javaClassName;
    }

    public int getPrecision() {
        return precision;
    }

    public int getDisplaySize() {
        return displaySize;
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.SQLException;
import java.util.Map;

/**
 * {@link NumberType} implementation that materializes column values
 * as {@link Float} instances, range-checking on the way.
 */
public class FloatType extends NumberType<Float> {

    /** Shared singleton — the converter is stateless. */
    public static final FloatType INSTANCE = new FloatType();

    private FloatType() {
        // singleton: instantiate via INSTANCE only
    }

    /**
     * Converts the supplied object to a Float.
     *
     * @param value value to convert; null maps to 0
     * @param conversionParams optional conversion parameters (unused here)
     *
     * @return the Float equivalent of the value
     *
     * @throws SQLException if the value cannot be converted or is out of range
     */
    @Override
    public Float fromValue(Object value, Map<String, Object> conversionParams) throws SQLException {
        if (value == null) {
            return (float) 0;
        }
        if (value instanceof Float) {
            return (Float) value;
        }
        if (value instanceof String) {
            return parseFloat((String) value);
        }
        if (value instanceof Number) {
            return narrowToFloat((Number) value);
        }
        throw objectConversionException(value);
    }

    // Parses via Double first so the range check below can run on the
    // full-precision value; parse failures become SQLDataException.
    private Float parseFloat(String text) throws SQLException {
        try {
            return narrowToFloat(Double.valueOf(text));
        } catch (NumberFormatException nfe) {
            throw stringConversionException(text, nfe);
        }
    }

    // Narrows any Number to float after verifying it fits within
    // [-Float.MAX_VALUE, Float.MAX_VALUE].
    private Float narrowToFloat(Number number) throws SQLException {
        return (float) getDoubleValueWithinBounds(number, -Float.MAX_VALUE, Float.MAX_VALUE);
    }

    @Override
    public String getTypeName() {
        return "Float";
    }

    /**
     * Floats keep their fractional part — no rounding during conversion.
     */
    @Override
    public boolean roundOffValue() {
        return false;
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.SQLException;
import java.util.Map;

/**
 * {@link NumberType} implementation that materializes column values
 * as {@link Integer} instances, range-checking on the way.
 */
public class IntegerType extends NumberType<Integer> {

    /** Shared singleton — the converter is stateless. */
    public static final IntegerType INSTANCE = new IntegerType();

    private IntegerType() {
        // singleton: instantiate via INSTANCE only
    }

    /**
     * Converts the supplied object to an Integer.
     *
     * @param value value to convert; null maps to 0
     * @param conversionParams optional conversion parameters (unused here)
     *
     * @return the Integer equivalent of the value
     *
     * @throws SQLException if the value cannot be converted or is out of range
     */
    @Override
    public Integer fromValue(Object value, Map<String, Object> conversionParams) throws SQLException {
        if (value == null) {
            return 0;
        }
        if (value instanceof Integer) {
            return (Integer) value;
        }
        if (value instanceof String) {
            return parseInteger((String) value);
        }
        if (value instanceof Number) {
            return narrowToInteger((Number) value);
        }
        throw objectConversionException(value);
    }

    // Parses via Double first so fractional strings like "4.5" are accepted
    // and rounded (see NumberType.roundOffValue); parse failures become
    // SQLDataException.
    private Integer parseInteger(String text) throws SQLException {
        try {
            return narrowToInteger(Double.valueOf(text));
        } catch (NumberFormatException nfe) {
            throw stringConversionException(text, nfe);
        }
    }

    // Narrows any Number to int after rounding and verifying it fits
    // within [Integer.MIN_VALUE, Integer.MAX_VALUE].
    private Integer narrowToInteger(Number number) throws SQLException {
        return (int) getDoubleValueWithinBounds(number, Integer.MIN_VALUE, Integer.MAX_VALUE);
    }

    @Override
    public String getTypeName() {
        return "Integer";
    }

}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.math.BigDecimal;
import java.math.RoundingMode;
import java.sql.SQLException;
import java.util.Map;

/**
 * {@link NumberType} implementation that materializes column values
 * as {@link Long} instances, range-checking on the way.
 */
public class LongType extends NumberType<Long> {

    /** Shared singleton — the converter is stateless. */
    public static final LongType INSTANCE = new LongType();

    private LongType() {
        // singleton: instantiate via INSTANCE only
    }

    /**
     * Converts the supplied object to a Long.
     *
     * @param value value to convert; null maps to 0
     * @param conversionParams optional conversion parameters (unused here)
     *
     * @return the Long equivalent of the value
     *
     * @throws SQLException if the value cannot be converted or is out of range
     */
    @Override
    public Long fromValue(Object value, Map<String, Object> conversionParams) throws SQLException {
        if (value == null) {
            return (long) 0;
        }
        if (value instanceof Long) {
            return (Long) value;
        }
        if (value instanceof String) {
            return parseLong((String) value);
        }
        if (value instanceof Number) {
            return narrowToLong((Number) value);
        }
        throw objectConversionException(value);
    }

    // Parses a String into a Long. Strings longer than 14 characters may
    // carry more significant decimal digits than a double can represent
    // exactly, so they take the more expensive BigDecimal path to preserve
    // precision (rounding halves up, matching Math.round behavior).
    private Long parseLong(String text) throws SQLException {
        try {
            if (text.length() > 14) {
                BigDecimal exact = new BigDecimal(text).setScale(0, RoundingMode.HALF_UP);
                return exact.longValueExact();
            }
            return narrowToLong(Double.valueOf(text));
        } catch (ArithmeticException | NumberFormatException ex) {
            throw stringConversionException(text, ex);
        }
    }

    // Narrows any Number to long after rounding and verifying it fits
    // within [Long.MIN_VALUE, Long.MAX_VALUE].
    private Long narrowToLong(Number number) throws SQLException {
        return (long) getDoubleValueWithinBounds(number, Long.MIN_VALUE, Long.MAX_VALUE);
    }

    @Override
    public String getTypeName() {
        return "Long";
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.SQLException;

/**
 * Base class for {@link TypeHelper} implementations that convert values
 * into bounded numeric Java types, providing shared range checking and
 * optional rounding.
 *
 * @param <T> the numeric Java type produced by the concrete subclass
 */
public abstract class NumberType<T> implements TypeHelper<T> {

    /**
     * Returns a {@link Number} value as a double, optionally rounded,
     * provided it falls within the specified min/max range.
     *
     * @param value Number value to parse
     * @param minValue minimum value possible
     * @param maxValue maximum value possible
     *
     * @return the (possibly rounded) double value of the Number
     *
     * @throws SQLException if the double value falls outside the
     *         specified range
     */
    double getDoubleValueWithinBounds(Number value, double minValue, double maxValue) throws SQLException {
        double candidate = value.doubleValue();

        // Math.round yields a long; the assignment widens it back to double
        if (roundOffValue()) {
            candidate = Math.round(candidate);
        }

        if (candidate < minValue || candidate > maxValue) {
            throw valueOutOfRangeException(value);
        }

        return candidate;
    }

    /**
     * Whether to round off a fractional value during cross conversion
     * from a different type to this type.
     *
     * @return true to apply rounding off, false otherwise
     */
    public boolean roundOffValue() {
        return true;
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.SQLException;
import java.util.Map;

/**
 * {@link NumberType} implementation that materializes column values
 * as {@link Short} instances, range-checking on the way.
 */
public class ShortType extends NumberType<Short> {

    /** Shared singleton — the converter is stateless. */
    public static final ShortType INSTANCE = new ShortType();

    private ShortType() {
        // singleton: instantiate via INSTANCE only
    }

    /**
     * Converts the supplied object to a Short.
     *
     * @param value value to convert; null maps to 0
     * @param conversionParams optional conversion parameters (unused here)
     *
     * @return the Short equivalent of the value
     *
     * @throws SQLException if the value cannot be converted or is out of range
     */
    @Override
    public Short fromValue(Object value, Map<String, Object> conversionParams) throws SQLException {
        if (value == null) {
            return (short) 0;
        }
        if (value instanceof Short) {
            return (Short) value;
        } else if (value instanceof String) {
            return asShort((String) value);
        } else if (value instanceof Number) {
            return asShort((Number) value);
        } else {
            throw objectConversionException(value);
        }
    }

    // Parses via Double first so fractional strings are accepted and rounded
    // (Short.valueOf would reject e.g. "45.50"); parse failures become
    // SQLDataException.
    private Short asShort(String value) throws SQLException {
        try {
            return asShort(Double.valueOf(value));
        } catch (NumberFormatException nfe) {
            throw stringConversionException(value, nfe);
        }
    }

    // Narrows any Number to short after rounding and verifying it fits
    // within [Short.MIN_VALUE, Short.MAX_VALUE].
    private Short asShort(Number value) throws SQLException {
        return (short) getDoubleValueWithinBounds(value, Short.MIN_VALUE, Short.MAX_VALUE);
    }

    @Override
    public String getTypeName() {
        return "Short";
    }

    // Removed leftover debug main(): it invoked Short.valueOf("45.50"),
    // which throws NumberFormatException at runtime, and printed scratch
    // output — dead development code that does not belong in the driver.
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.util.Map;

/**
 * {@link TypeHelper} implementation that materializes column values
 * as {@link String} instances.
 */
public class StringType implements TypeHelper<String> {

    /** Shared singleton — the converter is stateless. */
    public static final StringType INSTANCE = new StringType();

    private StringType() {
        // singleton: instantiate via INSTANCE only
    }

    @Override
    public String getTypeName() {
        return "String";
    }

    /**
     * Converts the supplied object to its String form.
     *
     * @param value value to convert; null maps to null
     * @param conversionParams optional conversion parameters (unused here)
     *
     * @return String.valueOf(value), or null when the value is null
     */
    @Override
    public String fromValue(Object value, Map<String, Object> conversionParams) {
        return value == null ? null : String.valueOf(value);
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import java.util.Calendar;
import java.util.Map;
import java.util.TimeZone;

/**
 * Supports returning a java.sql.Timestamp from a String in the
 * JDBC escape format, or from a Number value indicating epoch time
 * in milliseconds.
 */
public class TimestampType implements TypeHelper<Timestamp> {

    /** Shared singleton — the converter is stateless. */
    public static final TimestampType INSTANCE = new TimestampType();

    private TimestampType() {
        // singleton: instantiate via INSTANCE only
    }

    /**
     * Converts the supplied object to a Timestamp.
     *
     * @param value value to convert; null maps to null
     * @param conversionParams may carry a "calendar" entry ({@link Calendar})
     *        giving the timezone in which String/Timestamp values are
     *        interpreted
     *
     * @return the Timestamp equivalent of the value
     *
     * @throws SQLException if the value cannot be converted
     */
    @Override
    public java.sql.Timestamp fromValue(Object value, Map<String, Object> conversionParams) throws SQLException {
        if (value == null) {
            return null;
        }
        Calendar calendar = conversionParams != null ? (Calendar) conversionParams.get("calendar") : null;
        if (value instanceof Timestamp) {
            return asTimestamp((Timestamp) value, calendar);
        } else if (value instanceof String) {
            return asTimestamp((String) value, calendar);
        } else if (value instanceof Number) {
            return asTimestamp((Number) value);
        } else {
            throw objectConversionException(value);
        }
    }

    public java.sql.Timestamp asTimestamp(Timestamp value, Calendar calendar) throws SQLException {
        // no calendar: the value is already in the desired frame of reference
        if (calendar == null) {
            return value;
        }
        return localDateTimeToTimestamp(value.toLocalDateTime(), calendar);
    }

    // Re-interprets the wall-clock fields of ldt in the calendar's timezone.
    // NOTE(review): Calendar fields other than those set here retain the
    // instance's prior state — callers appear to pass fresh Calendars;
    // confirm before reusing Calendar objects across calls.
    private Timestamp localDateTimeToTimestamp(LocalDateTime ldt, Calendar calendar) {
        // Calendar months are 0-based, hence the -1
        calendar.set(ldt.getYear(), ldt.getMonthValue() - 1, ldt.getDayOfMonth(),
                ldt.getHour(), ldt.getMinute(), ldt.getSecond());
        calendar.set(Calendar.MILLISECOND, ldt.getNano() / 1000000);

        return new Timestamp(calendar.getTimeInMillis());
    }

    public java.sql.Timestamp asTimestamp(String value, Calendar calendar) throws SQLException {
        try {
            // Make some effort to understand ISO format: turn the 'T'
            // date/time separator into the space Timestamp.valueOf expects
            if (value.length() > 11 && value.charAt(10) == 'T') {
                value = value.replace('T', ' ');
            }
            // Timestamp.valueOf() does not like timezone information, so
            // strip a trailing 'Z' or a +/-hh:mm offset found at index 23.
            // NOTE(review): this assumes a 3-digit millisecond field —
            // confirm for inputs with other fraction widths.
            if (value.length() > 23) {
                if (value.length() == 24 && value.charAt(23) == 'Z') {
                    value = value.substring(0, 23);
                } else if (value.charAt(23) == '+' || value.charAt(23) == '-') {
                    // an explicit 'calendar' parameter takes precedence
                    // over the embedded offset
                    if (calendar == null) {
                        calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT" + value.substring(23)));
                    }
                    value = value.substring(0, 23);
                }
            }

            Timestamp parsed = Timestamp.valueOf(value);
            return calendar == null
                    ? parsed
                    : localDateTimeToTimestamp(parsed.toLocalDateTime(), calendar);

        } catch (IllegalArgumentException iae) {
            throw stringConversionException(value, iae);
        }
    }

    public java.sql.Timestamp asTimestamp(Number value) throws SQLException {
        // Numbers are epoch milliseconds
        return new java.sql.Timestamp(value.longValue());
    }

    @Override
    public String getTypeName() {
        return "Timestamp";
    }

}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.SQLDataException;
import java.sql.SQLException;
import java.util.Map;

/**
 * Converts a column value into an instance of a Java class other than
 * the default class its JDBCType maps to.
 * <p>
 * This implements the aspect of the JDBC spec that allows multiple
 * ResultSet getter methods to be invoked against a single JDBCType.
 */
public interface TypeConverter {

    /**
     * Retrieves a column value as an instance of the requested class.
     *
     * @param <T> type of the target Java class
     * @param value column value
     * @param clazz the Class the value should be converted to
     * @param conversionParams optional conversion parameters to use in
     *        the conversion
     *
     * @return the column value as an instance of type T
     *
     * @throws SQLException if the conversion is not supported or the
     *         conversion operation fails
     */
    <T> T convert(Object value, Class<T> clazz, Map<String, Object> conversionParams) throws SQLException;

    /**
     * Builds the exception reported when a value cannot be converted to
     * the requested class.
     */
    default SQLDataException objectConversionException(Object value, Class clazz) {
        String message = String.format(
                "Can not convert object '%s' of type '%s' to type '%s'",
                value, value.getClass().getName(), clazz.getName());
        return new SQLDataException(message);
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.Date;
import java.sql.JDBCType;
import java.sql.Timestamp;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Registry of {@link TypeConverter} instances keyed by {@link JDBCType}.
 * <p>
 * For columns mapped to a certain JDBCType, the converters here provide
 * utility functions to retrieve the column values as instances of other
 * Java classes. These conversions are needed, for example, when
 * ResultSet.getString() is invoked on a column that is internally a
 * JDBCType.FLOAT, or ResultSet.getFloat() on a column that is internally
 * a JDBCType.VARCHAR.
 */
public class TypeConverters {

    private static Map<JDBCType, TypeConverter> tcMap = new HashMap<>();

    static {
        // TODO - JDBCType.VARBINARY - byte[] -> Try ES data type
        tcMap.put(JDBCType.TIMESTAMP, new TimestampTypeConverter());
        tcMap.put(JDBCType.DATE, new DateTypeConverter());

        tcMap.put(JDBCType.FLOAT, new FloatTypeConverter());
        tcMap.put(JDBCType.REAL, new RealTypeConverter());
        tcMap.put(JDBCType.DOUBLE, new DoubleTypeConverter());

        tcMap.put(JDBCType.VARCHAR, new VarcharTypeConverter());

        tcMap.put(JDBCType.BOOLEAN, new BooleanTypeConverter());

        tcMap.put(JDBCType.TINYINT, new TinyIntTypeConverter());
        tcMap.put(JDBCType.SMALLINT, new SmallIntTypeConverter());
        tcMap.put(JDBCType.INTEGER, new IntegerTypeConverter());
        tcMap.put(JDBCType.BIGINT, new BigIntTypeConverter());
    }

    /** Returns the converter registered for the given JDBCType, or null. */
    public static TypeConverter getInstance(JDBCType jdbcType) {
        return tcMap.get(jdbcType);
    }

    /** TIMESTAMP columns: retrievable as String, Timestamp, or Date. */
    public static class TimestampTypeConverter extends BaseTypeConverter {

        private static final Set<Class> supportedJavaClasses = Collections.unmodifiableSet(
                new HashSet<>(Arrays.asList(
                        String.class, Timestamp.class, java.sql.Date.class
                )));

        private TimestampTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Timestamp.class;
        }

        @Override
        public Set<Class> getSupportedJavaClasses() {
            return supportedJavaClasses;
        }

    }

    /** DATE columns: same target classes as TIMESTAMP, defaulting to Date. */
    public static class DateTypeConverter extends BaseTypeConverter {

        private static final Set<Class> supportedJavaClasses = Collections.unmodifiableSet(
                new HashSet<>(Arrays.asList(
                        String.class, Timestamp.class, java.sql.Date.class
                )));

        private DateTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Date.class;
        }

        @Override
        public Set<Class> getSupportedJavaClasses() {
            return supportedJavaClasses;
        }

    }

    /** VARCHAR columns: retrievable as String, temporal, integral, or Boolean. */
    public static class VarcharTypeConverter extends BaseTypeConverter {

        private static final Set<Class> supportedJavaClasses = Collections.unmodifiableSet(
                new HashSet<>(Arrays.asList(
                        String.class, Timestamp.class, java.sql.Date.class,
                        Byte.class, Short.class, Integer.class, Long.class,
                        Boolean.class
                )));

        VarcharTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return String.class;
        }

        @Override
        public Set<Class> getSupportedJavaClasses() {
            return supportedJavaClasses;
        }
    }

    /** DOUBLE columns: retrievable as String or any numeric wrapper. */
    public static class DoubleTypeConverter extends BaseTypeConverter {

        private static final Set<Class> supportedJavaClasses = Collections.unmodifiableSet(
                new HashSet<>(Arrays.asList(
                        String.class, Float.class, Double.class,
                        Byte.class, Short.class, Integer.class, Long.class
                )));

        private DoubleTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Double.class;
        }

        @Override
        public Set<Class> getSupportedJavaClasses() {
            return supportedJavaClasses;
        }
    }

    /** REAL columns: like DOUBLE but defaulting to Float. */
    public static class RealTypeConverter extends DoubleTypeConverter {

        RealTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Float.class;
        }
    }

    /** FLOAT columns: identical to DOUBLE (JDBC FLOAT is double precision). */
    public static class FloatTypeConverter extends DoubleTypeConverter {

        FloatTypeConverter() {
        }
    }

    /** BOOLEAN columns: retrievable as Boolean or String. */
    public static class BooleanTypeConverter extends BaseTypeConverter {

        private static final Set<Class> supportedJavaClasses = Collections.unmodifiableSet(
                new HashSet<>(Arrays.asList(
                        Boolean.class, String.class
                )));

        BooleanTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Boolean.class;
        }

        @Override
        public Set<Class> getSupportedJavaClasses() {
            return supportedJavaClasses;
        }
    }

    /** INTEGER columns: retrievable as any numeric wrapper or String. */
    public static class IntegerTypeConverter extends BaseTypeConverter {

        private static final Set<Class> supportedJavaClasses = Collections.unmodifiableSet(
                new HashSet<>(Arrays.asList(
                        Float.class, Double.class,
                        Byte.class, Short.class, Integer.class, Long.class,
                        String.class
                )));

        IntegerTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Integer.class;
        }

        @Override
        public Set<Class> getSupportedJavaClasses() {
            return supportedJavaClasses;
        }
    }

    /** BIGINT columns: like INTEGER but defaulting to Long. */
    public static class BigIntTypeConverter extends IntegerTypeConverter {

        BigIntTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Long.class;
        }
    }

    /** TINYINT columns: like INTEGER but defaulting to Byte. */
    public static class TinyIntTypeConverter extends IntegerTypeConverter {

        TinyIntTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Byte.class;
        }
    }

    /** SMALLINT columns: like INTEGER but defaulting to Short. */
    public static class SmallIntTypeConverter extends IntegerTypeConverter {

        SmallIntTypeConverter() {
        }

        @Override
        public Class getDefaultJavaClass() {
            return Short.class;
        }
    }
}
package com.amazon.opendistroforelasticsearch.jdbc.types;

import java.sql.SQLDataException;
import java.sql.SQLException;
import java.util.Map;

/**
 * Provides conversion of Object instances to Java type T where possible.
 * <p>
 * Used by {@link TypeConverter} instances to perform object cross
 * conversions.
 *
 * @param <T> The Java type to which conversion is provided.
 */
public interface TypeHelper<T> {

    /**
     * Builds the exception reported when a String cannot be parsed as T;
     * attaches the cause when one is supplied.
     */
    default SQLDataException stringConversionException(String value, Throwable cause) {
        String message = String.format("Can not parse %s as a %s", value, getTypeName());
        return cause == null
                ? new SQLDataException(message)
                : new SQLDataException(message, cause);
    }

    /**
     * Builds the exception reported when an arbitrary Object (or null)
     * cannot be returned as T.
     */
    default SQLDataException objectConversionException(Object value) {
        if (value == null) {
            return new SQLDataException(
                    String.format("Can not return null value as a %s", getTypeName()));
        }
        return new SQLDataException(
                String.format("Can not return value of type %s as a %s",
                        value.getClass().getName(), getTypeName()));
    }

    /**
     * Builds the exception reported when a numeric value falls outside
     * the representable range of T.
     */
    default SQLDataException valueOutOfRangeException(Object value) {
        return new SQLDataException(
                String.format("Object value %s out of range for type %s", value, getTypeName()));
    }

    /**
     * Returns an Object as an equivalent instance of type T.
     *
     * @param value Object instance to convert
     * @param conversionParams Optional parameters to use for conversion
     *
     * @return instance of type T
     *
     * @throws SQLException if there is a problem in carrying out the conversion
     */
    T fromValue(Object value, Map<String, Object> conversionParams) throws SQLException;

    /**
     * Indicative name of the type T, used in conversion error messages.
     *
     * @return the human-readable type name
     */
    String getTypeName();
}
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +public class UnrecognizedElasticsearchTypeException extends IllegalArgumentException { + + public UnrecognizedElasticsearchTypeException() { + } + + public UnrecognizedElasticsearchTypeException(String s) { + super(s); + } + + public UnrecognizedElasticsearchTypeException(String message, Throwable cause) { + super(message, cause); + } + + public UnrecognizedElasticsearchTypeException(Throwable cause) { + super(cause); + } +} diff --git a/sql-jdbc/src/main/resources/META-INF/services/java.sql.Driver b/sql-jdbc/src/main/resources/META-INF/services/java.sql.Driver new file mode 100644 index 0000000000..ff2ddb8960 --- /dev/null +++ b/sql-jdbc/src/main/resources/META-INF/services/java.sql.Driver @@ -0,0 +1 @@ +com.amazon.opendistroforelasticsearch.jdbc.Driver \ No newline at end of file diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ConnectionTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ConnectionTests.java new file mode 100644 index 0000000000..33c3e7e4d8 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ConnectionTests.java @@ -0,0 +1,271 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.config.AuthConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionPropertyException; +import com.amazon.opendistroforelasticsearch.jdbc.config.PasswordConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.RegionConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.RequestCompressionConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.UserConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonHttpProtocol; +import com.amazon.opendistroforelasticsearch.jdbc.test.PerTestWireMockServerExtension; +import com.amazon.opendistroforelasticsearch.jdbc.test.WireMockServerHelpers; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.QueryMock; +import com.amazonaws.auth.SdkClock; +import com.github.tomakehurst.wiremock.WireMockServer; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +import static 
com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@ExtendWith(PerTestWireMockServerExtension.class) +class ConnectionTests implements WireMockServerHelpers { + + @Test + void testGetConnection(final WireMockServer mockServer) throws SQLException { + mockServer.stubFor(get(urlEqualTo("/")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + Driver driver = new Driver(); + Connection con = Assertions.assertDoesNotThrow( + () -> driver.connect(getBaseURLForMockServer(mockServer), (Properties) null)); + + assertConnectionOpen(con); + MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + con.close(); + } + + @Test + void testConnectWithBasicAuth(final WireMockServer mockServer) throws ConnectionPropertyException, SQLException { + // HTTP Client Basic Auth is not pre-emptive, set up an Auth Challenge + mockServer.stubFor(get(urlEqualTo("/")) + .willReturn(aResponse() + .withStatus(401) + .withHeader("WWW-Authenticate", "Basic realm=\"Auth Realm\"") + )); + + // Response if request's basic auth matches expectation + mockServer.stubFor(get(urlEqualTo("/")) + .withBasicAuth("user-name", "password-$#@!*%^123") + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + Properties props = new Properties(); + props.put(AuthConnectionProperty.KEY, "basic"); + props.put(UserConnectionProperty.KEY, "user-name"); + props.put(PasswordConnectionProperty.KEY, "password-$#@!*%^123"); + + Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(getBaseURLForMockServer(mockServer), props)); + + mockServer.verify(2, getRequestedFor(urlEqualTo("/")) + .withHeader("Accept", equalTo("application/json"))); + + 
MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + con.close(); + } + + @Test + void testConnectDefaultAuthWithUsername(final WireMockServer mockServer) throws SQLException { + // In the absence of explicit auth type, Basic is used if a username/password + // is specified + + // HTTP Client Basic Auth is not pre-emptive, set up an Auth Challenge + mockServer.stubFor(get(urlEqualTo("/")) + .willReturn(aResponse() + .withStatus(401) + .withHeader("WWW-Authenticate", "Basic realm=\"Auth Realm\"") + )); + + // Response if request's basic auth matches expectation + mockServer.stubFor(get(urlEqualTo("/")) + .withBasicAuth("user-name", "password-$#@!*%^123") + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + Properties props = new Properties(); + props.put(UserConnectionProperty.KEY, "user-name"); + props.put(PasswordConnectionProperty.KEY, "password-$#@!*%^123"); + + Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(getBaseURLForMockServer(mockServer), props)); + + mockServer.verify(2, getRequestedFor(urlEqualTo("/")) + .withHeader("Accept", equalTo("application/json"))); + + MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + con.close(); + } + + @Test + void testConnectWithRequestCompression(final WireMockServer mockServer) throws SQLException { + // Respond only if request mentions it accepts gzip + // i.e. 
expected behavior when requestCompression is set + mockServer.stubFor( + get(urlEqualTo("/")) + .withHeader("Accept-Encoding", equalTo("gzip,deflate")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + Properties props = new Properties(); + props.setProperty(RequestCompressionConnectionProperty.KEY, "true"); + + // WireMockServer returns a gzip response by default + // if Accept-Enconding: gzip,deflate is present in the request + Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(getBaseURLForMockServer(mockServer), props)); + MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + con.close(); + } + + @Test + void testConnectWithoutRequestCompression(final WireMockServer mockServer) throws ConnectionPropertyException, SQLException { + // Respond successfully only if request does not mention it accepts gzip + // i.e. expected behavior when requestCompression is not set + mockServer.stubFor( + get(urlEqualTo("/")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + mockServer.stubFor( + get(urlEqualTo("/")) + .withHeader("Accept-Encoding", equalTo("gzip,deflate")) + .willReturn(aResponse() + .withStatus(400) + .withStatusMessage("Request seeks gzip response"))); + + // explicitly disable requestCompression + Properties props = new Properties(); + props.setProperty(RequestCompressionConnectionProperty.KEY, "false"); + + // WireMockServer returns a gzip response by default + // if Accept-Enconding: gzip,deflate is present in the request + Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(getBaseURLForMockServer(mockServer), props)); + MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + con.close(); + } + + @Test + void testConnectWithDefaultRequestCompression(final WireMockServer mockServer) throws 
ConnectionPropertyException, SQLException { + // Respond successfully only if request does not mention it accepts gzip + // i.e. expected behavior when requestCompression is not set + mockServer.stubFor( + get(urlEqualTo("/")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + // return HTTP 400 if request contains Accept-Encoding: gzip + mockServer.stubFor( + get(urlEqualTo("/")) + .withHeader("Accept-Encoding", equalTo("gzip,deflate")) + .willReturn(aResponse() + .withStatus(400) + .withStatusMessage("Request seeks gzip response by default"))); + + // empty Properties - expect default behavior is to not set requestCompression + Properties props = new Properties(); + + Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(getBaseURLForMockServer(mockServer), props)); + MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + con.close(); + } + + // TODO - find a way to test this differently? 
+ @Disabled("currently this does not work because Host header value " + + "is included in signature which is of 'localhost:port' form " + + "and since the port value differs every run of the test, the " + + "signature generated is different from the canned response " + + "we're testing against") + @Test + void testConnectWithAwsSigV4Auth(final WireMockServer mockServer) throws SQLException { + mockServer.stubFor(get(urlEqualTo("/")) + .withHeader("Authorization", + equalTo("AWS4-HMAC-SHA256 " + + "Credential=AKIAJUXF4LQLB55YQ73A/20181119/us-east-1/es/aws4_request, " + + "SignedHeaders=host;user-agent;x-amz-date, " + + "Signature=80088eaaa2e7766ccee12014a5ab80d323635347157ea29935e990d34bcbff12")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + + Properties props = new Properties(); + props.setProperty(AuthConnectionProperty.KEY, "aws_sigv4"); + props.setProperty(RegionConnectionProperty.KEY, "us-east-1"); + + // Ensure AWS Signing uses same date/time as was used to generate + // the signatures in this test case + SdkClock.Instance.set(new SdkClock.MockClock(1542653839129L)); + + Connection con = Assertions.assertDoesNotThrow(() -> + new Driver().connect(getBaseURLForMockServer(mockServer), props)); + + MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + con.close(); + } + + @ParameterizedTest + @ValueSource(strings = { + "/context/path", + "/context/path/", + "//context/path", + "//context/path/", + }) + void testConnectionWithContextPath(final String userContextPath, final WireMockServer mockServer) + throws SQLException, IOException { + QueryMock.NycTaxisQueryMock queryMock = new QueryMock.NycTaxisQueryMock(); + queryMock.setupMockServerStub(mockServer, "/context/path/", + "/context/path"+ JsonHttpProtocol.DEFAULT_SQL_CONTEXT_PATH+"?format=jdbc"); + + Driver driver = new Driver(); + Connection con = Assertions.assertDoesNotThrow( + () -> 
driver.connect(getURLForMockServerWithContext(mockServer, userContextPath), (Properties) null)); + + assertConnectionOpen(con); + queryMock.assertConnectionResponse((ElasticsearchConnection) con); + + Statement st = con.createStatement(); + + Assertions.assertDoesNotThrow(() -> st.executeQuery(queryMock.getSql())); + + con.close(); + } + + private void assertConnectionOpen(final Connection con) { + boolean closed = assertDoesNotThrow(con::isClosed); + assertTrue(!closed, "Connection is closed"); + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/CursorTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/CursorTests.java new file mode 100644 index 0000000000..8927d1944e --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/CursorTests.java @@ -0,0 +1,168 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.ColumnMetaData; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Cursor; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Row; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Schema; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonQueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class CursorTests { + + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 7, 10, 11}) + void testCursorNext(final int rowCount) { + Schema schema = new Schema(Arrays.asList( + toColumnMetaData("rownum", ElasticsearchType.INTEGER.getTypeName()))); + + List rows = new ArrayList<>(); + for (int rowNum = 1; rowNum <= rowCount; rowNum++) { + rows.add(toRow(rowNum)); + } + + Cursor cursor = new Cursor(schema, rows); + int cursorRowCount = 0; + + while (cursor.next()) { + cursorRowCount++; + assertEquals(1, cursor.getColumnCount(), "Unexpected column count. 
Row number: " + cursorRowCount); + assertEquals(cursorRowCount, cursor.getColumn(0), "Unexpected row number indicator"); + } + + assertEquals(rowCount, cursorRowCount, "Unexpected number of rows retrieved from cursor."); + } + + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 7, 10, 11}) + void testCursorNextWithMultipleColumns(final int rowCount) { + + final String STRINGVAL = "string"; + final long LONGVAL = 12345678901234567L; + final double DOUBLEVAL = 100.25; + + final List columnMetaDatas = Arrays.asList( + toColumnMetaData("rownum", ElasticsearchType.INTEGER.getTypeName()), + toColumnMetaData("stringval", ElasticsearchType.TEXT.getTypeName()), + toColumnMetaData("longval", ElasticsearchType.LONG.getTypeName()), + toColumnMetaData("doubleval", ElasticsearchType.DOUBLE.getTypeName()) + ); + + Schema schema = new Schema(columnMetaDatas); + + List rows = new ArrayList<>(); + for (int rowNum = 1; rowNum <= rowCount; rowNum++) { + rows.add(toRow(rowNum, STRINGVAL, LONGVAL, DOUBLEVAL)); + } + + Cursor cursor = new Cursor(schema, rows); + int cursorRowCount = 0; + + assertEquals(Integer.valueOf(0), cursor.findColumn("rownum"), "Mismatch in locating column 'rownum'"); + assertEquals(Integer.valueOf(1), cursor.findColumn("stringval"), "Mismatch in locating column 'stringval'"); + assertEquals(Integer.valueOf(2), cursor.findColumn("longval"), "Mismatch in locating column 'longval'"); + assertEquals(Integer.valueOf(3), cursor.findColumn("doubleval"), "Mismatch in locating column 'doubleval'"); + assertNull(cursor.findColumn("unknown"), "Valid index for unknown column label"); + + while (cursor.next()) { + cursorRowCount++; + assertThrows(IllegalArgumentException.class, () -> cursor.getColumn(-1)); + assertEquals(columnMetaDatas.size(), cursor.getColumnCount(), "Unexpected column count. 
Row number: " + cursorRowCount); + assertEquals(cursorRowCount, cursor.getColumn(0), "Unexpected row number indicator"); + assertEquals(STRINGVAL, cursor.getColumn(1), "Unexpected column value. Row number: " + cursorRowCount); + assertEquals(LONGVAL, cursor.getColumn(2), "Unexpected column value. Row number: " + cursorRowCount); + assertEquals(DOUBLEVAL, cursor.getColumn(3), "Unexpected column value. Row number: " + cursorRowCount); + assertThrows(IllegalArgumentException.class, () -> cursor.getColumn(4)); + } + + assertEquals(rowCount, cursorRowCount, "Unexpected number of rows retrieved from cursor."); + } + + @ParameterizedTest + @ValueSource(ints = {4, 7, 10, 11}) + void testCursorFindColumn(final int rowCount) { + final String STRINGVAL = "string"; + final long LONGVAL = 12345678901234567L; + final double DOUBLEVAL = 100.25; + + final List columnMetaDatas = Arrays.asList( + toColumnMetaData("rownum", ElasticsearchType.INTEGER.getTypeName()), + toColumnMetaData("stringval", ElasticsearchType.TEXT.getTypeName(), "stringlabel"), + toColumnMetaData("longval", ElasticsearchType.LONG.getTypeName()), + toColumnMetaData("doubleval", ElasticsearchType.DOUBLE.getTypeName(), "doubleLabel") + ); + + Schema schema = new Schema(columnMetaDatas); + + List rows = new ArrayList<>(); + for (int rowNum = 1; rowNum <= rowCount; rowNum++) { + rows.add(toRow(rowNum, STRINGVAL, LONGVAL, DOUBLEVAL)); + } + + Cursor cursor = new Cursor(schema, rows); + int cursorRowCount = 0; + + assertEquals(Integer.valueOf(0), cursor.findColumn("rownum"), "Mismatch in locating column 'rownum'"); + assertNull(cursor.findColumn("stringval"), "column lookup succeeded by name - doubleval"); + assertEquals(Integer.valueOf(1), cursor.findColumn("stringlabel"), "Mismatch in locating column 'stringlabel'"); + assertEquals(Integer.valueOf(2), cursor.findColumn("longval"), "Mismatch in locating column 'longval'"); + assertEquals(Integer.valueOf(3), cursor.findColumn("doubleLabel"), "Mismatch in locating 
column 'doubleLabel'"); + assertNull(cursor.findColumn("doubleval"), "column lookup succeeded by name - doubleval"); + assertNull(cursor.findColumn("unknown"), "Valid index for unknown column label"); + + while (cursor.next()) { + cursorRowCount++; + assertThrows(IllegalArgumentException.class, () -> cursor.getColumn(-1)); + assertEquals(columnMetaDatas.size(), cursor.getColumnCount(), "Unexpected column count. Row number: " + cursorRowCount); + assertEquals(cursorRowCount, columnObject(cursor, "rownum"), "Unexpected row number indicator"); + assertEquals(STRINGVAL, columnObject(cursor, "stringlabel"), "Unexpected column value. Row number: " + cursorRowCount); + assertEquals(LONGVAL, columnObject(cursor, "longval"), "Unexpected column value. Row number: " + cursorRowCount); + assertEquals(DOUBLEVAL, columnObject(cursor, "doubleLabel"), "Unexpected column value. Row number: " + cursorRowCount); + assertThrows(IllegalArgumentException.class, () -> cursor.getColumn(4)); + } + + assertEquals(rowCount, cursorRowCount, "Unexpected number of rows retrieved from cursor."); + } + + private Object columnObject(Cursor cursor, String columnLabel) { + return cursor.getColumn(cursor.findColumn(columnLabel)); + } + + private Row toRow(Object... 
values) { + return new Row(Arrays.asList(values)); + } + + private ColumnMetaData toColumnMetaData(String name, String type) { + return toColumnMetaData(name, type, null); + + } + + private ColumnMetaData toColumnMetaData(String name, String type, String label) { + return new ColumnMetaData(new JsonQueryResponse.SchemaEntry(name, type, label)); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DataSourceTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DataSourceTests.java new file mode 100644 index 0000000000..d0e8e46a22 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DataSourceTests.java @@ -0,0 +1,207 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.auth.AuthenticationType; +import com.amazon.opendistroforelasticsearch.jdbc.config.AuthConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.config.HostConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.LoginTimeoutConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PasswordConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PortConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.UserConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.test.PerTestWireMockServerExtension; +import com.amazon.opendistroforelasticsearch.jdbc.test.WireMockServerHelpers; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.QueryMock; +import com.github.tomakehurst.wiremock.WireMockServer; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +import javax.sql.DataSource; +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +import static org.junit.jupiter.api.Assertions.*; + +@ExtendWith(PerTestWireMockServerExtension.class) +public class DataSourceTests implements WireMockServerHelpers { + + @Test + void testDataSourceConfig() throws SQLException { + ElasticsearchDataSource eds = new ElasticsearchDataSource(); + + Properties props = new Properties(); + props.setProperty(HostConnectionProperty.KEY, "some-host"); + props.setProperty(PortConnectionProperty.KEY, "1100"); + props.setProperty(LoginTimeoutConnectionProperty.KEY, "100"); + eds.setProperties(props); + + ConnectionConfig config = 
eds.getConnectionConfig(null); + + assertEquals("some-host", config.getHost()); + assertEquals(1100, config.getPort()); + assertEquals(100, config.getLoginTimeout()); + assertNull(config.getUser()); + assertNull(config.getPassword()); + Assertions.assertEquals(AuthenticationType.NONE, config.getAuthenticationType()); + assertNull(config.getAwsCredentialsProvider()); + } + + @Test + void testDataSourceConfigWithDefaults() throws SQLException { + ElasticsearchDataSource eds = new ElasticsearchDataSource(); + + Properties defaults = new Properties(); + defaults.setProperty(UserConnectionProperty.KEY, "default-user"); + defaults.setProperty(PasswordConnectionProperty.KEY, "default-pass"); + defaults.setProperty(AuthConnectionProperty.KEY, "basic"); + + Properties props = new Properties(defaults); + props.setProperty(HostConnectionProperty.KEY, "some-host"); + props.setProperty(PortConnectionProperty.KEY, "1100"); + props.setProperty(LoginTimeoutConnectionProperty.KEY, "100"); + + eds.setProperties(props); + + ConnectionConfig config = eds.getConnectionConfig(null); + + assertEquals("some-host", config.getHost()); + assertEquals(1100, config.getPort()); + assertEquals(100, config.getLoginTimeout()); + assertEquals("default-user", config.getUser()); + assertEquals("default-pass", config.getPassword()); + assertEquals(AuthenticationType.BASIC, config.getAuthenticationType()); + } + + @Test + void testDataSourceConfigUpdate() throws SQLException { + ElasticsearchDataSource eds = new ElasticsearchDataSource(); + Properties props = new Properties(); + props.setProperty(HostConnectionProperty.KEY, "some-host"); + props.setProperty(PortConnectionProperty.KEY, "1100"); + props.setProperty(LoginTimeoutConnectionProperty.KEY, "100"); + eds.setProperties(props); + + props = new Properties(); + props.setProperty(HostConnectionProperty.KEY, "some-host-updated"); + props.setProperty(PortConnectionProperty.KEY, "2100"); + eds.setProperties(props); + + ConnectionConfig config = 
eds.getConnectionConfig(null); + + assertEquals("some-host-updated", config.getHost()); + assertEquals(2100, config.getPort()); + assertEquals(0, config.getLoginTimeout()); + assertNull(config.getUser()); + assertNull(config.getPassword()); + } + + @Test + void testDataSourceConfigUpdateWithOverrides() throws SQLException { + ElasticsearchDataSource eds = new ElasticsearchDataSource(); + Properties props = new Properties(); + props.setProperty(HostConnectionProperty.KEY, "some-host"); + props.setProperty(PortConnectionProperty.KEY, "2100"); + eds.setProperties(props); + + Map overrides = new HashMap<>(); + overrides.put(UserConnectionProperty.KEY, "override-user"); + overrides.put(PasswordConnectionProperty.KEY, "override-pass"); + ConnectionConfig config = eds.getConnectionConfig(overrides); + + assertEquals("some-host", config.getHost()); + assertEquals(2100, config.getPort()); + assertEquals(0, config.getLoginTimeout()); + assertEquals("override-user", config.getUser()); + assertEquals("override-pass", config.getPassword()); + } + + @Test + void testDataSourceConfigUpdateWithOverridesPrecedence() throws SQLException { + ElasticsearchDataSource eds = new ElasticsearchDataSource(); + Properties props = new Properties(); + props.setProperty(HostConnectionProperty.KEY, "some-host"); + props.setProperty(PortConnectionProperty.KEY, "1100"); + props.setProperty(LoginTimeoutConnectionProperty.KEY, "100"); + eds.setProperties(props); + + props = new Properties(); + props.setProperty(HostConnectionProperty.KEY, "some-host-updated"); + props.setProperty(PortConnectionProperty.KEY, "2100"); + props.setProperty(UserConnectionProperty.KEY, "user"); + props.setProperty(PasswordConnectionProperty.KEY, "pass"); + eds.setProperties(props); + + ConnectionConfig config = eds.getConnectionConfig(null); + + assertEquals("some-host-updated", config.getHost()); + assertEquals(2100, config.getPort()); + assertEquals(0, config.getLoginTimeout()); + assertEquals("user", config.getUser()); 
+ assertEquals("pass", config.getPassword()); + + Map overrides = new HashMap<>(); + overrides.put(UserConnectionProperty.KEY, "override-user"); + overrides.put(PasswordConnectionProperty.KEY, "override-pass"); + config = eds.getConnectionConfig(overrides); + + assertEquals("some-host-updated", config.getHost()); + assertEquals(2100, config.getPort()); + assertEquals(0, config.getLoginTimeout()); + assertEquals("override-user", config.getUser()); + assertEquals("override-pass", config.getPassword()); + } + + @Test + void testDataSourceFromUrlNycTaxisQuery(WireMockServer mockServer) throws SQLException, IOException { + QueryMock queryMock = new QueryMock.NycTaxisQueryMock(); + queryMock.setupMockServerStub(mockServer); + + DataSource ds = new ElasticsearchDataSource(); + ((ElasticsearchDataSource) ds).setUrl(getBaseURLForMockServer(mockServer)); + + Connection con = ds.getConnection(); + Statement st = con.createStatement(); + ResultSet rs = assertDoesNotThrow(() -> st.executeQuery(queryMock.getSql())); + + assertNotNull(rs); + queryMock.getMockResultSet().assertMatches(rs); + } + + @Test + void testDataSourceFromPropsNycTaxisQuery(WireMockServer mockServer) throws SQLException, IOException { + QueryMock queryMock = new QueryMock.NycTaxisQueryMock(); + queryMock.setupMockServerStub(mockServer); + + DataSource ds = new ElasticsearchDataSource(); + ((ElasticsearchDataSource) ds).setProperties(getConnectionPropertiesForMockServer(mockServer)); + + Connection con = ds.getConnection(); + Statement st = con.createStatement(); + ResultSet rs = assertDoesNotThrow(() -> st.executeQuery(queryMock.getSql())); + + assertNotNull(rs); + queryMock.getMockResultSet().assertMatches(rs); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DatabaseMetaDataTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DatabaseMetaDataTests.java new file mode 100644 index 0000000000..aa8e854f06 --- /dev/null +++ 
b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DatabaseMetaDataTests.java @@ -0,0 +1,466 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.DatabaseMetaDataImpl.ColumnMetadataStatement; +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.logging.NoOpLogger; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ClusterMetadata; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ConnectionResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.Protocol; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ProtocolFactory; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.transport.Transport; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportFactory; +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockResultSet; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockResultSetMetaData; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockResultSetRows; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.sql.Connection; +import 
java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +public class DatabaseMetaDataTests { + + @Test + void testClusterMetaData() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + assertEquals("6.3.2", dbmd.getDatabaseProductVersion()); + assertEquals(6, dbmd.getDatabaseMajorVersion()); + assertEquals(3, dbmd.getDatabaseMinorVersion()); + assertEquals("Elasticsearch", dbmd.getDatabaseProductName()); + + assertFalse(con.isClosed()); + } + + @Test + void testGetAttributes() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("TYPE_CAT") + .column("TYPE_SCHEM") + .column("TYPE_NAME") + .column("ATTR_NAME") + .column("DATA_TYPE", ElasticsearchType.INTEGER) + .column("ATTR_TYPE_NAME") + .column("ATTR_SIZE", ElasticsearchType.INTEGER) + .column("DECIMAL_DIGITS", ElasticsearchType.INTEGER) + .column("NUM_PREC_RADIX", ElasticsearchType.INTEGER) + .column("NULLABLE", ElasticsearchType.INTEGER) + .column("REMARKS") + .column("ATTR_DEF") + .column("SQL_DATA_TYPE", ElasticsearchType.INTEGER) + .column("SQL_DATETIME_SUB", ElasticsearchType.INTEGER) + .column("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER) + .column("ORDINAL_POSITION", ElasticsearchType.INTEGER) + .column("IS_NULLABLE") + .column("SCOPE_CATALOG") + .column("SCOPE_SCHEMA") + .column("SCOPE_TABLE") + .column("SOURCE_DATA_TYPE", ElasticsearchType.SHORT) + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getAttributes("", null, null, null); + + new MockResultSet(mockResultSetMetaData, 
MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetSuperTables() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("TABLE_CAT") + .column("TABLE_SCHEM") + .column("TABLE_NAME") + .column("SUPERTABLE_NAME") + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getSuperTables("", null, null); + + new MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetSuperTypes() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("TYPE_CAT") + .column("TYPE_SCHEM") + .column("TYPE_NAME") + .column("SUPERTYPE_CAT") + .column("SUPERTYPE_SCHEM") + .column("SUPERTYPE_NAME") + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getSuperTypes("", null, null); + + new MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetUDTs() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("TYPE_CAT") + .column("TYPE_SCHEM") + .column("TYPE_NAME") + .column("CLASS_NAME") + .column("DATA_TYPE", ElasticsearchType.INTEGER) + .column("REMARKS") + .column("BASE_TYPE", ElasticsearchType.SHORT) + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getUDTs("", null, null, null); + + new 
MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetProcedures() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("PROCEDURE_CAT") + .column("PROCEDURE_SCHEM") + .column("PROCEDURE_NAME") + .column("RESERVED4") + .column("RESERVED5") + .column("RESERVED6") + .column("REMARKS") + .column("PROCEDURE_TYPE", ElasticsearchType.SHORT) + .column("SPECIFIC_NAME") + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getProcedures(null, null, null); + + new MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetProcedureColumns() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("PROCEDURE_CAT") + .column("PROCEDURE_SCHEM") + .column("PROCEDURE_NAME") + .column("COLUMN_NAME") + .column("COLUMN_TYPE", ElasticsearchType.SHORT) + .column("DATA_TYPE", ElasticsearchType.INTEGER) + .column("TYPE_NAME") + .column("PRECISION", ElasticsearchType.INTEGER) + .column("LENGTH", ElasticsearchType.INTEGER) + .column("SCALE", ElasticsearchType.SHORT) + .column("RADIX", ElasticsearchType.SHORT) + .column("NULLABLE", ElasticsearchType.SHORT) + .column("REMARKS") + .column("COLUMN_DEF") + .column("SQL_DATA_TYPE", ElasticsearchType.INTEGER) + .column("SQL_DATETIME_SUB", ElasticsearchType.INTEGER) + .column("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER) + .column("ORDINAL_POSITION", ElasticsearchType.INTEGER) + .column("IS_NULLABLE") + .column("SPECIFIC_NAME") + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, 
"DatabaseMetaData is null"); + + ResultSet rs = dbmd.getProcedureColumns("", null, null, null); + + new MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetFunctions() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("FUNCTION_CAT") + .column("FUNCTION_SCHEM") + .column("FUNCTION_NAME") + .column("REMARKS") + .column("FUNCTION_TYPE", ElasticsearchType.SHORT) + .column("SPECIFIC_NAME") + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getFunctions("", null, null); + + new MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + + @Test + void testGetFunctionColumns() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("FUNCTION_CAT") + .column("FUNCTION_SCHEM") + .column("FUNCTION_NAME") + .column("COLUMN_NAME") + .column("COLUMN_TYPE", ElasticsearchType.SHORT) + .column("DATA_TYPE", ElasticsearchType.INTEGER) + .column("TYPE_NAME") + .column("PRECISION", ElasticsearchType.INTEGER) + .column("LENGTH", ElasticsearchType.INTEGER) + .column("SCALE", ElasticsearchType.SHORT) + .column("RADIX", ElasticsearchType.SHORT) + .column("NULLABLE", ElasticsearchType.SHORT) + .column("REMARKS") + .column("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER) + .column("ORDINAL_POSITION", ElasticsearchType.INTEGER) + .column("IS_NULLABLE") + .column("SPECIFIC_NAME") + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getFunctionColumns("", null, null, null); + + new 
MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testPseudoColumns() throws ResponseException, IOException, SQLException { + Connection con = getMockConnection(); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("TABLE_CAT") + .column("TABLE_SCHEM") + .column("TABLE_NAME") + .column("COLUMN_NAME") + .column("DATA_TYPE", ElasticsearchType.INTEGER) + .column("COLUMN_SIZE", ElasticsearchType.INTEGER) + .column("DECIMAL_DIGITS", ElasticsearchType.INTEGER) + .column("NUM_PREC_RADIX", ElasticsearchType.INTEGER) + .column("COLUMN_USAGE") + .column("REMARKS") + .column("CHAR_OCTET_LENGTH", ElasticsearchType.INTEGER) + .column("IS_NULLABLE") + .build(); + + DatabaseMetaData dbmd = con.getMetaData(); + + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getPseudoColumns("", null, null, null); + + new MockResultSet(mockResultSetMetaData, MockResultSetRows.emptyResultSetRows()).assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetCatalogs() throws Exception { + Connection con = getMockConnection(); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getCatalogs(); + + getExpectedCatalogsResultSet().assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetSchemas() throws Exception { + Connection con = getMockConnection(); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd, "DatabaseMetaData is null"); + + ResultSet rs = dbmd.getSchemas(); + + getExpectedSchemaResultSet().assertMatches(rs); + assertDoesNotThrow(rs::close); + } + + @Test + void testGetSchemasWithValidPatterns() throws Exception { + Connection con = getMockConnection(); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd, "DatabaseMetaData is null"); + + 
assertValidSchemaResultSet(dbmd.getSchemas(null, null)); + assertValidSchemaResultSet(dbmd.getSchemas(null, "%")); + assertValidSchemaResultSet(dbmd.getSchemas(null, "")); + assertValidSchemaResultSet(dbmd.getSchemas("mock-cluster", null)); + assertValidSchemaResultSet(dbmd.getSchemas("mock-cluster", "%")); + assertValidSchemaResultSet(dbmd.getSchemas("mock-cluster", "")); + } + + @Test + void testGetSchemasWithInvalidPatterns() throws Exception { + Connection con = getMockConnection(); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd, "DatabaseMetaData is null"); + + assertEmptySchemaResultSet(dbmd.getSchemas("", null)); + assertEmptySchemaResultSet(dbmd.getSchemas("some-cat", "%")); + assertEmptySchemaResultSet(dbmd.getSchemas("mock-cluster", "some-schema")); + assertEmptySchemaResultSet(dbmd.getSchemas(null, "some-schema")); + } + + @Test + void testGetColumnsWithoutColumnNamePattern() throws Exception { + Connection con = getMockConnection(); + + ColumnMetadataStatement stmt = new ColumnMetadataStatement((ConnectionImpl)con, "TABLE_%", null, NoOpLogger.INSTANCE); + assertEquals("DESCRIBE TABLES LIKE TABLE_%", stmt.sql); + assertDoesNotThrow(stmt::close); + } + + @Test + void testGetColumnsWithColumnNamePattern() throws Exception { + Connection con = getMockConnection(); + + ColumnMetadataStatement stmt = new ColumnMetadataStatement((ConnectionImpl)con, "TABLE_%", "COLUMN_%", NoOpLogger.INSTANCE); + assertEquals("DESCRIBE TABLES LIKE TABLE_% COLUMNS LIKE COLUMN_%", stmt.sql); + assertDoesNotThrow(stmt::close); + } + + private void assertValidSchemaResultSet(ResultSet rs) throws SQLException { + getExpectedSchemaResultSet().assertMatches(rs); + } + + private void assertEmptySchemaResultSet(ResultSet rs) throws SQLException { + getEmptySchemaResultSet().assertMatches(rs); + } + + private MockResultSet getExpectedCatalogsResultSet() { + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("TABLE_CAT", 
ElasticsearchType.TEXT) + .build(); + + MockResultSetRows mockResultSetRows = MockResultSetRows.builder() + .row() + .column("mock-cluster") + .build(); + + return new MockResultSet(mockResultSetMetaData, mockResultSetRows); + } + + + private MockResultSet getExpectedSchemaResultSet() { + MockResultSetRows mockResultSetRows = MockResultSetRows.builder() + .row() + .column("") + .column("mock-cluster") + .build(); + + return new MockResultSet(getMockSchemaResultSetMetaData(), mockResultSetRows); + } + + private MockResultSet getEmptySchemaResultSet() { + return new MockResultSet(getMockSchemaResultSetMetaData(), MockResultSetRows.emptyResultSetRows()); + } + + + private MockResultSetMetaData getMockSchemaResultSetMetaData() { + return MockResultSetMetaData.builder() + .column("TABLE_SCHEM", ElasticsearchType.TEXT) + .column("TABLE_CATALOG", ElasticsearchType.TEXT) + .build(); + } + + private Connection getMockConnection() throws ResponseException, IOException, SQLException { + TransportFactory mockTransportFactory = mock(TransportFactory.class); + when(mockTransportFactory.getTransport(any(), any(), any())) + .thenReturn(mock(Transport.class)); + + ProtocolFactory mockProtocolFactory = mock(ProtocolFactory.class); + Protocol mockProtocol = mock(Protocol.class); + + when(mockProtocolFactory.getProtocol(any(ConnectionConfig.class), any(Transport.class))) + .thenReturn(mockProtocol); + + ClusterMetadata mockClusterMetaData = mock(ClusterMetadata.class); + ElasticsearchVersion mockEV = mock(ElasticsearchVersion.class); + + when(mockEV.getFullVersion()).thenReturn("6.3.2"); + when(mockEV.getMajor()).thenReturn(6); + when(mockEV.getMinor()).thenReturn(3); + when(mockEV.getRevision()).thenReturn(2); + + when(mockClusterMetaData.getVersion()).thenReturn(mockEV); + when(mockClusterMetaData.getClusterName()).thenReturn("mock-cluster"); + when(mockClusterMetaData.getClusterUUID()).thenReturn("mock-cluster-uuid"); + + ConnectionResponse mockConnectionResponse = 
mock(ConnectionResponse.class); + when(mockConnectionResponse.getClusterMetadata()).thenReturn(mockClusterMetaData); + + when(mockProtocol.connect(anyInt())).thenReturn(mockConnectionResponse); + + Connection con = new ConnectionImpl(mock(ConnectionConfig.class), + mockTransportFactory, mockProtocolFactory, NoOpLogger.INSTANCE); + return con; + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DriverTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DriverTests.java new file mode 100644 index 0000000000..0e266df482 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/DriverTests.java @@ -0,0 +1,57 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.test.PerClassWireMockServerExtension; +import com.amazon.opendistroforelasticsearch.jdbc.test.WireMockServerHelpers; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES; +import com.github.tomakehurst.wiremock.WireMockServer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Properties; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.junit.jupiter.api.Assertions.*; + +@ExtendWith(PerClassWireMockServerExtension.class) +public class DriverTests implements WireMockServerHelpers { + + @Test + public void testConnect(WireMockServer mockServer) throws SQLException { + mockServer.stubFor(get(urlEqualTo("/")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + + Driver driver = new Driver(); + Connection con = assertDoesNotThrow(() -> driver.connect( + getBaseURLForMockServer(mockServer), (Properties) null)); + + assertConnectionOpen(con); + MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con); + } + + + private void assertConnectionOpen(final Connection con) { + boolean closed = assertDoesNotThrow(con::isClosed); + assertTrue(!closed, "Connection is closed"); + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/PreparedStatementTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/PreparedStatementTests.java new file mode 100644 index 0000000000..eacec90c15 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/PreparedStatementTests.java @@ -0,0 +1,193 @@ +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; 
+import com.amazon.opendistroforelasticsearch.jdbc.logging.NoOpLogger; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ConnectionResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.Protocol; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ProtocolFactory; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.test.PerTestWireMockServerExtension; +import com.amazon.opendistroforelasticsearch.jdbc.transport.Transport; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportFactory; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLNonTransientException; +import java.util.Arrays; +import java.util.Objects; +import java.util.Properties; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + + +/** + * PreparedStatement tests + * + * @author echo + * @since 12.03.20 + **/ +@ExtendWith(PerTestWireMockServerExtension.class) +public class PreparedStatementTests { + + final String sql = "select pickup_datetime, 
trip_type, passenger_count, " + + "fare_amount, extra, vendor_id from nyc_taxis LIMIT 5"; + + private static Stream getArgumentsStream() { + int[] resultSetTypes = new int[]{ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.TYPE_FORWARD_ONLY}; + int[] resultSetConcurrencyTypes = new int[]{ResultSet.CONCUR_READ_ONLY, ResultSet.CONCUR_UPDATABLE}; + int[] resultSetHoldabilityTypes = new int[]{ResultSet.HOLD_CURSORS_OVER_COMMIT, ResultSet.CLOSE_CURSORS_AT_COMMIT}; + Stream.Builder builder = Stream.builder(); + for (int resultSetType : resultSetTypes) { + for (int resultSetConcurrencyType : resultSetConcurrencyTypes) { + for (int resultSetHoldabilityType : resultSetHoldabilityTypes) { + builder.add(Arguments.of(resultSetType, resultSetConcurrencyType, resultSetHoldabilityType)); + } + } + } + return builder.build(); + } + + private static Stream resultSetParamsNotSupported() { + return getArgumentsStream().filter(a -> !Arrays.deepEquals(a.get(), new Object[]{ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT})); + } + + private static Stream resultSetParamsSupported() { + return getArgumentsStream().filter(a -> Arrays.deepEquals(a.get(), new Object[]{ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT})); + } + + private static Stream resultSetParamsNotSupportedHoldability() { + return getArgumentsStream().filter(a -> !Objects.equals(a.get()[0], ResultSet.TYPE_FORWARD_ONLY) || !Objects.equals(a.get()[1], ResultSet.CONCUR_READ_ONLY)); + } + + @Test + void testPreparedStatementExecute() throws ResponseException, IOException, SQLException { + try (Connection con = getMockConnection()) { + try (PreparedStatement pstm = con.prepareStatement(sql)) { + assertTrue(pstm.execute()); + ResultSet rs = assertDoesNotThrow(() -> pstm.getResultSet()); + rs.close(); + } + } + } + + @ParameterizedTest + @MethodSource("resultSetParamsNotSupported") + void 
testPrepareStatementNotSupported(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException, IOException, ResponseException { + try (Connection con = getMockConnection()) { + assertThrows(SQLNonTransientException.class, () -> con.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability)); + assertThrows(SQLNonTransientException.class, () -> con.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability)); + } + } + + @ParameterizedTest + @MethodSource("resultSetParamsNotSupportedHoldability") + void testPrepareStatementNotSupported(int resultSetType, int resultSetConcurrency) throws SQLException, IOException, ResponseException { + try (Connection con = getMockConnection()) { + assertThrows(SQLNonTransientException.class, () -> con.prepareStatement(sql, resultSetType, resultSetConcurrency)); + assertThrows(SQLNonTransientException.class, () -> con.createStatement(resultSetType, resultSetConcurrency)); + } + } + + @ParameterizedTest + @MethodSource("resultSetParamsSupported") + void testPrepareStatementSupported(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException, IOException, ResponseException { + + try (Connection con = getMockConnection()) { + try (PreparedStatement pstm = con.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability)) { + assertTrue(pstm.execute()); + ResultSet rs = assertDoesNotThrow(() -> pstm.getResultSet()); + rs.close(); + assertDoesNotThrow(() -> con.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability)).close(); + assertDoesNotThrow(() -> con.prepareStatement(sql, resultSetType, resultSetConcurrency)).close(); + assertDoesNotThrow(() -> con.createStatement(resultSetType, resultSetConcurrency)).close(); + } + } + } + + private Connection getMockConnection() throws IOException, ResponseException, SQLException { + TransportFactory tf = mock(TransportFactory.class); + ProtocolFactory pf = 
mock(ProtocolFactory.class); + Protocol mockProtocol = mock(Protocol.class); + + when(mockProtocol.connect(anyInt())).thenReturn(mock(ConnectionResponse.class)); + + when(tf.getTransport(any(), any(), any())) + .thenReturn(mock(Transport.class)); + + when(pf.getProtocol(any(ConnectionConfig.class), any(Transport.class))) + .thenReturn(mockProtocol); + + when(mockProtocol.execute(any(QueryRequest.class))) + .thenReturn(mock(QueryResponse.class)); + Connection con = new ConnectionImpl(ConnectionConfig.builder().build(), tf, pf, NoOpLogger.INSTANCE); + return con; + } + + @Test + void testEffectiveFetchSizeOnPreparedStatement() throws ResponseException, IOException, SQLException { + + TransportFactory tf = mock(TransportFactory.class); + ProtocolFactory pf = mock(ProtocolFactory.class); + Protocol mockProtocol = mock(Protocol.class); + + when(mockProtocol.connect(anyInt())).thenReturn(mock(ConnectionResponse.class)); + + when(tf.getTransport(any(), any(), any())) + .thenReturn(mock(Transport.class)); + + when(pf.getProtocol(any(ConnectionConfig.class), any(Transport.class))) + .thenReturn(mockProtocol); + + when(mockProtocol.execute(any(QueryRequest.class))) + .thenReturn(mock(QueryResponse.class)); + + String url = "jdbc:elasticsearch://localhost:9200?fetchSize=400"; + + ConnectionConfig connectionConfig = ConnectionConfig.builder().setUrl(url).build(); + Connection con = new ConnectionImpl(connectionConfig, tf, pf, NoOpLogger.INSTANCE); + PreparedStatement st = con.prepareStatement(sql); + assertEquals(st.getFetchSize(), 400); + st.close(); + con.close(); + + // Properties override connection string fetchSize + Properties properties = new Properties(); + properties.setProperty("fetchSize", "5000"); + connectionConfig = ConnectionConfig.builder().setUrl(url).setProperties(properties).build(); + con = new ConnectionImpl(connectionConfig, tf, pf, NoOpLogger.INSTANCE); + st = con.prepareStatement(sql); + assertEquals(st.getFetchSize(), 5000); + st.close(); + 
con.close(); + + + // setFetchSize overrides fetchSize set anywhere + connectionConfig = ConnectionConfig.builder().setUrl(url).setProperties(properties).build(); + con = new ConnectionImpl(connectionConfig, tf, pf, NoOpLogger.INSTANCE); + st = con.prepareStatement(sql); + st.setFetchSize(200); + assertEquals(st.getFetchSize(), 200); + st.close(); + con.close(); + + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetMetaDataTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetMetaDataTests.java new file mode 100644 index 0000000000..0f04de3873 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetMetaDataTests.java @@ -0,0 +1,61 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.DatabaseMetaDataImpl.ResultSetColumnDescriptor; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.ColumnMetaData; +import com.amazon.opendistroforelasticsearch.jdbc.internal.results.Schema; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.SQLException; +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; + +/** + * Unit tests for {@link ResultSetMetaDataImpl} + */ +public class ResultSetMetaDataTests { + + private ResultSetMetaDataImpl metaData; + + @BeforeEach + public void setUp() { + ResultSetImpl resultSet = mock(ResultSetImpl.class); + Schema schema = new Schema(Arrays.asList( + new ColumnMetaData(new ResultSetColumnDescriptor("name", "keyword", null)), + new ColumnMetaData(new ResultSetColumnDescriptor("address", "text", null)), + new ColumnMetaData(new ResultSetColumnDescriptor("age", "long", null)), + new ColumnMetaData(new ResultSetColumnDescriptor("balance", "float", null)), + new ColumnMetaData(new ResultSetColumnDescriptor("employer", "nested", null)), + new ColumnMetaData(new ResultSetColumnDescriptor("birthday", "date", null)) + )); + metaData = new ResultSetMetaDataImpl(resultSet, schema); + } + + @Test + public void getColumnTypeNameShouldReturnJDBCType() throws SQLException { + assertEquals("VARCHAR", metaData.getColumnTypeName(1)); + assertEquals("VARCHAR", metaData.getColumnTypeName(2)); + assertEquals("BIGINT", metaData.getColumnTypeName(3)); + assertEquals("REAL", metaData.getColumnTypeName(4)); + assertEquals("STRUCT", metaData.getColumnTypeName(5)); + assertEquals("TIMESTAMP", metaData.getColumnTypeName(6)); + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetTests.java 
b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetTests.java new file mode 100644 index 0000000000..db1f61beb8 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/ResultSetTests.java @@ -0,0 +1,279 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.exceptions.ObjectClosedException; +import com.amazon.opendistroforelasticsearch.jdbc.logging.NoOpLogger; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonHttpProtocol; +import com.amazon.opendistroforelasticsearch.jdbc.test.TestResources; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES; +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; +import com.amazon.opendistroforelasticsearch.jdbc.test.PerTestWireMockServerExtension; +import com.amazon.opendistroforelasticsearch.jdbc.test.WireMockServerHelpers; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockResultSet; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockResultSetRows; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockResultSetMetaData; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.QueryMock; +import 
com.github.tomakehurst.wiremock.WireMockServer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.stream.Stream; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.matchingJsonPath; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.mock; + +@ExtendWith(PerTestWireMockServerExtension.class) +public class ResultSetTests implements WireMockServerHelpers { + + @ParameterizedTest + @MethodSource("queryMockProvider") + void testQueryResultSet(QueryMock queryMock, WireMockServer mockServer) throws SQLException, IOException { + queryMock.setupMockServerStub(mockServer); + + Connection con = new Driver().connect(getBaseURLForMockServer(mockServer), null); + Statement st = con.createStatement(); + ResultSet rs = assertDoesNotThrow(() -> st.executeQuery(queryMock.getSql())); + + assertNotNull(rs); + + // prior to ResultSet iteration + assertTrue(rs.isBeforeFirst(), "isBeforeFirst not True for non-empty ResultSet before the first next()"); + SQLException ex = assertThrows(SQLException.class, () -> rs.getObject(1)); + assertTrue(ex.getMessage().contains("Illegal operation before start of ResultSet")); + + // this will consume the resultSet + queryMock.getMockResultSet().assertMatches(rs); + + 
// post ResultSet iteration + assertTrue(rs.isAfterLast(), "isAfterLast not True after end of ResultSet."); + assertFalse(rs.isBeforeFirst(), "isBeforeFirst True when isAfterLast is True."); + ex = assertThrows(SQLException.class, () -> rs.getObject(1)); + assertTrue(ex.getMessage().contains("Illegal operation after end of ResultSet")); + + rs.close(); + + // post ResultSet close + assertTrue(rs.isClosed(), "ResultSet isClosed returns False after call to close it."); + assertThrows(ObjectClosedException.class, rs::next); + assertThrows(ObjectClosedException.class, rs::isAfterLast); + assertThrows(ObjectClosedException.class, rs::isBeforeFirst); + assertThrows(ObjectClosedException.class, () -> rs.getObject(1)); + + st.close(); + con.close(); + } + + private static Stream queryMockProvider() { + return Stream.of( + Arguments.of(new QueryMock.NycTaxisQueryMock()), + Arguments.of(new QueryMock.NycTaxisQueryWithAliasMock()) + ); + } + + + + @Test + void testResultSetOnPaginatedResponse(WireMockServer mockServer) throws SQLException, IOException { + + String queryUrl = JsonHttpProtocol.DEFAULT_SQL_CONTEXT_PATH+"?format=jdbc"; + final String sql = "SELECT firstname, age FROM accounts LIMIT 12"; + + // get Connection stub + setupStubForConnect(mockServer, "/"); + + // query response stub for initial page + mockServer.stubFor(post(urlEqualTo(queryUrl)) + .withHeader("Accept", equalTo("application/json")) + .withHeader("Content-Type", equalTo("application/json")) + .withRequestBody(matchingJsonPath("$.query", equalTo(sql))) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(getResponseBodyFromPath("mock/protocol/json/cursor/queryresponse_accounts_00.json")))); + + // query response stub for second page + mockServer.stubFor(post(urlEqualTo(queryUrl)) + .withHeader("Accept", equalTo("application/json")) + .withHeader("Content-Type", equalTo("application/json")) + .withRequestBody(matchingJsonPath("$.cursor", equalTo("abcde_1"))) + 
.willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(getResponseBodyFromPath("mock/protocol/json/cursor/queryresponse_accounts_01.json")))); + + // query response stub for third page + mockServer.stubFor(post(urlEqualTo(queryUrl)) + .withHeader("Accept", equalTo("application/json")) + .withHeader("Content-Type", equalTo("application/json")) + .withRequestBody(matchingJsonPath("$.cursor", equalTo("abcde_2"))) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(getResponseBodyFromPath("mock/protocol/json/cursor/queryresponse_accounts_02.json")))); + + // query response stub for last page + mockServer.stubFor(post(urlEqualTo(queryUrl)) + .withHeader("Accept", equalTo("application/json")) + .withHeader("Content-Type", equalTo("application/json")) + .withRequestBody(matchingJsonPath("$.cursor", equalTo("abcde_3"))) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(getResponseBodyFromPath("mock/protocol/json/cursor/queryresponse_accounts_03.json")))); + + + Connection con = new Driver().connect(getBaseURLForMockServer(mockServer), null); + Statement st = con.createStatement(); + st.setFetchSize(3); + ResultSet rs = assertDoesNotThrow(() -> st.executeQuery(sql)); + int cursorRowCount = 0; + + while(rs.next()) { + cursorRowCount++; + } + assertEquals(12, cursorRowCount, "Unexpected number of rows retrieved from cursor."); + + // test for execute method, mostly used by BI tools like Tableau for example. 
+ con = new Driver().connect(getBaseURLForMockServer(mockServer), null); + Statement statement = con.createStatement(); + st.setFetchSize(3); + boolean executed = assertDoesNotThrow(() -> statement.execute(sql)); + assertTrue(executed); + rs = statement.getResultSet(); + cursorRowCount = 0; + + while(rs.next()) { + cursorRowCount++; + } + assertEquals(12, cursorRowCount, "Unexpected number of rows retrieved from cursor."); + } + + + @Test + void testNullableFieldsQuery(WireMockServer mockServer) throws SQLException, IOException { + QueryMock.NullableFieldsQueryMock queryMock = new QueryMock.NullableFieldsQueryMock(); + + queryMock.setupMockServerStub(mockServer); + + Connection con = new Driver().connect(getBaseURLForMockServer(mockServer), null); + Statement st = con.createStatement(); + ResultSet rs = assertDoesNotThrow(() -> st.executeQuery(queryMock.getSql())); + + assertNotNull(rs); + + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("testBoolean", ElasticsearchType.BOOLEAN) + .column("docId", ElasticsearchType.TEXT) + .column("testByte", ElasticsearchType.BYTE) + .column("testFloat", ElasticsearchType.FLOAT) + .column("testLong", ElasticsearchType.LONG) + .column("testShort", ElasticsearchType.SHORT) + .column("testHalfFloat", ElasticsearchType.HALF_FLOAT) + .column("testTimeStamp", ElasticsearchType.DATE) + .column("testScaledFloat", ElasticsearchType.SCALED_FLOAT) + .column("testKeyword", ElasticsearchType.KEYWORD) + .column("testText", ElasticsearchType.TEXT) + .column("testDouble", ElasticsearchType.DOUBLE) + .build(); + + MockResultSetRows mockResultSetRows = MockResultSetRows.builder() + .row() + .column(false, true) + .column("2", false) + .column((byte) 0, true) + .column((float) 22.145135459218345, false) + .column((long) 0, true) + .column((short) 0, true) + .column((float) 24.324234543532153, false) + .column(Timestamp.valueOf("2015-01-01 12:10:30"), false) + .column((double) 24.324234543532153, false) + 
.column("Test String", false) + .column("document3", false) + .column((double) 0, true) + .row() + .column(true, false) + .column("1", false) + .column((byte) 126, false) + .column((float) 0, true) + .column((long) 32000320003200030L, false) + .column((short) 29000, false) + .column((float) 0, true) + .column(null, true) + .column((double) 0, true) + .column(null, true) + .column(null, true) + .column((double) 22.312423148903218, false) + .build(); + + MockResultSet mockResultSet = new MockResultSet(mockResultSetMetaData, mockResultSetRows); + + mockResultSet.assertMatches(rs); + + rs.close(); + con.close(); + } + + @Test + void testResultSetWrapper() throws SQLException { + ResultSetImpl rsImpl = new ResultSetImpl(mock(StatementImpl.class), mock(QueryResponse.class), NoOpLogger.INSTANCE); + + assertTrue(rsImpl.isWrapperFor(ResultSet.class), + "ResultSet impl returns False for isWrapperFor(ResultSet.class)"); + + ResultSet unwrapped = assertDoesNotThrow(() -> rsImpl.unwrap(ResultSet.class), + "Unexpected exception when unwrapping ResultSet"); + + assertNotNull(unwrapped, "Unwrapped ResultSet null"); + + assertFalse(rsImpl.isWrapperFor(mock(ResultSet.class).getClass()), + "ResultSet impl returns True for isWrapperFor(mockClass)"); + + assertFalse(rsImpl.isWrapperFor(null), + "ResultSet impl returns True for isWrapperFor(null)"); + + SQLException ex = assertThrows(SQLException.class, () -> rsImpl.unwrap(mock(ResultSet.class).getClass())); + assertTrue(ex.getMessage().contains("Unable to unwrap")); + } + + + public String getResponseBodyFromPath(String path) throws IOException { + return TestResources.readResourceAsString(path); + } + + public void setupStubForConnect(final WireMockServer mockServer, final String contextPath) { + // get Connection stub + mockServer.stubFor(get(urlEqualTo(contextPath)) + .withHeader("Accept", equalTo("application/json")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + 
.withBody(MockES.INSTANCE.getConnectionResponse()))); + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/SSLClientAuthTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/SSLClientAuthTests.java new file mode 100644 index 0000000000..d46cb1386b --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/SSLClientAuthTests.java @@ -0,0 +1,101 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
package com.amazon.opendistroforelasticsearch.jdbc;

import com.amazon.opendistroforelasticsearch.jdbc.config.KeyStoreLocationConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.KeyStorePasswordConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.TrustStoreLocationConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.TrustStorePasswordConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.test.TLSServer;
import com.amazon.opendistroforelasticsearch.jdbc.test.TestResources;
import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES;
import org.eclipse.jetty.server.Server;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junitpioneer.jupiter.TempDirectory;

import java.nio.file.Path;
import java.sql.Connection;
import java.util.Properties;

/**
 * Mutual-TLS connection tests: the server requires a client certificate and
 * the driver presents one from a JKS keystore supplied through connection
 * properties.
 *
 * WireMockServer has a problem initializing server on TLS from a password
 * protected JKS keystore. If the issue gets fixed, these tests can use
 * WireMockServer instead.
 */
@ExtendWith(TempDirectory.class)
public class SSLClientAuthTests {

    static Server jettyServer;
    static String connectURL;

    @BeforeAll
    static void beforeAll(@TempDirectory.TempDir Path tempDir) throws Exception {
        // Stage server-side key material in the temp dir, then bring up a TLS
        // server that demands client certificate authentication.
        Path serverKeyStore = tempDir.resolve("server_keystore");
        Path serverTrustStore = tempDir.resolve("server_truststore");
        TestResources.copyResourceToPath(TLSServer.SERVER_KEY_JKS_RESOURCE, serverKeyStore);
        TestResources.copyResourceToPath(TLSServer.TRUST_CLIENT_JKS_RESOURCE, serverTrustStore);
        System.out.println("Copied server keystore to: " + serverKeyStore.toAbsolutePath().toString());
        System.out.println("Copied server truststore to: " + serverTrustStore.toAbsolutePath().toString());

        jettyServer = TLSServer.startSecureServer(
                "localhost",
                serverKeyStore.toAbsolutePath().toString(),
                "changeit",
                "JKS",
                serverTrustStore.toAbsolutePath().toString(),
                "changeit",
                "JKS",
                true,
                new TLSServer.MockESConnectionHandler());

        connectURL = TLSServer.getBaseURLForConnect(jettyServer);
        System.out.println("Started on: " + connectURL);
    }

    @AfterAll
    static void afterAll() throws Exception {
        System.out.println("Stopping jetty");
        jettyServer.stop();
    }

    // A client presenting a certificate the server trusts, and trusting the
    // server certificate in turn, must connect successfully.
    @Test
    void testTLSClientAuth(@TempDirectory.TempDir Path tempDir) throws Exception {
        Path clientKeyStore = tempDir.resolve("client_keystore");
        Path clientTrustStore = tempDir.resolve("client_truststore");
        TestResources.copyResourceToPath(TLSServer.CLIENT_KEY_JKS_RESOURCE, clientKeyStore);
        TestResources.copyResourceToPath(TLSServer.TRUST_SERVER_JKS_RESOURCE, clientTrustStore);

        Properties props = new Properties();
        props.setProperty(TrustStoreLocationConnectionProperty.KEY, clientTrustStore.toAbsolutePath().toString());
        props.setProperty(TrustStorePasswordConnectionProperty.KEY, "changeit");
        props.setProperty(KeyStoreLocationConnectionProperty.KEY, clientKeyStore.toAbsolutePath().toString());
        props.setProperty(KeyStorePasswordConnectionProperty.KEY, "changeit");

        Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(connectURL, props));
        MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con);
    }

}
package com.amazon.opendistroforelasticsearch.jdbc;

import com.amazon.opendistroforelasticsearch.jdbc.config.TrustSelfSignedConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.TrustStoreLocationConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.TrustStorePasswordConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.test.TLSServer;
import com.amazon.opendistroforelasticsearch.jdbc.test.TestResources;
import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES;
import org.eclipse.jetty.server.Server;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junitpioneer.jupiter.TempDirectory;

import java.io.IOException;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * TLS server-certificate validation tests: the trustSelfSigned toggle, the
 * strict default behavior, and trusting a server certificate via a custom
 * truststore.
 *
 * WireMockServer has a problem initializing server on TLS from a password
 * protected JKS keystore. If the issue gets fixed, these tests can use
 * WireMockServer instead.
 */
@ExtendWith(TempDirectory.class)
public class SSLConnectionTests {

    static Server jettyServer;
    static String connectURL;

    @BeforeAll
    static void beforeAll(@TempDirectory.TempDir Path tempDir) throws Exception {
        // Bring up a TLS-only server using the self-signed server key.
        Path serverKeyStore = tempDir.resolve("keystore");
        TestResources.copyResourceToPath(TLSServer.SERVER_KEY_JKS_RESOURCE, serverKeyStore);
        System.out.println("Copied keystore to: " + serverKeyStore.toAbsolutePath().toString());

        jettyServer = TLSServer.startSecureServer(
                "localhost",
                serverKeyStore.toAbsolutePath().toString(),
                "changeit",
                "JKS",
                new TLSServer.MockESConnectionHandler());

        connectURL = TLSServer.getBaseURLForConnect(jettyServer);
        System.out.println("Started on: " + connectURL);
    }

    @AfterAll
    static void afterAll() throws Exception {
        System.out.println("Stopping jetty");
        jettyServer.stop();
    }

    // trustSelfSigned=true: connection to the self-signed server succeeds.
    @Test
    void testTrustSelfSignedEnabled() throws Exception {
        Properties props = new Properties();
        props.setProperty(TrustSelfSignedConnectionProperty.KEY, "true");

        Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(connectURL, props));

        MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con);
    }

    // trustSelfSigned=false: the handshake must be rejected.
    @Test
    void testTrustSelfSignedDisabled() {
        Properties props = new Properties();
        props.setProperty(TrustSelfSignedConnectionProperty.KEY, "false");

        SQLException sqle = Assertions.assertThrows(SQLException.class, () -> new Driver().connect(connectURL, props));

        assertNotNull(sqle.getMessage());
        assertTrue(sqle.getMessage().contains("Connection error"));
    }

    // No properties at all: self-signed certificates are rejected by default.
    @Test
    void testTrustSelfSignedDefault() {
        SQLException sqle = Assertions.assertThrows(SQLException.class, () -> new Driver().connect(connectURL, null));

        assertNotNull(sqle.getMessage());
        assertTrue(sqle.getMessage().contains("Connection error"));
    }

    // A truststore containing the server certificate allows the connection.
    @Test
    void testTrustCustomCert(@TempDirectory.TempDir Path tempDir) throws IOException, SQLException {
        Path customTrustStore = tempDir.resolve("truststore");
        TestResources.copyResourceToPath(TLSServer.TRUST_SERVER_JKS_RESOURCE, customTrustStore);
        System.out.println("Copied truststore to: " + customTrustStore.toAbsolutePath().toString());

        Properties props = new Properties();
        props.setProperty(TrustStoreLocationConnectionProperty.KEY, customTrustStore.toAbsolutePath().toString());
        props.setProperty(TrustStorePasswordConnectionProperty.KEY, "changeit");

        Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(connectURL, props));
        MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con);
    }

}
package com.amazon.opendistroforelasticsearch.jdbc;

import com.amazon.opendistroforelasticsearch.jdbc.config.HostnameVerificationConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.config.TrustSelfSignedConnectionProperty;
import com.amazon.opendistroforelasticsearch.jdbc.test.TLSServer;
import com.amazon.opendistroforelasticsearch.jdbc.test.TestResources;
import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES;
import org.eclipse.jetty.server.Server;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junitpioneer.jupiter.TempDirectory;

import java.nio.file.Path;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;

import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * TLS hostname verification tests. The server is started on localhost with a
 * certificate issued for a different hostname, so only connections with
 * hostname verification disabled can succeed.
 *
 * WireMockServer has a problem initializing server on TLS from a password
 * protected JKS keystore. If the issue gets fixed, these tests can use
 * WireMockServer instead.
 */
@ExtendWith(TempDirectory.class)
public class SSLHostnameVerificationTests {

    static Server jettyServer;
    static String connectURL;

    @BeforeAll
    static void beforeAll(@TempDirectory.TempDir Path tempDir) throws Exception {
        // Server keystore deliberately carries a non-localhost certificate so
        // hostname verification has something to fail on.
        Path serverKeyStore = tempDir.resolve("keystore");
        TestResources.copyResourceToPath(TLSServer.SERVER_KEY_JKS_RESOURCE_NON_LOCALHOST, serverKeyStore);
        System.out.println("Copied keystore to: " + serverKeyStore.toAbsolutePath().toString());

        jettyServer = TLSServer.startSecureServer(
                "localhost",
                serverKeyStore.toAbsolutePath().toString(),
                "changeit",
                "JKS",
                new TLSServer.MockESConnectionHandler());

        connectURL = TLSServer.getBaseURLForConnect(jettyServer);
        System.out.println("Started on: " + connectURL);
    }

    @AfterAll
    static void afterAll() throws Exception {
        System.out.println("Stopping jetty");
        jettyServer.stop();
    }

    // Verification explicitly disabled: the hostname mismatch is ignored.
    @Test
    void testTrustSelfSignedEnabledHostnameVerificationDisabled() throws Exception {
        Properties props = new Properties();
        props.setProperty(TrustSelfSignedConnectionProperty.KEY, "true");
        props.setProperty(HostnameVerificationConnectionProperty.KEY, "false");

        Connection con = Assertions.assertDoesNotThrow(() -> new Driver().connect(connectURL, props));

        MockES.INSTANCE.assertMockESConnectionResponse((ElasticsearchConnection) con);
    }

    // Verification explicitly enabled: the mismatched certificate is rejected.
    @Test
    void testTrustSelfSignedEnabledHostnameVerificationEnabled() throws Exception {
        Properties props = new Properties();
        props.setProperty(TrustSelfSignedConnectionProperty.KEY, "true");
        props.setProperty(HostnameVerificationConnectionProperty.KEY, "true");

        SQLException sqe = Assertions.assertThrows(SQLException.class, () -> new Driver().connect(connectURL, props));
        assertTrue(sqe.getMessage().contains("javax.net.ssl.SSLPeerUnverifiedException"));
    }

    // Verification not set: it must default to enabled and reject the mismatch.
    @Test
    void testTrustSelfSignedEnabledHostnameVerificationDefault() throws Exception {
        Properties props = new Properties();
        props.setProperty(TrustSelfSignedConnectionProperty.KEY, "true");

        SQLException sqe = Assertions.assertThrows(SQLException.class, () -> new Driver().connect(connectURL, props));
        assertTrue(sqe.getMessage().contains("javax.net.ssl.SSLPeerUnverifiedException"));
    }

}
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.logging.NoOpLogger; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ConnectionResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.JdbcQueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.Protocol; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.ProtocolFactory; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.QueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.InternalServerErrorException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.test.PerTestWireMockServerExtension; +import com.amazon.opendistroforelasticsearch.jdbc.transport.Transport; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportFactory; +import com.amazon.opendistroforelasticsearch.jdbc.test.WireMockServerHelpers; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.QueryMock; +import com.github.tomakehurst.wiremock.WireMockServer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +import static com.github.tomakehurst.wiremock.client.WireMock.get; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; 
+import static org.mockito.Mockito.when; + +@ExtendWith(PerTestWireMockServerExtension.class) +public class StatementTests implements WireMockServerHelpers { + + @Test + void testQueryRequest() throws ResponseException, IOException, SQLException { + + final String sql = "select pickup_datetime, trip_type, passenger_count, " + + "fare_amount, extra, vendor_id from nyc_taxis LIMIT 5"; + + TransportFactory tf = mock(TransportFactory.class); + ProtocolFactory pf = mock(ProtocolFactory.class); + Protocol mockProtocol = mock(Protocol.class); + + when(mockProtocol.connect(anyInt())).thenReturn(mock(ConnectionResponse.class)); + + when(tf.getTransport(any(), any(), any())) + .thenReturn(mock(Transport.class)); + + when(pf.getProtocol(any(ConnectionConfig.class), any(Transport.class))) + .thenReturn(mockProtocol); + + when(mockProtocol.execute(any(QueryRequest.class))) + .thenReturn(mock(QueryResponse.class)); + + Connection con = new ConnectionImpl(ConnectionConfig.builder().build(), tf, pf, NoOpLogger.INSTANCE); + + Statement st = con.createStatement(); + ResultSet rs = assertDoesNotThrow(() -> st.executeQuery(sql)); + + JdbcQueryRequest request = new JdbcQueryRequest(sql); + + verify(mockProtocol).execute(request); + + // new ResultSetImpl(mock(StatementImpl.class), mock(QueryResponse.class)); + st.close(); + rs.close(); + con.close(); + } + + + @Test + void testEffectiveFetchSizeOnStatement() throws ResponseException, IOException, SQLException { + + TransportFactory tf = mock(TransportFactory.class); + ProtocolFactory pf = mock(ProtocolFactory.class); + Protocol mockProtocol = mock(Protocol.class); + + when(mockProtocol.connect(anyInt())).thenReturn(mock(ConnectionResponse.class)); + + when(tf.getTransport(any(), any(), any())) + .thenReturn(mock(Transport.class)); + + when(pf.getProtocol(any(ConnectionConfig.class), any(Transport.class))) + .thenReturn(mockProtocol); + + when(mockProtocol.execute(any(QueryRequest.class))) + .thenReturn(mock(QueryResponse.class)); + + 
String url = "jdbc:elasticsearch://localhost:9200?fetchSize=400"; + + ConnectionConfig connectionConfig = ConnectionConfig.builder().setUrl(url).build(); + Connection con = new ConnectionImpl(connectionConfig, tf, pf, NoOpLogger.INSTANCE); + Statement st = con.createStatement(); + assertEquals(st.getFetchSize(), 400); + st.close(); + con.close(); + + // Properties override connection string fetchSize + Properties properties = new Properties(); + properties.setProperty("fetchSize", "5000"); + connectionConfig = ConnectionConfig.builder().setUrl(url).setProperties(properties).build(); + con = new ConnectionImpl(connectionConfig, tf, pf, NoOpLogger.INSTANCE); + st = con.createStatement(); + assertEquals(st.getFetchSize(), 5000); + st.close(); + con.close(); + + + // setFetchSize overrides fetchSize set anywhere + connectionConfig = ConnectionConfig.builder().setUrl(url).setProperties(properties).build(); + con = new ConnectionImpl(connectionConfig, tf, pf, NoOpLogger.INSTANCE); + st = con.createStatement(); + st.setFetchSize(200); + assertEquals(st.getFetchSize(), 200); + st.close(); + con.close(); + + } + + @Test + void testQueryInternalServerError(WireMockServer mockServer) throws SQLException, IOException { + QueryMock queryMock = new QueryMock.NycTaxisQueryInternalErrorMock(); + + queryMock.setupMockServerStub(mockServer); + + Connection con = new Driver().connect(getBaseURLForMockServer(mockServer), null); + Statement st = con.createStatement(); + InternalServerErrorException ex = assertThrows( + InternalServerErrorException.class, () -> st.executeQuery(queryMock.getSql())); + + String expectedDetails = "java.lang.NullPointerException\n\t" + + "at org.elasticsearch.plugin.nlpcn.Schema.getTypeFromMetaData(Schema.java:156)\n\t" + + "at org.elasticsearch.plugin.nlpcn.Schema.populateColumns(Schema.java:146)\n\t" + + "at java.base/java.lang.Thread.run(Thread.java:844)\n"; + + assertEquals("error reason", ex.getReason()); + 
assertEquals("java.lang.NullPointerException", ex.getType()); + assertEquals(expectedDetails, ex.getDetails()); + + st.close(); + con.close(); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionConfigTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionConfigTests.java new file mode 100644 index 0000000000..fbd611645f --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/config/ConnectionConfigTests.java @@ -0,0 +1,905 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.config; + +import com.amazon.opendistroforelasticsearch.jdbc.auth.AuthenticationType; +import com.amazon.opendistroforelasticsearch.jdbc.internal.util.UrlParser; +import com.amazon.opendistroforelasticsearch.jdbc.logging.LogLevel; +import com.amazonaws.auth.EnvironmentVariableCredentialsProvider; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import java.net.URISyntaxException; +import java.util.*; +import java.util.function.Function; + +import static org.junit.jupiter.api.Assertions.*; + +class ConnectionConfigTests { + + @Test + void testConnectionConfigBuilderDefaults() { + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder(); + + ConnectionProperty[] conProps = conConfigBuilder.getConnectionProperties(); + + // properties start out un-parsed + assertTrue(Arrays.stream(conProps).noneMatch(ConnectionProperty::isParsed)); + + // no exception with all properties set to default + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + // verify defaults + verifyDefaults(conConfig); + } + + @Test + void testConnectionConfigBuilderDefaultsWithEmptyProps() { + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setPropertyMap(new HashMap<>()); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + // verify defaults + verifyDefaults(conConfig); + } + + @Test + void testConnectionConfigHost() { + Map props = new HashMap<>(); + + // exception with invalid values + assertPropertyRejects(HostConnectionProperty.KEY, + 42, + true, + false); + + // valid value + assertPropertyAccepts(HostConnectionProperty.KEY, ConnectionConfig::getHost, + "hostvalue", + "host.1234567890+&-$-"); + } + + @Test + void testPortConfig() { + // exception with invalid values + 
assertPropertyRejects(PortConnectionProperty.KEY, + "invalidValue", + -1, + "-1", + 65536, + "65536"); + + // valid values + assertPropertyAccepts(PortConnectionProperty.KEY, ConnectionConfig::getPort, + 9400, + 65535); + + assertPropertyAcceptsParsedValue( + PortConnectionProperty.KEY, ConnectionConfig::getPort, "9400", 9400); + } + + @Test + void testFetchSizeConfig() { + // exception with invalid values + assertPropertyRejects(FetchSizeProperty.KEY, + "invalidValue", + -1, + "-1", + "3.14"); + + // valid values + assertPropertyAccepts(FetchSizeProperty.KEY, ConnectionConfig::getFetchSize, + 500, + 0); + + assertPropertyAcceptsParsedValue( + FetchSizeProperty.KEY, ConnectionConfig::getFetchSize, "25", 25); + } + + @Test + void testPathConfig() { + // exception with invalid values + assertPropertyRejects(PathConnectionProperty.KEY, 42, -1, true, false); + + // valid values + assertPropertyAccepts( + PathConnectionProperty.KEY, + ConnectionConfig::getPath, + "somepath", + "path/value", + "long/path/value/here/1234567890+&-$-" + ); + + // ignore a single trailing '/' in the user specified Path + assertPropertyAcceptsValue(PathConnectionProperty.KEY, ConnectionConfig::getPath, "/context/path/", "/context/path"); + } + + @Test + void testLogOutputConfig() { + // exception with invalid values + assertPropertyRejects(LogOutputConnectionProperty.KEY, 42); + + // valid values + assertPropertyAccepts( + LogOutputConnectionProperty.KEY, + ConnectionConfig::getLogOutput, + "AGeneralPlainString", + "some/path/value", + "long/nix/path/value/here/1234567890+&-$-.log", + "c:\\long\\windows-path\\here\\1234567890+&-$-.log" + ); + } + + @Test + void testLogLevelConfig() { + // exception with invalid values + assertPropertyRejects(LogLevelConnectionProperty.KEY, 42, "unknown", "true", true, false); + + // valid values + Arrays.stream(LogLevel.values()).forEach( + logLevel -> assertPropertyAcceptsValue( + LogLevelConnectionProperty.KEY, + ConnectionConfig::getLogLevel, + 
logLevel.name().toLowerCase(), logLevel)); + } + + @Test + void testLoginTimeoutConfig() { + // exception with invalid values + assertPropertyRejects(LoginTimeoutConnectionProperty.KEY, -1, "invalid", "9999999.5"); + + // valid values + assertPropertyAccepts(LoginTimeoutConnectionProperty.KEY, ConnectionConfig::getLoginTimeout, + 6000, 0, 9999999); + assertPropertyAcceptsParsedValue(LoginTimeoutConnectionProperty.KEY, ConnectionConfig::getLoginTimeout, + "0", 0); + assertPropertyAcceptsParsedValue(LoginTimeoutConnectionProperty.KEY, ConnectionConfig::getLoginTimeout, + "30", 30); + assertPropertyAcceptsParsedValue(LoginTimeoutConnectionProperty.KEY, ConnectionConfig::getLoginTimeout, + "6000", 6000); + assertPropertyAcceptsParsedValue(LoginTimeoutConnectionProperty.KEY, ConnectionConfig::getLoginTimeout, + "9999999", 9999999); + } + + @Test + void testUseSSLConfig() { + assertCommonBooleanPropertyTests(UseSSLConnectionProperty.KEY, ConnectionConfig::isUseSSL); + } + + @Test + void testRequestCompressionConfig() { + assertCommonBooleanPropertyTests(RequestCompressionConnectionProperty.KEY, ConnectionConfig::requestCompression); + } + + @Test + void testAuthConfig() { + // exception with invalid values + assertPropertyRejects(AuthConnectionProperty.KEY, 42, "unknown", "true", true, false); + } + + @Test + void testBasicAuthConfigMissingUsername() { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + props.put(AuthConnectionProperty.KEY, "basic"); + + builder.setPropertyMap(props); + ConnectionPropertyException ex = assertThrows(ConnectionPropertyException.class, builder::build); + assertEquals(AuthConnectionProperty.KEY, ex.getPropertyKey()); + assertTrue(ex.getMessage() != null && ex.getMessage().contains("requires a valid username")); + } + + @Test + void testBasicAuthConfigWithUsername() { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + 
props.put(AuthConnectionProperty.KEY, "basic"); + props.put(UserConnectionProperty.KEY, "user"); + + builder.setPropertyMap(props); + ConnectionConfig connectionConfig = assertDoesNotThrow(builder::build); + Assertions.assertEquals(AuthenticationType.BASIC, connectionConfig.getAuthenticationType()); + } + + @Test + void testDefaultAuthConfigWithUsername() { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + props.put(UserConnectionProperty.KEY, "some_user"); + + builder.setPropertyMap(props); + ConnectionConfig connectionConfig = assertDoesNotThrow(builder::build); + Assertions.assertEquals(AuthenticationType.BASIC, connectionConfig.getAuthenticationType()); + assertEquals("some_user", connectionConfig.getUser()); + } + + @Test + void testAwsSigV4AuthConfigWithRegion() { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + props.put(AuthConnectionProperty.KEY, "aws_sigv4"); + props.put(RegionConnectionProperty.KEY, "us-west-2"); + + builder.setPropertyMap(props); + ConnectionConfig connectionConfig = assertDoesNotThrow(builder::build); + Assertions.assertEquals(AuthenticationType.AWS_SIGV4, connectionConfig.getAuthenticationType()); + } + + @Test + void testAwsSigV4AuthConfigWithoutRegion() { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + props.put(AuthConnectionProperty.KEY, "aws_sigv4"); + + builder.setPropertyMap(props); + ConnectionPropertyException ex = assertThrows(ConnectionPropertyException.class, builder::build); + assertEquals(AuthConnectionProperty.KEY, ex.getPropertyKey()); + assertTrue(ex.getMessage() != null && ex.getMessage().contains("requires a region")); + } + + @Test + void testAwsSigV4AuthConfigWithDetectedRegion() { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + props.put(AuthConnectionProperty.KEY, "aws_sigv4"); + 
props.put(HostConnectionProperty.KEY, "some-hostname.us-west-1.es.amazonaws.com"); + + builder.setPropertyMap(props); + ConnectionConfig connectionConfig = assertDoesNotThrow(builder::build); + Assertions.assertEquals(AuthenticationType.AWS_SIGV4, connectionConfig.getAuthenticationType()); + assertEquals("us-west-1", connectionConfig.getRegion()); + } + + @Test + void testRegionConfig() { + Map props = new HashMap<>(); + + // exception with invalid values + assertPropertyRejects(RegionConnectionProperty.KEY, + 42, + true, + false); + + // valid values + assertPropertyAccepts(RegionConnectionProperty.KEY, ConnectionConfig::getRegion, + "region-value", + "us-gov-west-1", + "ap-southeast-2"); + } + + @Test + void testAwsCredentialsProviderConfig() { + assertPropertyRejectsValue(AwsCredentialsProviderProperty.KEY, "Invalid AWS Credentials Provider"); + + // The property accepts null and valid AWSCredentialProvider + assertPropertyAcceptsValue(AwsCredentialsProviderProperty.KEY, ConnectionConfig::getAwsCredentialsProvider, + null); + assertPropertyAcceptsValue(AwsCredentialsProviderProperty.KEY, ConnectionConfig::getAwsCredentialsProvider, + new EnvironmentVariableCredentialsProvider()); + } + + @Test + void testHostnameVerificationConfig() { + assertCommonBooleanPropertyTests(HostnameVerificationConnectionProperty.KEY, ConnectionConfig::hostnameVerification); + } + + @Test + void testConnectionConfigMultipleProps() { + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder(); + + Map props = new HashMap<>(); + + // exception when any property invalid + props.put(HostConnectionProperty.KEY, "es-host"); + props.put(PortConnectionProperty.KEY, "9300"); + props.put(UseSSLConnectionProperty.KEY, "True"); + props.put(PathConnectionProperty.KEY, "valid/path"); + props.put(LoginTimeoutConnectionProperty.KEY, -1); // invalid + conConfigBuilder.setPropertyMap(props); + ConnectionPropertyException ex = assertThrows(ConnectionPropertyException.class, 
conConfigBuilder::build); + assertEquals(LoginTimeoutConnectionProperty.KEY, ex.getPropertyKey()); + + props.put(HostConnectionProperty.KEY, "es-host"); + props.put(PortConnectionProperty.KEY, "9300"); + props.put(UseSSLConnectionProperty.KEY, "True"); + props.put(PathConnectionProperty.KEY, 100); // invalid + props.put(LoginTimeoutConnectionProperty.KEY, "60"); + conConfigBuilder.setPropertyMap(props); + ex = assertThrows(ConnectionPropertyException.class, conConfigBuilder::build); + assertEquals(PathConnectionProperty.KEY, ex.getPropertyKey()); + + props.put(HostConnectionProperty.KEY, "es-host"); + props.put(PortConnectionProperty.KEY, "9300"); + props.put(UseSSLConnectionProperty.KEY, 5); // invalid + props.put(PathConnectionProperty.KEY, "path/value"); + props.put(LoginTimeoutConnectionProperty.KEY, "60"); + conConfigBuilder.setPropertyMap(props); + ex = assertThrows(ConnectionPropertyException.class, conConfigBuilder::build); + assertEquals(UseSSLConnectionProperty.KEY, ex.getPropertyKey()); + + props.put(HostConnectionProperty.KEY, "es-host"); + props.put(PortConnectionProperty.KEY, -5); // invalid + props.put(UseSSLConnectionProperty.KEY, "true"); + props.put(PathConnectionProperty.KEY, "path/value"); + props.put(LoginTimeoutConnectionProperty.KEY, "60"); + conConfigBuilder.setPropertyMap(props); + ex = assertThrows(ConnectionPropertyException.class, conConfigBuilder::build); + assertEquals(PortConnectionProperty.KEY, ex.getPropertyKey()); + + props.put(HostConnectionProperty.KEY, new Object()); // invalid + props.put(PortConnectionProperty.KEY, "9300"); + props.put(UseSSLConnectionProperty.KEY, "true"); + props.put(PathConnectionProperty.KEY, "path/value"); + props.put(LoginTimeoutConnectionProperty.KEY, "60"); + conConfigBuilder.setPropertyMap(props); + ex = assertThrows(ConnectionPropertyException.class, conConfigBuilder::build); + assertEquals(HostConnectionProperty.KEY, ex.getPropertyKey()); + + // all valid + props.put(HostConnectionProperty.KEY, 
"es-hostname"); + props.put(PortConnectionProperty.KEY, "9400"); + props.put(UseSSLConnectionProperty.KEY, "true"); + props.put(PathConnectionProperty.KEY, "path/value/1"); + props.put(LoginTimeoutConnectionProperty.KEY, "90"); + conConfigBuilder.setPropertyMap(props); + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + assertEquals("es-hostname", conConfig.getHost()); + assertEquals(9400, conConfig.getPort()); + assertTrue(conConfig.isUseSSL()); + assertEquals("path/value/1", conConfig.getPath()); + assertEquals(90, conConfig.getLoginTimeout()); + } + + @Test + void testBuildWithProperties() { + Properties properties = new Properties(); + + properties.setProperty(HostConnectionProperty.KEY, "prop-host"); + properties.setProperty(LogOutputConnectionProperty.KEY, "prop-log-file"); + properties.setProperty(PathConnectionProperty.KEY, "prop-path"); + properties.setProperty(LoginTimeoutConnectionProperty.KEY, "3000"); + properties.setProperty(UseSSLConnectionProperty.KEY, "true"); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder().setProperties(properties); + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertEquals(conConfig.getHost(), "prop-host"); + assertEquals(conConfig.getLogOutput(), "prop-log-file"); + assertEquals(conConfig.getPath(), "prop-path"); + assertEquals(conConfig.getLoginTimeout(), 3000); + assertTrue(conConfig.isUseSSL()); + + // verify unset properties carry expected defaults + assertEquals(conConfig.getPort(), 443); // default with useSSL + assertFalse(conConfig.requestCompression()); + } + + @Test + void testBuildWithPropertiesWithDefaults() { + Properties defaults = new Properties(); + defaults.setProperty(PortConnectionProperty.KEY, "1080"); + defaults.setProperty(LogOutputConnectionProperty.KEY, "default-log-file"); + defaults.setProperty(LoginTimeoutConnectionProperty.KEY, "1000"); + defaults.setProperty(UseSSLConnectionProperty.KEY, "true"); + + Properties 
properties = new Properties(defaults); + properties.setProperty(HostConnectionProperty.KEY, "prop-host"); + properties.setProperty(LogOutputConnectionProperty.KEY, "prop-log-file"); + properties.setProperty(PathConnectionProperty.KEY, "prop-path"); + properties.setProperty(LoginTimeoutConnectionProperty.KEY, "3000"); + + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder().setProperties(properties); + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertEquals(conConfig.getHost(), "prop-host"); + assertEquals(conConfig.getPort(), 1080); // set from defaults + assertEquals(conConfig.getLogOutput(), "prop-log-file"); // default overridden + assertEquals(conConfig.getPath(), "prop-path"); // no default + assertEquals(conConfig.getLoginTimeout(), 3000); // default overridden + assertTrue(conConfig.isUseSSL()); // set from defaults + } + + @Test + void testBuildWithUrl() { + final String url = UrlParser.URL_PREFIX + "https://url-host/?logOutput=url-log-file&loginTimeout=2000"; + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertTrue(conConfig.isUseSSL()); + assertEquals(conConfig.getHost(), "url-host"); + assertEquals(conConfig.getPath(), "/"); + assertEquals(conConfig.getLogOutput(), "url-log-file"); + assertEquals(conConfig.getLoginTimeout(), 2000); + + // verify unset properties carry expected defaults + assertEquals(conConfig.getPort(), 443); // default with useSSL + assertFalse(conConfig.requestCompression()); + } + + @Test + void testBuildWithInvalidUrl() { + final String url = "https://url-host/?logOutput=url-log-file&loginTimeout=2000"; + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url); + + ConnectionPropertyException ex = assertThrows(ConnectionPropertyException.class, conConfigBuilder::build); + assertNotNull(ex.getCause()); + 
assertEquals(URISyntaxException.class, ex.getCause().getClass()); + } + + @Test + void testBuildWithUrlAndProperties() { + final String url = UrlParser.URL_PREFIX + "https://url-host/?logOutput=url-log-file&loginTimeout=2000"; + Properties properties = new Properties(); + + properties.setProperty(HostConnectionProperty.KEY, "prop-host"); + properties.setProperty(PathConnectionProperty.KEY, "prop-path"); + properties.setProperty(LoginTimeoutConnectionProperty.KEY, "3000"); + properties.setProperty(UseSSLConnectionProperty.KEY, "false"); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url) + .setProperties(properties); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + // properties overridden by builder.setProperties( ) + assertFalse(conConfig.isUseSSL()); + assertEquals(conConfig.getHost(), "prop-host"); + assertEquals(conConfig.getPath(), "prop-path"); + assertEquals(conConfig.getLoginTimeout(), 3000); + + // properties from url + assertEquals(conConfig.getLogOutput(), "url-log-file"); + + // verify unset properties carry expected defaults + assertEquals(conConfig.getPort(), 9200); + } + + @Test + void testBuildWithUrlAndPropertiesWithDefaults() { + final String url = UrlParser.URL_PREFIX + "http://url-host/?logOutput=url-log-file&loginTimeout=2000&user=url-user"; + + Properties defaults = new Properties(); + defaults.setProperty(PortConnectionProperty.KEY, "1080"); + defaults.setProperty(LogOutputConnectionProperty.KEY, "default-log-file"); + defaults.setProperty(LoginTimeoutConnectionProperty.KEY, "1000"); + defaults.setProperty(UseSSLConnectionProperty.KEY, "true"); + + Properties properties = new Properties(defaults); + properties.setProperty(HostConnectionProperty.KEY, "prop-host"); + properties.setProperty(LogOutputConnectionProperty.KEY, "prop-log-file"); + properties.setProperty(PathConnectionProperty.KEY, "prop-path"); + properties.setProperty(LoginTimeoutConnectionProperty.KEY, "3000"); 
+ + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url) + .setProperties(properties); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertTrue(conConfig.isUseSSL()); // set from defaults + assertEquals(conConfig.getHost(), "prop-host"); + assertEquals(conConfig.getPort(), 1080); // set from defaults + assertEquals(conConfig.getLogOutput(), "prop-log-file"); // default overridden + assertEquals(conConfig.getPath(), "prop-path"); // no default + assertEquals(conConfig.getLoginTimeout(), 3000); // default overridden + + assertEquals(conConfig.getUser(), "url-user"); // from url + } + + @Test + void testBuildWithPropertyMap() { + Map propertyMap = new HashMap<>(); + + propertyMap.put(RequestCompressionConnectionProperty.KEY, true); + propertyMap.put(HostConnectionProperty.KEY, "prop-host"); + propertyMap.put(LogOutputConnectionProperty.KEY, "prop-log-file"); + propertyMap.put(PathConnectionProperty.KEY, "prop-path"); + propertyMap.put(LoginTimeoutConnectionProperty.KEY, 3000); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setPropertyMap(propertyMap); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertTrue(conConfig.requestCompression()); + assertEquals(conConfig.getHost(), "prop-host"); + assertEquals(conConfig.getPort(), 9200); // default + assertEquals(conConfig.getLogOutput(), "prop-log-file"); + assertEquals(conConfig.getPath(), "prop-path"); + assertEquals(conConfig.getLoginTimeout(), 3000); + } + + @Test + void testBuildWithUrlAndPropertyMap() { + final String url = UrlParser.URL_PREFIX + "http://url-host/?logOutput=url-log-file&loginTimeout=2000&user=url-user"; + + Map propertyMap = new HashMap<>(); + + propertyMap.put(RequestCompressionConnectionProperty.KEY, true); + propertyMap.put(HostConnectionProperty.KEY, "prop-host"); + propertyMap.put(LogOutputConnectionProperty.KEY, "prop-log-file"); + 
propertyMap.put(PathConnectionProperty.KEY, "prop-path"); + propertyMap.put(LoginTimeoutConnectionProperty.KEY, 3000); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url) + .setPropertyMap(propertyMap); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertTrue(conConfig.requestCompression()); + assertEquals("prop-host", conConfig.getHost()); + assertEquals(9200, conConfig.getPort()); // default + assertEquals("prop-log-file", conConfig.getLogOutput()); + assertEquals("prop-path", conConfig.getPath()); + assertEquals(3000, conConfig.getLoginTimeout()); + assertEquals(true, conConfig.requestCompression()); + + assertEquals("url-user", conConfig.getUser()); // from url + } + + @Test + void testBuildWithUrlAndOverrideMap() { + final String url = UrlParser.URL_PREFIX + "http://url-host/?logOutput=url-log-file&loginTimeout=2000&user=url-user"; + + Map propertyMap = new HashMap<>(); + + propertyMap.put(UseSSLConnectionProperty.KEY, true); + propertyMap.put(HostConnectionProperty.KEY, "prop-host"); + propertyMap.put(LogOutputConnectionProperty.KEY, "prop-log-file"); + propertyMap.put(PathConnectionProperty.KEY, "prop-path"); + propertyMap.put(LoginTimeoutConnectionProperty.KEY, 3000); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url) + .overrideProperties(propertyMap); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertTrue(conConfig.isUseSSL()); + assertEquals("prop-host", conConfig.getHost()); + assertEquals(443, conConfig.getPort()); // default with useSSL + assertEquals("prop-log-file", conConfig.getLogOutput()); + assertEquals("prop-path", conConfig.getPath()); + assertEquals(3000, conConfig.getLoginTimeout()); + + assertEquals("url-user", conConfig.getUser()); // from url + } + + @Test + void testBuildWithUrlAndPropertyMapAndOverrides() { + final String url = UrlParser.URL_PREFIX + 
"http://url-host/?logOutput=url-log-file" + + "&loginTimeout=2000&user=url-user&password=url-password"; + + Map propertyMap = new HashMap<>(); + + propertyMap.put(UseSSLConnectionProperty.KEY, true); + propertyMap.put(HostConnectionProperty.KEY, "prop-host"); + propertyMap.put(LogOutputConnectionProperty.KEY, "prop-log-file"); + propertyMap.put(PathConnectionProperty.KEY, "prop-path"); + propertyMap.put(LoginTimeoutConnectionProperty.KEY, 3000); + + Map overrideMap = new HashMap<>(); + overrideMap.put(UseSSLConnectionProperty.KEY, false); + overrideMap.put(LoginTimeoutConnectionProperty.KEY, 5000); + overrideMap.put(UserConnectionProperty.KEY, "override-user"); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url) + .setPropertyMap(propertyMap) + .overrideProperties(overrideMap); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertFalse(conConfig.isUseSSL()); // override + assertEquals("prop-host", conConfig.getHost()); + assertEquals(9200, conConfig.getPort()); // default + assertEquals("prop-log-file", conConfig.getLogOutput()); + assertEquals("prop-path", conConfig.getPath()); + assertEquals(5000, conConfig.getLoginTimeout()); // override + + assertEquals("override-user", conConfig.getUser()); // override + assertEquals("url-password", conConfig.getPassword()); // url + } + + @Test + void testBuildWithUrlAndPropertyMapAndMultipleOverrides() { + final String url = UrlParser.URL_PREFIX + "http://url-host/?logOutput=url-log-file" + + "&loginTimeout=2000&user=url-user&password=url-password"; + + Map propertyMap = new HashMap<>(); + + propertyMap.put(UseSSLConnectionProperty.KEY, true); + propertyMap.put(HostConnectionProperty.KEY, "prop-host"); + propertyMap.put(LogOutputConnectionProperty.KEY, "prop-log-file"); + propertyMap.put(PathConnectionProperty.KEY, "prop-path"); + propertyMap.put(LoginTimeoutConnectionProperty.KEY, 3000); + + Map overrideMap1 = new HashMap<>(); + 
overrideMap1.put(UseSSLConnectionProperty.KEY, false); + overrideMap1.put(LoginTimeoutConnectionProperty.KEY, 5000); + + Map overrideMap2 = new HashMap<>(); + overrideMap2.put(UseSSLConnectionProperty.KEY, true); + overrideMap2.put(UserConnectionProperty.KEY, "override-user"); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .setUrl(url) + .setPropertyMap(propertyMap) + .overrideProperties(overrideMap1) + .overrideProperties(overrideMap2); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertTrue(conConfig.isUseSSL()); // override 2 + assertEquals("prop-host", conConfig.getHost()); + assertEquals(443, conConfig.getPort()); // default with useSSL + assertEquals("prop-log-file", conConfig.getLogOutput()); + assertEquals("prop-path", conConfig.getPath()); + assertEquals(5000, conConfig.getLoginTimeout()); // override 1 + + assertEquals("override-user", conConfig.getUser()); // override 2 + assertEquals("url-password", conConfig.getPassword()); // url + } + + @ParameterizedTest + @CsvSource(value = { + "0, 1, 2", "0, 2, 1", + "1, 0, 2", "1, 2, 0", + "2, 0, 1", "2, 1, 0" + }) + void testBuildWithUrlAndPropertyMapAndOverridesCombinations(int f, int s, int t) { + // Verify that order of invoking ConnectionConfig.Builder methods + // does not change the effective behavior + final String url = UrlParser.URL_PREFIX + "http://url-host/?logOutput=url-log-file" + + "&loginTimeout=2000&user=url-user&password=url-password"; + + Map propertyMap = new HashMap<>(); + + propertyMap.put(UseSSLConnectionProperty.KEY, true); + propertyMap.put(HostConnectionProperty.KEY, "prop-host"); + propertyMap.put(LogOutputConnectionProperty.KEY, "prop-log-file"); + propertyMap.put(PathConnectionProperty.KEY, "prop-path"); + propertyMap.put(LoginTimeoutConnectionProperty.KEY, 3000); + + Map overrideMap = new HashMap<>(); + overrideMap.put(UseSSLConnectionProperty.KEY, false); + overrideMap.put(LoginTimeoutConnectionProperty.KEY, 5000); + 
overrideMap.put(UserConnectionProperty.KEY, "override-user"); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder(); + Integer[] order = new Integer[]{f, s, t}; + + for (int m : order) { + switch (m) { + case 0: + conConfigBuilder.setUrl(url); + break; + case 1: + conConfigBuilder.setPropertyMap(propertyMap); + break; + case 2: + conConfigBuilder.overrideProperties(overrideMap); + break; + } + } + + String message = "order: " + Arrays.toString(order); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build, message); + + assertFalse(conConfig.isUseSSL(), message); // override + assertEquals("prop-host", conConfig.getHost(), message); + assertEquals(9200, conConfig.getPort(), message); // default + assertEquals("prop-log-file", conConfig.getLogOutput(), message); + assertEquals("prop-path", conConfig.getPath(), message); + assertEquals(5000, conConfig.getLoginTimeout(), message); // override + + assertEquals("override-user", conConfig.getUser(), message); // override + assertEquals("url-password", conConfig.getPassword(), message); // url + } + + @Test + void testBuildWithOverrideMap() { + Map propertyMap = new HashMap<>(); + + propertyMap.put(UseSSLConnectionProperty.KEY, true); + propertyMap.put(HostConnectionProperty.KEY, "prop-host"); + propertyMap.put(LogOutputConnectionProperty.KEY, "prop-log-file"); + propertyMap.put(PathConnectionProperty.KEY, "prop-path"); + propertyMap.put(LoginTimeoutConnectionProperty.KEY, 3000); + + ConnectionConfig.Builder conConfigBuilder = ConnectionConfig.builder() + .overrideProperties(propertyMap); + + ConnectionConfig conConfig = assertDoesNotThrow(conConfigBuilder::build); + + assertTrue(conConfig.isUseSSL()); + assertEquals("prop-host", conConfig.getHost()); + assertEquals(443, conConfig.getPort()); // default with useSSL + assertEquals("prop-log-file", conConfig.getLogOutput()); + assertEquals("prop-path", conConfig.getPath()); + assertEquals(3000, conConfig.getLoginTimeout()); + } + + 
/** + * Common assertions that should pass with any Boolean property + * + * @param propertyKey property key + * @param propertyGetter getter function to retrieve the parsed value + * of the property + */ + private void assertCommonBooleanPropertyTests(String propertyKey, Function propertyGetter) { + // exception with invalid values + assertPropertyRejects(propertyKey, -1, 0, 100.5); + + // valid values + assertPropertyAccepts(propertyKey, propertyGetter, true, false); + + assertPropertyAcceptsParsedValue(propertyKey, propertyGetter, "true", true); + assertPropertyAcceptsParsedValue(propertyKey, propertyGetter, "false", false); + assertPropertyAcceptsParsedValue(propertyKey, propertyGetter, "any", false); + } + + /** + * Helper to assert a {@link ConnectionConfig} accepts specified + * objects as values for a property and the actual value of the + * property is set exactly same as the specified objects. + * + * @param key key associated with property + * @param propertyGetter getter function to retrieve the parsed value of the property + * @param values value objects to set the property value with + */ + private void assertPropertyAccepts( + final String key, Function propertyGetter, final Object... values) { + Arrays.stream(values).forEach((value) -> assertPropertyAcceptsValue(key, propertyGetter, value)); + } + + /** + * Helper to assert {@link ConnectionConfig} rejects specified + * objects as values for a property. + * + * @param key key associated with property + * @param values value objects to set the property value with + */ + private void assertPropertyRejects(final String key, final Object... values) { + Arrays.stream(values).forEach((value) -> assertPropertyRejectsValue(key, value)); + } + + /** + * Helper to assert {@link ConnectionConfig} accepts the specified + * object as a value for a property. 
+ * + * @param key + * @param propertyGetter + * @param specifiedValue + */ + private void assertPropertyAcceptsValue( + final String key, Function propertyGetter, final Object specifiedValue) { + assertPropertyAcceptsValue(key, propertyGetter, specifiedValue, specifiedValue); + } + + /** + * Helper to assert {@link ConnectionConfig} accepts a specified + * object as a value and that the resulting value retrieved from + * ConnectionConfig matches the expected value. + * + * @param key Property key to set + * @param propertyGetter Function to retrieve property value from ConnectionConfig + * @param specifiedValue Property value to specify when building ConnectionConfig + * @param expectedValue Expected value returned from ConnectionConfig + */ + private void assertPropertyAcceptsValue( + final String key, Function propertyGetter, + final Object specifiedValue, final Object expectedValue) { + ConnectionConfig conConfig = assertConnectionConfigIsBuilt(key, specifiedValue); + assertEquals(expectedValue, propertyGetter.apply(conConfig)); + } + + /** + * Helper to assert {@link ConnectionConfig} accepts a specified + * object as a value for a property and that the actual value set + * on the property matches a specific value. + * + * @param key key associated with property + * @param propertyGetter getter function to retrieve the parsed value of the property + * @param specifiedValue value object to set the property with + * @param parsedValue expected parsed (actual) value of the property + */ + private void assertPropertyAcceptsParsedValue( + final String key, Function propertyGetter, + final Object specifiedValue, final Object parsedValue) { + ConnectionConfig conConfig = assertConnectionConfigIsBuilt(key, specifiedValue); + assertEquals(parsedValue, propertyGetter.apply(conConfig)); + } + + /** + * Helper to assert a {@link ConnectionConfig} can be built successfully + * when a specific property is assigned a certain value. 
+ * + * @param key key associated with a property + * @param specifiedValue value to assign the property + * + * @return {@link ConnectionConfig} object built with specified property + */ + private ConnectionConfig assertConnectionConfigIsBuilt(final String key, final Object specifiedValue) { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + // exception with invalid values + props.put(key, specifiedValue); + builder.setPropertyMap(props); + return assertDoesNotThrow(builder::build); + } + + /** + * Helper to assert that building a {@link ConnectionConfig} fails with + * an exception if the specified property is assigned a certain value. + * + * @param key key associated with the property + * @param value value to assign to the property + */ + private void assertPropertyRejectsValue(final String key, final Object value) { + ConnectionConfig.Builder builder = ConnectionConfig.builder(); + Map props = new HashMap<>(); + + // exception with invalid value + props.put(key, value); + builder.setPropertyMap(props); + ConnectionPropertyException ex = assertThrows(ConnectionPropertyException.class, builder::build); + assertEquals(key, ex.getPropertyKey()); + } + + /** + * Verifies property values in a {@link ConnectionConfig} instance match + * their expected defaults. 
+ * + * @param connectionConfig {@link ConnectionConfig} instance to inspect + */ + private void verifyDefaults(ConnectionConfig connectionConfig) { + // verify defaults + assertEquals(9200, connectionConfig.getPort()); + assertEquals("", connectionConfig.getPath()); + assertEquals(0, connectionConfig.getFetchSize()); + assertEquals("localhost", connectionConfig.getHost()); + assertEquals(0, connectionConfig.getLoginTimeout()); + assertFalse(connectionConfig.isUseSSL()); + assertFalse(connectionConfig.requestCompression()); + assertEquals(AuthenticationType.NONE, connectionConfig.getAuthenticationType()); + assertNull(connectionConfig.getRegion()); + assertEquals(LogLevel.OFF, connectionConfig.getLogLevel()); + assertTrue(connectionConfig.hostnameVerification()); + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/AwsHostnameUtilTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/AwsHostnameUtilTests.java new file mode 100644 index 0000000000..535d1d474e --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/AwsHostnameUtilTests.java @@ -0,0 +1,64 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.util; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + + +public class AwsHostnameUtilTests { + + /** + * Test region name extracted from input hostname is as expected + * when the input hostname is a known url format. + * + * @param hostname hostname to parse + * @param expectedRegion expected region value + */ + @ParameterizedTest + @CsvSource({ + "search-domain-name.us-east-1.es.amazonaws.com, us-east-1", + "search-domain-name.us-gov-west-1.es.amazonaws.com, us-gov-west-1", + "search-domain-name.ap-southeast-2.es.a9.com, ap-southeast-2", + "search-domain-name.sub-domain.us-west-2.es.amazonaws.com, us-west-2", + "search-us-east-1.us-west-2.es.amazonaws.com, us-west-2", + }) + void testNonNullRegionsFromAwsHostnames(String hostname, String expectedRegion) { + assertEquals(expectedRegion, AwsHostNameUtil.parseRegion(hostname)); + } + + /** + * Verify that a region value is not extracted from an input hostname + * + * @param hostname hostname to parse + */ + @ParameterizedTest + @ValueSource(strings = { + "search-domain-name.us-east-1.es.amazonaws.co", + "search-domain-name.us-gov-west-1.es.amazonaws", + "search-domain-name.ap-southeast-2.es.com", + }) + void testNullRegions(String hostname) { + String region = AwsHostNameUtil.parseRegion(hostname); + assertNull(region, () -> hostname + " returned non-null region: " + region); + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/SqlParserTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/SqlParserTests.java new file mode 100644 index 0000000000..edaa39e400 --- /dev/null +++ 
b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/SqlParserTests.java @@ -0,0 +1,107 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.util; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.*; + +class SqlParserTests { + + @ParameterizedTest + @MethodSource("pameterizedValidSqlProvider") + void testQueryParamCount(String sql, int expectedCount) { + int paramCount = SqlParser.countParameterMarkers(sql); + assertEquals(expectedCount, paramCount, + () -> String.format("[%s] returned %d parameters. 
Expected %d.", sql, paramCount, expectedCount)); + } + + + private static Stream pameterizedValidSqlProvider() { + return Stream.of( + Arguments.of("select X from table", 0), + Arguments.of("select X from table where Y='?'", 0), + + // single line comments + Arguments.of("select X from table -- where Y=?", 0), + Arguments.of("select X from table where Y='?'--", 0), + Arguments.of("select X from table where Y='?'--?", 0), + Arguments.of("select X from table where Y='?'--some comment ?", 0), + Arguments.of("select X from table where Y='?'--some comment ?", 0), + Arguments.of("select X from table where Y='?'--some comment", 0), + + // multi and single line comments + Arguments.of("select X,Y /* ? \n ? */ from table where Y='?'--some comment ?", 0), + Arguments.of("select X,Y /* ? \r\n \n? */ from table where Y='?'--some comment ?", 0), + Arguments.of("select X,Y /* ? \n ? */ from table where Y='?'--some comment ?", 0), + Arguments.of("select X,Y /* ? ? */ from table where Y='?'--some comment ?", 0), + + // double quotes + Arguments.of("select X,Y from table where Y=\"?\"--some comment ?", 0), + + // escaped single quotes + Arguments.of("select X,Y from table where Y='''?'--some comment ?", 0), + + // 1 param marker + Arguments.of("select X from table where Y='?'--some comment \n and Z=?", 1), + Arguments.of("select X from table where Y='?'--some comment \r\n and Z=?", 1), + Arguments.of("select X from table where Y='?'--some comment \r\n and Z=? /* \n */ -- and P=?", 1), + + // 2 param markers + Arguments.of("select X from table where Y='?'--some comment \r\n and Z=? /* \n */ and P=?", 2), + + // Many param markers + Arguments.of("select X from table where A=? and B=? and C=? and D=? and (E=? or F=?) ", 6), + Arguments.of("select X from table where A=? \n --- \n and B=? /* ? */ and C=? and \n D=? and (E=? or F=?) ", 6) + + ); + } + + @ParameterizedTest + @ValueSource(strings = { + "select X from table /*", + "select X,Y /* ? \n ? 
from table where Y='?'--some comment ?", + "select X,Y /*unterminated-comment ? \n ? from table where A='?' and B='unterminated-literal --some comment \n and c=?" + }) + void testUnterminatedCommentQueries(String sql) { + + IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> SqlParser.countParameterMarkers(sql), + () -> String.format("[%s] did not throw an exception. Expected unterminated comment exception.", sql)); + + assertNotNull(ex.getMessage()); + assertTrue(ex.getMessage().contains("unterminated comment")); + } + + @ParameterizedTest + @ValueSource(strings = { + "select X from table where A='unterminated and B=?", + "select X,Y from T where A in (?) \n and B=? and C='unterminated --some comment \n and D=?" + }) + void testUnterminatedStringQueries(String sql) { + + IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> SqlParser.countParameterMarkers(sql), + () -> String.format("[%s] did not throw an exception. Expected unterminated string exception.", sql)); + + assertNotNull(ex.getMessage()); + assertTrue(ex.getMessage().contains("unterminated string")); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/UrlParserTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/UrlParserTests.java new file mode 100644 index 0000000000..1580c154bd --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/internal/util/UrlParserTests.java @@ -0,0 +1,189 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.internal.util; + +import static org.junit.jupiter.api.Assertions.*; + +import com.amazon.opendistroforelasticsearch.jdbc.config.HostConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PasswordConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PathConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PortConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.UseSSLConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.UserConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.test.KeyValuePairs; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.net.URISyntaxException; +import java.util.Properties; + +class UrlParserTests { + + @ParameterizedTest + @ValueSource(strings = { + "jdbc:elasticsearch://host:9200", + "jdbc:elasticsearch://host:9200/path", + "jdbc:elasticsearch://host:9200/path/", + "jdbc:elasticsearch://host:9200/path?option=value", + "jdbc:elasticsearch://host:9200/path?option=value&option2=value2", + "jdbc:elasticsearch://host/path", + "jdbc:elasticsearch://host/path/", + "jdbc:elasticsearch://host/path?option=value&option2=value2", + }) + void testIsAcceptable(String url) { + assertTrue(UrlParser.isAcceptable(url), () -> url + " was not accepted"); + } + + @ParameterizedTest + @ValueSource(strings = { + "jdbc:elasticsearch:/", + "elasticsearch://host:9200/path", + "jdbc:elasticsearch:", + "jdbc:elasticsearch", + "jdbc://host:9200/" + }) + void testIsNotAcceptable(String url) { + 
assertFalse(UrlParser.isAcceptable(url), () -> url + " was accepted"); + } + + @Test + void testNullNotAcceptable() { + assertFalse(UrlParser.isAcceptable(null), "null was accepted"); + } + + @Test + void testPropertiesFromURL() throws URISyntaxException { + + propertiesFromUrl("jdbc:elasticsearch://") + .match(); // empty properties + + propertiesFromUrl("jdbc:elasticsearch://https://localhost:9200/") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, "localhost"), + KeyValuePairs.skvp(PortConnectionProperty.KEY, "9200"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "true"), + KeyValuePairs.skvp(PathConnectionProperty.KEY, "/")); + + propertiesFromUrl("jdbc:elasticsearch://localhost:9200") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, "localhost"), + KeyValuePairs.skvp(PortConnectionProperty.KEY, "9200"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "false")); + + propertiesFromUrl("jdbc:elasticsearch://es-domain-name.sub.hostname.com:1080") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, "es-domain-name.sub.hostname.com"), + KeyValuePairs.skvp(PortConnectionProperty.KEY, "1080"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "false")); + + propertiesFromUrl("jdbc:elasticsearch://es-domain-name.sub.hostname.com:1090/") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, "es-domain-name.sub.hostname.com"), + KeyValuePairs.skvp(PortConnectionProperty.KEY, "1090"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "false"), + KeyValuePairs.skvp(PathConnectionProperty.KEY, "/")); + + } + + @Test + public void testPropertiesFromLongUrl() { + propertiesFromUrl( + "jdbc:elasticsearch://search-elasticsearch-es23-dedm-za-1-edmwao5g64rlo3hcohapy2jpru.us-east-1.es.a9.com") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, + "search-elasticsearch-es23-dedm-za-1-edmwao5g64rlo3hcohapy2jpru.us-east-1.es.a9.com"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "false")); + } + + @Test + public void 
testPropertiesFromUrlInvalidPrefix() { + String url = "jdbc:unknown://https://localhost:9200/"; + + URISyntaxException ex = assertThrows(URISyntaxException.class, () -> UrlParser.parseProperties(url)); + assertTrue(ex.getMessage().contains(UrlParser.URL_PREFIX)); + } + + @Test + public void testPropertiesFromUrlInvalidScheme() { + String url = "jdbc:elasticsearch://tcp://domain-name.sub-domain.com:9023"; + + URISyntaxException ex = assertThrows(URISyntaxException.class, () -> UrlParser.parseProperties(url)); + assertTrue(ex.getMessage().contains("Invalid scheme:tcp")); + } + + @Test + public void testPropertiesFromUrlHttpsScheme() { + String url = "jdbc:elasticsearch://https://domain-name.sub-domain.com:9023"; + + propertiesFromUrl("jdbc:elasticsearch://https://domain-name.sub-domain.com:9023") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, "domain-name.sub-domain.com"), + KeyValuePairs.skvp(PortConnectionProperty.KEY, "9023"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "true")); + } + + @Test + public void testPropertiesFromUrlHttpsSchemeAndPath() { + propertiesFromUrl("jdbc:elasticsearch://https://domain-name.sub-domain.com:9023/context/path") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, "domain-name.sub-domain.com"), + KeyValuePairs.skvp(PortConnectionProperty.KEY, "9023"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "true"), + KeyValuePairs.skvp(PathConnectionProperty.KEY, "/context/path")); + } + + @Test + public void testPropertiesFromUrlAndQueryString() { + propertiesFromUrl("jdbc:elasticsearch://https://domain-name.sub-domain.com:9023/context/path?" 
+ + "user=username123&password=pass@$!w0rd") + .match( + KeyValuePairs.skvp(HostConnectionProperty.KEY, "domain-name.sub-domain.com"), + KeyValuePairs.skvp(PortConnectionProperty.KEY, "9023"), + KeyValuePairs.skvp(UseSSLConnectionProperty.KEY, "true"), + KeyValuePairs.skvp(PathConnectionProperty.KEY, "/context/path"), + KeyValuePairs.skvp(UserConnectionProperty.KEY, "username123"), + KeyValuePairs.skvp(PasswordConnectionProperty.KEY, "pass@$!w0rd")); + } + + @Test + public void testPropertiesFromUrlWithInvalidQueryString() { + final String url = "jdbc:elasticsearch://https://domain-name.sub-domain.com:9023/context/path?prop=value=3"; + + URISyntaxException ex = assertThrows(URISyntaxException.class, () -> UrlParser.parseProperties(url)); + assertTrue(ex.getMessage().contains("Expected key=value pairs")); + } + + private ConnectionPropertyMatcher propertiesFromUrl(String url) { + Properties props = Assertions.assertDoesNotThrow(() -> UrlParser.parseProperties(url), + () -> "Exception occurred when parsing URL: " + url); + return new ConnectionPropertyMatcher(props); + } + + private class ConnectionPropertyMatcher { + Properties properties; + + public ConnectionPropertyMatcher(Properties props) { + this.properties = props; + } + + public void match(KeyValuePairs.StringKvp... keyValuePairs) { + assertEquals(KeyValuePairs.toProperties(keyValuePairs), properties); + } + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JsonHttpProtocolTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JsonHttpProtocolTests.java new file mode 100644 index 0000000000..88a5de36e7 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/JsonHttpProtocolTests.java @@ -0,0 +1,398 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.protocol; + +import com.amazon.opendistroforelasticsearch.jdbc.config.ConnectionConfig; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.MalformedResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.exceptions.ResponseException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.HttpException; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonHttpProtocol; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonHttpProtocolFactory; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonQueryRequest; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonQueryResponse; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockCloseableHttpResponseBuilder; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockHttpTransport; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.QueryMock; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportException; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpTransport; +import org.apache.http.Header; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static 
org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.*; + + +public class JsonHttpProtocolTests { + + @Test + void testConnect() throws IOException { + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(200) + .withResponseBody(MockES.INSTANCE.getConnectionResponse()) + .build(); + + HttpTransport mockTransport = mock(HttpTransport.class); + + ArgumentCaptor captor = ArgumentCaptor.forClass(Header[].class); + when(mockTransport.doGet(eq("/"), captor.capture(), isNull(), anyInt())) + .thenReturn(mockResponse); + + JsonHttpProtocol protocol = JsonHttpProtocolFactory.INSTANCE.getProtocol( + mock(ConnectionConfig.class), mockTransport); + ConnectionResponse response = assertDoesNotThrow(() -> protocol.connect(0)); + + verify(mockTransport, times(1)).doGet(eq("/"), captor.capture(), isNull(), anyInt()); + + assertNotNull(captor.getAllValues(), "No headers captured in request"); + Header[] headers = captor.getAllValues().get(0); + + assertNotNull(headers, "No headers found in request"); + + boolean expectedHeadersPresent = Arrays.stream(headers).anyMatch( + (header) -> "Accept".equalsIgnoreCase(header.getName()) && + "application/json".equals(header.getValue())); + + assertTrue(expectedHeadersPresent, "Expected headers not found in request. 
Headers received: " + + Arrays.toString(headers)); + + assertNotNull(response.getClusterMetadata()); + assertEquals("c1", response.getClusterMetadata().getClusterName()); + assertEquals("JpZSfOJiSLOntGp0zljpVQ", response.getClusterMetadata().getClusterUUID()); + assertNotNull(response.getClusterMetadata().getVersion()); + assertEquals("6.3.1", response.getClusterMetadata().getVersion().getFullVersion()); + assertEquals(6, response.getClusterMetadata().getVersion().getMajor()); + assertEquals(3, response.getClusterMetadata().getVersion().getMinor()); + assertEquals(1, response.getClusterMetadata().getVersion().getRevision()); + } + + + @Test + void testConnectError() throws IOException { + HttpTransport mockTransport = mock(HttpTransport.class); + + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(404) + .build(); + + MockHttpTransport.setupConnectionResponse(mockTransport, mockResponse); + + JsonHttpProtocol protocol = new JsonHttpProtocol(mockTransport); + + HttpException ex = assertThrows(HttpException.class, () -> protocol.connect(0)); + assertEquals(404, ex.getStatusCode()); + } + + + @Test + void testConnectForbiddenError() throws IOException, TransportException { + HttpTransport mockTransport = mock(HttpTransport.class); + String responseBody = " {\"Message\":\"User: arn:aws:iam::1010001001000:user/UserId " + + "is not authorized to perform: es:ESHttpGet\"}"; + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(403) + .withContentType("application/json") + .withResponseBody(responseBody) + .build(); + + MockHttpTransport.setupConnectionResponse(mockTransport, mockResponse); + + JsonHttpProtocol protocol = new JsonHttpProtocol(mockTransport); + + HttpException httpException = assertThrows(HttpException.class, () -> protocol.connect(0)); + assertEquals(403, httpException.getStatusCode()); + assertNotNull(httpException.getLocalizedMessage(), "HttpException message is 
null"); + assertTrue(httpException.getLocalizedMessage().contains(responseBody), + "HttpException message does not contain response received"); + } + + @Test + void testConnectMalformedResponse() throws IOException { + HttpTransport mockTransport = mock(HttpTransport.class); + + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(200) + .withResponseBody("") + .build(); + + MockHttpTransport.setupConnectionResponse(mockTransport, mockResponse); + + JsonHttpProtocol protocol = JsonHttpProtocolFactory.INSTANCE.getProtocol( + mock(ConnectionConfig.class), mockTransport); + + assertThrows(MalformedResponseException.class, () -> protocol.connect(0)); + } + + @Test + void testQueryResponseNycTaxis() throws IOException { + QueryMock queryMock = new QueryMock.NycTaxisQueryMock(); + + HttpTransport mockTransport = mock(HttpTransport.class); + + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(200) + .withResponseBody(queryMock.getResponseBody()) + .build(); + + JsonHttpProtocol protocol = JsonHttpProtocolFactory.INSTANCE.getProtocol( + mock(ConnectionConfig.class), mockTransport); + + MockHttpTransport.setupQueryResponse(protocol, mockTransport, mockResponse); + + QueryResponse response = assertDoesNotThrow(() -> protocol.execute(buildJsonQueryRequest(queryMock))); + + Assertions.assertEquals( + buildJsonQueryResponse( + toSchema( + schemaEntry("pickup_datetime", "date"), + schemaEntry("trip_type", "keyword"), + schemaEntry("passenger_count", "integer"), + schemaEntry("fare_amount", "scaled_float"), + schemaEntry("extra", "scaled_float"), + schemaEntry("vendor_id", "keyword") + ), + toDatarows( + toDatarow("2015-01-01 00:34:42", "1", 1, 5, 0.5, "2"), + toDatarow("2015-01-01 00:34:46", "1", 1, 12, 0.5, "2"), + toDatarow("2015-01-01 00:34:44", "1", 1, 5, 0.5, "1"), + toDatarow("2015-01-01 00:34:48", "1", 1, 5, 0.5, "2"), + toDatarow("2015-01-01 00:34:53", "1", 1, 24.5, 0.5, "2") + 
), + 5, 1000, 200), + response); + } + + @Test + void testQueryResponseWithAliasesNycTaxis() throws IOException { + QueryMock queryMock = new QueryMock.NycTaxisQueryWithAliasMock(); + + HttpTransport mockTransport = mock(HttpTransport.class); + + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(200) + .withResponseBody(queryMock.getResponseBody()) + .build(); + + JsonHttpProtocol protocol = JsonHttpProtocolFactory.INSTANCE.getProtocol( + mock(ConnectionConfig.class), mockTransport); + + MockHttpTransport.setupQueryResponse(protocol, mockTransport, mockResponse); + + QueryResponse response = assertDoesNotThrow(() -> protocol.execute(buildJsonQueryRequest(queryMock))); + + Assertions.assertEquals( + buildJsonQueryResponse( + toSchema( + schemaEntry("pickup_datetime", "date", "pdt"), + schemaEntry("trip_type", "keyword"), + schemaEntry("passenger_count", "integer", "pc"), + schemaEntry("fare_amount", "scaled_float"), + schemaEntry("extra", "scaled_float"), + schemaEntry("vendor_id", "keyword") + ), + toDatarows( + toDatarow("2015-01-01 00:34:42", "1", 1, 5, 0.5, "2"), + toDatarow("2015-01-01 00:34:46", "1", 1, 12, 0.5, "2"), + toDatarow("2015-01-01 00:34:44", "1", 1, 5, 0.5, "1"), + toDatarow("2015-01-01 00:34:48", "1", 1, 5, 0.5, "2"), + toDatarow("2015-01-01 00:34:53", "1", 1, 24.5, 0.5, "2") + ), + 5, 1000, 200), + response); + } + + @Test + void testQueryResponseSoNested() throws IOException { + QueryMock queryMock = new QueryMock.SoNestedQueryMock(); + + HttpTransport mockTransport = mock(HttpTransport.class); + + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(200) + .withResponseBody(queryMock.getResponseBody()) + .build(); + + JsonHttpProtocol protocol = JsonHttpProtocolFactory.INSTANCE.getProtocol( + mock(ConnectionConfig.class), mockTransport); + + MockHttpTransport.setupQueryResponse(protocol, mockTransport, mockResponse); + + QueryResponse response = 
assertDoesNotThrow(() -> protocol.execute(buildJsonQueryRequest(queryMock))); + + Assertions.assertEquals( + buildJsonQueryResponse( + toSchema( + schemaEntry("user", "keyword"), + schemaEntry("title", "text"), + schemaEntry("qid", "keyword"), + schemaEntry("creationDate", "date") + ), + toDatarows( + toDatarow("Jash", + "Display Progress Bar at the Time of Processing", + "1000000", + "2009-06-16T07:28:42.770"), + toDatarow("Michael Ecklund (804104)", + "PHP Sort array by field?", + "10000005", + "2012-04-03T19:25:46.213"), + toDatarow("farley (1311218)", + "Arrays in PHP seems to drop elements", + "10000007", + "2012-04-03T19:26:05.400"), + toDatarow("John Strickler (292614)", + "RESTful servlet URLs - servlet-mapping in web.xml", + "10000008", + "2012-04-03T19:26:09.137"), + toDatarow("rahulm (123536)", + "Descriptor conversion problem", + "1000001", + "2009-06-16T07:28:52.333") + ), + 5, 20000, 200), + response); + } + + @Test + void testQueryResponseInternalServerError() throws IOException { + QueryMock queryMock = new QueryMock.NycTaxisQueryInternalErrorMock(); + + HttpTransport mockTransport = mock(HttpTransport.class); + + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(200) + .withResponseBody(queryMock.getResponseBody()) + .build(); + + JsonHttpProtocol protocol = JsonHttpProtocolFactory.INSTANCE.getProtocol( + mock(ConnectionConfig.class), mockTransport); + + MockHttpTransport.setupQueryResponse(protocol, mockTransport, mockResponse); + + QueryResponse response = assertDoesNotThrow(() -> protocol.execute(buildJsonQueryRequest(queryMock))); + + JsonQueryResponse.JsonRequestError error = new JsonQueryResponse.JsonRequestError(); + error.setReason("error reason"); + error.setType("java.lang.NullPointerException"); + error.setDetails( + "java.lang.NullPointerException\n\t" + + "at org.elasticsearch.plugin.nlpcn.Schema.getTypeFromMetaData(Schema.java:156)\n\t" + + "at 
org.elasticsearch.plugin.nlpcn.Schema.populateColumns(Schema.java:146)\n\t" + + "at java.base/java.lang.Thread.run(Thread.java:844)\n" + ); + + assertEquals(buildJsonQueryResponse(null, null, 0, 0, 500, error), response); + } + + @Test + void testQueryResponseSqlPluginPossiblyMissing() throws IOException { + QueryMock queryMock = new QueryMock.NycTaxisQueryInternalErrorMock(); + + HttpTransport mockTransport = mock(HttpTransport.class); + + String responseBody = "{\"error\":\"Incorrect HTTP method for uri [/_sql?format=jdbc] " + + "and method [POST], allowed: [PUT, DELETE, GET, HEAD]\",\"status\":405}"; + + CloseableHttpResponse mockResponse = new MockCloseableHttpResponseBuilder() + .withHttpReturnCode(405) + .withContentType("application/json; charset=UTF-8") + .withResponseBody(responseBody) + .build(); + + JsonHttpProtocol protocol = JsonHttpProtocolFactory.INSTANCE.getProtocol( + mock(ConnectionConfig.class), mockTransport); + + MockHttpTransport.setupQueryResponse(protocol, mockTransport, mockResponse); + + ResponseException responseException = assertThrows(ResponseException.class, + () -> protocol.execute(buildJsonQueryRequest(queryMock))); + + assertNotNull(responseException.getMessage()); + assertTrue(responseException.getMessage().contains("Make sure the SQL plugin is installed")); + + Throwable cause = responseException.getCause(); + assertNotNull(cause, "Expected ResponseException cause to be non-null "); + assertTrue(responseException.getCause() instanceof HttpException, () -> "ResponseException cause expected to " + + "be of type " + HttpException.class + " but was: " + responseException.getCause().getClass()); + + HttpException httpException = (HttpException) cause; + assertEquals(405, httpException.getStatusCode()); + assertNotNull(httpException.getLocalizedMessage(), "HttpException message is null"); + assertTrue(httpException.getLocalizedMessage().contains(responseBody), + "HttpException message does not contain response received"); + + } + + + 
private JsonQueryRequest buildJsonQueryRequest(QueryMock queryMock) { + return buildJsonQueryRequest(queryMock.getSql()); + } + + private JsonQueryRequest buildJsonQueryRequest(String sql) { + return new JsonQueryRequest(new JdbcQueryRequest(sql)); + } + + private JsonQueryResponse buildJsonQueryResponse( + List schema, List> datarows, + int size, int total, int status) { + return buildJsonQueryResponse(schema, datarows, size, total, status, null); + } + + private JsonQueryResponse buildJsonQueryResponse( + List schema, List> datarows, + int size, int total, int status, JsonQueryResponse.JsonRequestError error) { + JsonQueryResponse response = new JsonQueryResponse(); + + response.setSchema(schema); + response.setDatarows(datarows); + response.setSize(size); + response.setTotal(total); + response.setStatus(status); + response.setError(error); + + return response; + } + + private static List toSchema(JsonQueryResponse.SchemaEntry... schemaEntries) { + return Arrays.asList(schemaEntries); + } + + private static JsonQueryResponse.SchemaEntry schemaEntry(String name, String type) { + return schemaEntry(name, type, null); + } + + private static JsonQueryResponse.SchemaEntry schemaEntry(String name, String type, String label) { + return new JsonQueryResponse.SchemaEntry(name, type, label); + } + + private static List> toDatarows(List... values) { + return Arrays.asList(values); + } + + private static List toDatarow(Object... 
values) { + return Arrays.asList(values); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorQueryRequestTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorQueryRequestTests.java new file mode 100644 index 0000000000..a42605aa8d --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/protocol/http/JsonCursorQueryRequestTests.java @@ -0,0 +1,25 @@ +package com.amazon.opendistroforelasticsearch.jdbc.protocol.http; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.JdbcQueryRequest; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; + +public class JsonCursorQueryRequestTests { + + @Test + public void testCursorRequestBody() { + JdbcQueryRequest jdbcQueryRequest = new JdbcQueryRequest("abcde12345"); + JsonCursorQueryRequest jsonCursorQueryRequest = new JsonCursorQueryRequest(jdbcQueryRequest); + ObjectMapper mapper = new ObjectMapper(); + String expectedRequestBody = "{\"cursor\":\"abcde12345\"}"; + String actual = assertDoesNotThrow(() -> mapper.writeValueAsString(jsonCursorQueryRequest)); + assertEquals(expectedRequestBody, actual); + + assertEquals(0, jsonCursorQueryRequest.getFetchSize()); + assertEquals(null, jsonCursorQueryRequest.getParameters()); + + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/KeyValuePairs.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/KeyValuePairs.java new file mode 100644 index 0000000000..1da7d0d9c6 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/KeyValuePairs.java @@ -0,0 +1,58 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test; + +import java.util.AbstractMap; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.function.Supplier; +import java.util.stream.Collector; +import java.util.stream.Collectors; + +/** + * A Factory class for building key-value pair objects + */ +public class KeyValuePairs { + + public static StringKvp skvp(String key, String value) { + return new StringKvp(key, value); + } + + /** + * Models a key-value pair where both key and value are Strings + */ + public static class StringKvp extends AbstractMap.SimpleImmutableEntry { + + public StringKvp(String key, String value) { + super(key, value); + } + } + + public static Properties toProperties(final StringKvp... kvps) { + Properties props = new Properties(); + Arrays.stream(kvps).forEach(kvp -> props.setProperty(kvp.getKey(), kvp.getValue())); + return props; + } + + public static Map toMap(final StringKvp... 
kvps) { + return Arrays.stream(kvps).collect(Collectors.toMap(StringKvp::getKey, StringKvp::getValue)); + } +} + + diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/PerClassWireMockServerExtension.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/PerClassWireMockServerExtension.java new file mode 100644 index 0000000000..9715773930 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/PerClassWireMockServerExtension.java @@ -0,0 +1,109 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test; + + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.common.ConsoleNotifier; +import org.junit.jupiter.api.extension.*; + +import java.lang.reflect.Field; + +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.options; + + +/** + * JUnit extension to inject a WireMockServer instance into a + * {@link WireMockServer} parameter for a Test. + *

+ * Use this extension to reuse a single {@link WireMockServer} + * instance across all Tests in a class. + *

+ * Since the tests operate on a shared mock server instance, + * thread safety should be considered if any of the tests + * are expected to be run in parallel. + *

+ * The extension ensures: + *

+ * <ul>
+ *   <li>
+ * Before any tests run, a mock server is started.
+ *   </li>
+ *   <li>
+ * Each Test declaring a {@link WireMockServer} parameter receives
+ * the mock server instance in the parameter.
+ *   </li>
+ *   <li>
+ * After each test, all request Stub mappings are reset - this
+ * ensures request mappings created in one test never leak into
+ * a subsequent test.
+ *   </li>
+ *   <li>
+ * After all tests, the mock server is stopped.
+ *   </li>
+ * </ul>

    + */ +public class PerClassWireMockServerExtension implements BeforeAllCallback, AfterAllCallback, + AfterEachCallback, ParameterResolver { + + private WireMockServer mockServer; + + @Override + public void beforeAll(ExtensionContext context) throws Exception { + createAndStartMockServer(); + } + + @Override + public void afterEach(ExtensionContext context) throws Exception { + mockServer.resetToDefaultMappings(); + } + + @Override + public void afterAll(ExtensionContext context) throws Exception { + cleanupMockServer(context); + } + + private WireMockServer createAndStartMockServer() { + System.out.println("Creating mock server"); + mockServer = new WireMockServer(options() + .dynamicPort() + .notifier(new ConsoleNotifier(true) + )); + + mockServer.start(); + return mockServer; + } + + private void cleanupMockServer(ExtensionContext context) { + if (mockServer != null) { + System.out.println("Cleaning up mock server"); + mockServer.stop(); + mockServer = null; + } + } + + @Override + public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext) + throws ParameterResolutionException { + return parameterContext.getParameter().getType() == WireMockServer.class; + } + + @Override + public Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext) + throws ParameterResolutionException { + return mockServer; + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/PerTestWireMockServerExtension.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/PerTestWireMockServerExtension.java new file mode 100644 index 0000000000..d32812954b --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/PerTestWireMockServerExtension.java @@ -0,0 +1,94 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test; + + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.common.ConsoleNotifier; +import org.junit.jupiter.api.extension.AfterEachCallback; +import org.junit.jupiter.api.extension.BeforeEachCallback; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.api.extension.ParameterContext; +import org.junit.jupiter.api.extension.ParameterResolutionException; +import org.junit.jupiter.api.extension.ParameterResolver; + +import java.lang.reflect.Field; + +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.options; + + +/** + * JUnit extension to inject a WireMockServer instance into a + * {@link WireMockServer} parameter for a Test. + * + * Use this extension to create a new {@link WireMockServer} + * instance for each Test in a class. + * + * The extension ensures: + *

+ * <ul>
+ *   <li>
+ * Before each test, mock server is started and made available to the Test as a parameter.
+ * Note: if a test does not declare a {@link WireMockServer} parameter, no mock server
+ * instance is created.
+ *   </li>
+ *   <li>
+ * After the test execution, the mock server is stopped.
+ *   </li>
+ * </ul>

    + * + * + */ +public class PerTestWireMockServerExtension implements AfterEachCallback, ParameterResolver { + + private WireMockServer mockServer; + + @Override + public void afterEach(ExtensionContext context) throws Exception { + cleanupMockServer(context); + } + + private WireMockServer createAndStartMockServer() { + System.out.println("Creating mock server"); + mockServer = new WireMockServer(options() + .dynamicPort() + .notifier(new ConsoleNotifier(true) + )); + + mockServer.start(); + return mockServer; + } + + private void cleanupMockServer(ExtensionContext context) { + if (mockServer != null) { + System.out.println("Cleaning up mock server"); + mockServer.stop(); + mockServer = null; + } + } + + @Override + public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext) + throws ParameterResolutionException { + return parameterContext.getParameter().getType() == WireMockServer.class; + } + + @Override + public Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext) + throws ParameterResolutionException { + return createAndStartMockServer(); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/TLSServer.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/TLSServer.java new file mode 100644 index 0000000000..f1eb46bccf --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/TLSServer.java @@ -0,0 +1,165 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.util.UrlParser; +import com.amazon.opendistroforelasticsearch.jdbc.test.mocks.MockES; +import org.eclipse.jetty.server.ConnectionFactory; +import org.eclipse.jetty.server.Connector; +import org.eclipse.jetty.server.Handler; +import org.eclipse.jetty.server.HttpConfiguration; +import org.eclipse.jetty.server.NetworkTrafficServerConnector; +import org.eclipse.jetty.server.Request; +import org.eclipse.jetty.server.SecureRequestCustomizer; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.handler.AbstractHandler; +import org.eclipse.jetty.util.ssl.SslContextFactory; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; + +public class TLSServer { + + public static final String TRUST_SERVER_JKS_RESOURCE = "mock/jks/truststore_with_server_cert.jks"; + public static final String TRUST_CLIENT_JKS_RESOURCE = "mock/jks/truststore_with_client_cert.jks"; + + public static final String SERVER_KEY_JKS_RESOURCE = "mock/jks/keystore_with_server_key.jks"; + public static final String SERVER_KEY_JKS_RESOURCE_NON_LOCALHOST = "mock/jks/keystore_with_non_localhost_server_key.jks"; + public static final String CLIENT_KEY_JKS_RESOURCE = "mock/jks/keystore_with_client_key.jks"; + + public static Server startSecureServer( + String host, + String keyStorePath, + String keyStorePassword, + String keyStoreType, + Handler handler) throws Exception { + + return startSecureServer( + host, + keyStorePath, + keyStorePassword, + keyStoreType, + null, + null, + 
null, + false, + handler + ); + } + + public static Server startSecureServer( + String host, + String keyStorePath, + String keyStorePassword, + String keyStoreType, + String trustStorePath, + String trustStorePassword, + String trustStoreType, + boolean needClientAuth, + Handler handler) throws Exception { + Server jettyServer = new Server(); + jettyServer.setStopTimeout(0); + + ServerConnector httpsConnector = null; + + // setup ssl + SslContextFactory sslContextFactory = new SslContextFactory(); + sslContextFactory.setKeyStorePath(keyStorePath); + sslContextFactory.setKeyStorePassword(keyStorePassword); + sslContextFactory.setKeyStoreType(keyStoreType); + + if (trustStorePath != null) { + sslContextFactory.setTrustStorePath(trustStorePath); + sslContextFactory.setTrustStorePassword(trustStorePassword); + sslContextFactory.setTrustStoreType(trustStoreType); + } + sslContextFactory.setNeedClientAuth(needClientAuth); + + HttpConfiguration httpConfig = new HttpConfiguration(); + httpConfig.addCustomizer(new SecureRequestCustomizer()); + + httpsConnector = createServerConnector( + jettyServer, + host, + 0, + new org.eclipse.jetty.server.SslConnectionFactory( + sslContextFactory, + "http/1.1" + ), + new org.eclipse.jetty.server.HttpConnectionFactory(httpConfig) + ); + + jettyServer.addConnector(httpsConnector); + jettyServer.setHandler(handler); + jettyServer.start(); + + return jettyServer; + } + + public static class MockESConnectionHandler extends AbstractHandler { + @Override + public void handle( + String target, + Request baseRequest, + HttpServletRequest request, + HttpServletResponse response) throws IOException, ServletException { + response.setContentType("application/json"); + response.setStatus(200); + baseRequest.setHandled(true); + response.getWriter().write(MockES.INSTANCE.getConnectionResponse()); + } + } + + private static ServerConnector createServerConnector( + Server jettyServer, + String bindAddress, + int port, + ConnectionFactory... 
connectionFactories) { + NetworkTrafficServerConnector connector = new NetworkTrafficServerConnector( + jettyServer, + null, + null, + null, + 2, + 2, + connectionFactories + ); + connector.setPort(port); + connector.setStopTimeout(0); + connector.getSelectorManager().setStopTimeout(0); + connector.setHost(bindAddress); + + return connector; + } + + public static String getBaseURLForConnect(Server jettyServer) { + int port = -1; + String host = null; + + for (Connector c : jettyServer.getConnectors()) { + if (c instanceof ServerConnector) { + port = ((ServerConnector) c).getLocalPort(); + host = ((ServerConnector) c).getHost(); + } + } + + return UrlParser.URL_PREFIX + "https://" + host + ":" + port; + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/TestResources.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/TestResources.java new file mode 100644 index 0000000000..1b52c854e0 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/TestResources.java @@ -0,0 +1,67 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; + +public class TestResources { + + public static String readResourceAsString(String resourcePath) throws IOException { + InputStream is = getResourceAsStream(resourcePath); + + StringBuilder sb = new StringBuilder(); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(is))) { + String line; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + } + return sb.toString(); + } + + public static InputStream getResourceAsStream(String resourcePath) throws IOException { + InputStream is = TestResources.class.getClassLoader().getResourceAsStream(resourcePath); + + if (is == null) { + throw new TestResourcesException("Resource with path: " + resourcePath + " not found!"); + } + + return is; + } + + public static void copyResourceToPath(String resourcePath, Path path) throws IOException { + InputStream is = getResourceAsStream(resourcePath); + + Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING); + } + + public static class TestResourcesException extends RuntimeException { + + public TestResourcesException() { + } + + public TestResourcesException(String message) { + super(message); + } + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/UTCTimeZoneTestExtension.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/UTCTimeZoneTestExtension.java new file mode 100644 index 0000000000..5f0a07a4d5 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/UTCTimeZoneTestExtension.java @@ -0,0 +1,42 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test; + +import org.junit.jupiter.api.extension.AfterEachCallback; +import org.junit.jupiter.api.extension.BeforeEachCallback; +import org.junit.jupiter.api.extension.ExtensionContext; + +import java.util.TimeZone; + +public class UTCTimeZoneTestExtension implements BeforeEachCallback, AfterEachCallback { + + TimeZone jvmDefaultTimeZone; + + @Override + public void afterEach(ExtensionContext context) throws Exception { + // restore JVM default timezone + TimeZone.setDefault(jvmDefaultTimeZone); + } + + @Override + public void beforeEach(ExtensionContext context) throws Exception { + jvmDefaultTimeZone = TimeZone.getDefault(); + + // test case inputs assume default TZ is UTC + TimeZone.setDefault(TimeZone.getTimeZone("UTC")); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/WireMockServerHelpers.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/WireMockServerHelpers.java new file mode 100644 index 0000000000..b3fc606076 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/WireMockServerHelpers.java @@ -0,0 +1,79 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test; + +import com.amazon.opendistroforelasticsearch.jdbc.config.HostConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.config.PortConnectionProperty; +import com.amazon.opendistroforelasticsearch.jdbc.internal.util.UrlParser; +import com.github.tomakehurst.wiremock.WireMockServer; + +import java.util.Properties; + +/** + * Adds {@link WireMockServer} related utility methods for + * to Tests. + *

    + * Useful for Tests that use WireMockServer extensions. + */ +public interface WireMockServerHelpers { + + /** + * Returns a Driver compatible JDBC connection URL that points to + * the {@link WireMockServer} instance specified on a specified + * context path. + * + * @param mockServer {@link WireMockServer} instance + * @param contextPath context path to place in the URL + * + * @return connection URL String + */ + default String getURLForMockServerWithContext(WireMockServer mockServer, String contextPath) { + return getBaseURLForMockServer(mockServer) + contextPath; + } + + /** + * Returns a Driver compatible JDBC connection URL that points to + * the {@link WireMockServer} instance specified. + * + * @param mockServer {@link WireMockServer} instance + * + * @return connection URL String + */ + default String getBaseURLForMockServer(WireMockServer mockServer) { + // Change this in case 'localhost' is not ok to use in + // all environments + return UrlParser.URL_PREFIX + "localhost:" + mockServer.port(); + } + + /** + * Returns a {@link Properties} object populated with connection + * properties needed to establish a connection to the + * {@link WireMockServer} instance specified. 
+ * + * @param mockServer {@link WireMockServer} instance + * + * @return Properties object + */ + default Properties getConnectionPropertiesForMockServer(WireMockServer mockServer) { + Properties properties = new Properties(); + + properties.setProperty(HostConnectionProperty.KEY, "localhost"); + properties.setProperty(PortConnectionProperty.KEY, String.valueOf(mockServer.port())); + + return properties; + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockCloseableHttpResponseBuilder.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockCloseableHttpResponseBuilder.java new file mode 100644 index 0000000000..61ba63325c --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockCloseableHttpResponseBuilder.java @@ -0,0 +1,70 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test.mocks; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.message.BasicHeader; + +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MockCloseableHttpResponseBuilder { + + private int httpCode; + private String responseBody; + private Header contentTypeHeader; + + public MockCloseableHttpResponseBuilder withHttpReturnCode(int httpCode) { + this.httpCode = httpCode; + return this; + } + + public MockCloseableHttpResponseBuilder withResponseBody(String responseBody) { + this.responseBody = responseBody; + return this; + } + + public MockCloseableHttpResponseBuilder withContentType(String contentType) { + this.contentTypeHeader = new BasicHeader("content-type", contentType); + return this; + } + + public CloseableHttpResponse build() throws IOException { + StatusLine mockStatusLine = mock(StatusLine.class); + HttpEntity mockEntity = mock(HttpEntity.class); + + CloseableHttpResponse mockResponse = mock(CloseableHttpResponse.class); + when(mockResponse.getStatusLine()).thenReturn(mockStatusLine); + when(mockStatusLine.getStatusCode()).thenReturn(httpCode); + when(mockResponse.getEntity()).thenReturn(mockEntity); + when(mockEntity.getContentType()).thenReturn(contentTypeHeader); + + // this mimics a real stream that can be consumed just once + // as is the case with a server response. This makes this mock + // response object single-use with regards to reading the + // response content. + when(mockEntity.getContent()).thenReturn(responseBody == null ? 
null + : new ByteArrayInputStream(responseBody.getBytes("UTF-8"))); + return mockResponse; + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockES.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockES.java new file mode 100644 index 0000000000..4750c65e07 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockES.java @@ -0,0 +1,67 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test.mocks; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchConnection; + +import java.sql.SQLException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Utility class for obtaining mocked ES responses for tests. 
+ */ +public class MockES { + // can be turned into a mock that can serve ES version specific + // responses + public static final MockES INSTANCE = new MockES(); + + private MockES() { + + } + + public String getConnectionResponse() { + return "{\n" + + " \"name\" : \"NniGzjJ\",\n" + + " \"cluster_name\" : \"c1\",\n" + + " \"cluster_uuid\" : \"JpZSfOJiSLOntGp0zljpVQ\",\n" + + " \"version\" : {\n" + + " \"number\" : \"6.3.1\",\n" + + " \"build_flavor\" : \"default\",\n" + + " \"build_type\" : \"zip\",\n" + + " \"build_hash\" : \"4736258\",\n" + + " \"build_date\" : \"2018-10-11T03:50:25.929309Z\",\n" + + " \"build_snapshot\" : true,\n" + + " \"lucene_version\" : \"7.3.1\",\n" + + " \"minimum_wire_compatibility_version\" : \"5.6.0\",\n" + + " \"minimum_index_compatibility_version\" : \"5.0.0\"\n" + + " },\n" + + " \"tagline\" : \"You Know, for Search\"\n" + + "}"; + } + + public void assertMockESConnectionResponse(ElasticsearchConnection esCon) throws SQLException { + assertEquals("c1", esCon.getClusterName()); + assertEquals("JpZSfOJiSLOntGp0zljpVQ", esCon.getClusterUUID()); + + assertNotNull(esCon.getMetaData().getDatabaseProductVersion()); + assertEquals("6.3.1", esCon.getMetaData().getDatabaseProductVersion()); + assertEquals(6, esCon.getMetaData().getDatabaseMajorVersion()); + assertEquals(3, esCon.getMetaData().getDatabaseMinorVersion()); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockHttpTransport.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockHttpTransport.java new file mode 100644 index 0000000000..386d4be717 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockHttpTransport.java @@ -0,0 +1,44 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test.mocks; + +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonHttpProtocol; +import com.amazon.opendistroforelasticsearch.jdbc.transport.TransportException; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpParam; +import com.amazon.opendistroforelasticsearch.jdbc.transport.http.HttpTransport; +import org.apache.http.Header; +import org.apache.http.client.methods.CloseableHttpResponse; + +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.when; + +public class MockHttpTransport { + + public static void setupConnectionResponse(HttpTransport mockTransport, CloseableHttpResponse mockResponse) + throws TransportException { + when(mockTransport.doGet(eq("/"), any(Header[].class), any(), anyInt())) + .thenReturn(mockResponse); + } + + public static void setupQueryResponse(JsonHttpProtocol protocol, + HttpTransport mockTransport, CloseableHttpResponse mockResponse) + throws TransportException { + when(mockTransport.doPost( + eq(protocol.getSqlContextPath()), any(Header[].class), any(HttpParam[].class), anyString(), anyInt())) + .thenReturn(mockResponse); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSet.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSet.java new file mode 100644 index 0000000000..61d48f9569 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSet.java @@ -0,0 +1,30 @@ +package 
com.amazon.opendistroforelasticsearch.jdbc.test.mocks; + +import java.sql.ResultSet; +import java.sql.SQLException; + +public class MockResultSet { + + MockResultSetMetaData mockResultSetMetaData; + MockResultSetRows mockResultSetRows; + + public MockResultSet(MockResultSetMetaData mockResultSetMetaData, MockResultSetRows mockResultSetRows) { + if (mockResultSetMetaData == null || mockResultSetRows == null) { + throw new IllegalArgumentException("Neither metadata nor rows can be null"); + } + + if (!mockResultSetRows.isEmpty() && mockResultSetMetaData.getColumnCount() != mockResultSetRows.getColumnCount()) { + throw new IllegalArgumentException( + "Column count mismatch. MetaData has " + mockResultSetMetaData.getColumnCount() + + " columns, but rows have " + mockResultSetRows.getColumnCount()); + } + + this.mockResultSetMetaData = mockResultSetMetaData; + this.mockResultSetRows = mockResultSetRows; + } + + public void assertMatches(ResultSet rs) throws SQLException { + mockResultSetMetaData.assertMatches(rs.getMetaData()); + mockResultSetRows.assertMatches(rs); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSetMetaData.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSetMetaData.java new file mode 100644 index 0000000000..1be141cfdb --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSetMetaData.java @@ -0,0 +1,351 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test.mocks; + +import com.amazon.opendistroforelasticsearch.jdbc.internal.JdbcWrapper; +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class MockResultSetMetaData implements ResultSetMetaData, JdbcWrapper { + + private List mockColumnRsmds; + + public MockResultSetMetaData(List mockColumnRsmds) { + this.mockColumnRsmds = Collections.unmodifiableList(mockColumnRsmds); + } + + public static Builder builder() { + return new Builder(); + } + @Override + public int getColumnCount() { + return mockColumnRsmds.size(); + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isAutoIncrement; + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isCaseSensitive; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isSearchable; + } + + @Override + public boolean isCurrency(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isCurrency; + } + + @Override + public int isNullable(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isNullable; + } + + @Override + public boolean isSigned(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isSigned; + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).columnDisplaySize; + } + + @Override + public String getColumnLabel(int column) throws SQLException { + String label = 
mockColumnRsmds.get(column - 1).columnLabel; + + if (label == null) { + // expected behavior per JDBC spec + label = getColumnName(column); + } + return label; + } + + @Override + public String getColumnName(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).columnName; + } + + @Override + public String getSchemaName(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).schemaName; + } + + @Override + public int getPrecision(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).precision; + } + + @Override + public int getScale(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).scale; + } + + @Override + public String getTableName(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).tableName; + } + + @Override + public String getCatalogName(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).catalogName; + } + + @Override + public int getColumnType(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).columnType; + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).columnTypeName; + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isReadOnly; + } + + @Override + public boolean isWritable(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isWritable; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).isDefinitelyWritable; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + return mockColumnRsmds.get(column - 1).columnClassName; + } + + public static class Builder { + + private MockColumnRsmd currentColumnRsmd; + + private List columnRsmds = new ArrayList<>(); + + private Builder() { + + } + + Builder column() { + MockColumnRsmd 
columnRsmd = new MockColumnRsmd(); + columnRsmds.add(columnRsmd); + currentColumnRsmd = columnRsmd; + return this; + } + + public Builder column(String columnName) { + column(columnName, ElasticsearchType.TEXT); + return this; + } + + public Builder column(String columnName, ElasticsearchType columnType) { + column(); + setColumnName(columnName); + setColumnESType(columnType); + return this; + } + + public MockResultSetMetaData build() { + return new MockResultSetMetaData(columnRsmds); + } + + + public Builder setAutoIncrement(boolean autoIncrement) { + currentColumnRsmd.isAutoIncrement = autoIncrement; + return this; + } + + public Builder setCaseSensitive(boolean caseSensitive) { + currentColumnRsmd.isCaseSensitive = caseSensitive; + return this; + } + + public Builder setSearchable(boolean searchable) { + currentColumnRsmd.isSearchable = searchable; + return this; + } + + public Builder setIsNullable(int isNullable) { + currentColumnRsmd.isNullable = isNullable; + return this; + } + + public Builder setCurrency(boolean currency) { + currentColumnRsmd.isCurrency = currency; + return this; + } + + public Builder setColumnTypeName(String columnTypeName) { + currentColumnRsmd.columnTypeName = columnTypeName; + return this; + } + + public Builder setSigned(boolean signed) { + currentColumnRsmd.isSigned = signed; + return this; + } + + public Builder setColumnDisplaySize(int columnDisplaySize) { + currentColumnRsmd.columnDisplaySize = columnDisplaySize; + return this; + } + + public Builder setColumnLabel(String columnLabel) { + currentColumnRsmd.columnLabel = columnLabel; + return this; + } + + public Builder setColumnName(String columnName) { + currentColumnRsmd.columnName = columnName; + return this; + } + + public Builder setColumnESType(final ElasticsearchType esType) { + setColumnType(esType.getJdbcType().getVendorTypeNumber()); + setPrecision(esType.getPrecision()); + setColumnDisplaySize(esType.getDisplaySize()); + 
setColumnClassName(esType.getJavaClassName()); + setColumnTypeName(esType.getJdbcType().getName()); + setSigned(esType.isSigned()); + return this; + } + + public Builder setSchemaName(String schemaName) { + currentColumnRsmd.schemaName = schemaName; + return this; + } + + public Builder setPrecision(int precision) { + currentColumnRsmd.precision = precision; + return this; + } + + public Builder setScale(int scale) { + currentColumnRsmd.scale = scale; + return this; + } + + public Builder setTableName(String tableName) { + currentColumnRsmd.tableName = tableName; + return this; + } + + public Builder setCatalogName(String catalogName) { + currentColumnRsmd.catalogName = catalogName; + return this; + } + + public Builder setColumnType(int columnType) { + currentColumnRsmd.columnType = columnType; + return this; + } + + public Builder setReadOnly(boolean readOnly) { + currentColumnRsmd.isReadOnly = readOnly; + return this; + } + + public Builder setWritable(boolean writable) { + currentColumnRsmd.isWritable = writable; + return this; + } + + public Builder setDefinitelyWritable(boolean definitelyWritable) { + currentColumnRsmd.isDefinitelyWritable = definitelyWritable; + return this; + } + + public Builder setColumnClassName(String columnClassName) { + currentColumnRsmd.columnClassName = columnClassName; + return this; + } + + } + + public void assertMatches(ResultSetMetaData other) throws SQLException { + assertEquals(this.getColumnCount(), other.getColumnCount(), "column count"); + + for (int i = 1; i <= this.getColumnCount(); i++) { + assertEquals(this.getCatalogName(i), other.getCatalogName(i), "column "+i+" catalog name"); + assertEquals(this.getColumnClassName(i), other.getColumnClassName(i), "column "+i+" column class name"); + assertEquals(this.getColumnDisplaySize(i), other.getColumnDisplaySize(i), "column "+i+" column disp size"); + assertEquals(this.getColumnName(i), other.getColumnName(i), "column "+i+" column name"); + 
assertEquals(this.getColumnLabel(i), other.getColumnLabel(i), "column "+i+" column label"); + assertEquals(this.getColumnType(i), other.getColumnType(i), "column "+i+" column type"); + assertEquals(this.isAutoIncrement(i), other.isAutoIncrement(i), "column "+i+" auto increment"); + assertEquals(this.isCaseSensitive(i), other.isCaseSensitive(i), "column "+i+" case sensitive"); + assertEquals(this.isSearchable(i), other.isSearchable(i), "column "+i+" searchable"); + assertEquals(this.isNullable(i), other.isNullable(i), "column "+i+" nullable"); + assertEquals(this.isCurrency(i), other.isCurrency(i), "column "+i+" is currency"); + assertEquals(this.getColumnTypeName(i), other.getColumnTypeName(i), "column "+i+" column type name"); + assertEquals(this.isSigned(i), other.isSigned(i), "column "+i+" signed"); + assertEquals(this.getSchemaName(i), other.getSchemaName(i), "column "+i+" schema name"); + assertEquals(this.getPrecision(i), other.getPrecision(i), "column "+i+" precision"); + assertEquals(this.getScale(i), other.getScale(i), "column "+i+" scale"); + assertEquals(this.getTableName(i), other.getTableName(i), "column "+i+" table name"); + assertEquals(this.isReadOnly(i), other.isReadOnly(i), "column "+i+" read only"); + assertEquals(this.isWritable(i), other.isWritable(i), "column "+i+" writable"); + assertEquals(this.isDefinitelyWritable(i), other.isDefinitelyWritable(i), "column "+i+" definitely writable"); + } + } + + private static class MockColumnRsmd { + + // initialized to defaults used in the implementation + private boolean isAutoIncrement = false; + private boolean isCaseSensitive = true; + private boolean isSearchable = true; + private int isNullable = columnNullableUnknown; + private boolean isCurrency = false; + private String columnTypeName; + private boolean isSigned = false; + private int columnDisplaySize; + private String columnLabel; + private String columnName; + private String schemaName = ""; + private int precision; + private int scale = 0; + 
private String tableName = ""; + private String catalogName = ""; + private int columnType; + private boolean isReadOnly = true; + private boolean isWritable = false; + private boolean isDefinitelyWritable = false; + private String columnClassName; + + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSetRows.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSetRows.java new file mode 100644 index 0000000000..5506464c0a --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/MockResultSetRows.java @@ -0,0 +1,135 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test.mocks; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; + +public class MockResultSetRows { + List> rows; + + private MockResultSetRows(List> rows) { + this.rows = rows; + } + + public static Builder builder() { + return new Builder(); + } + + public boolean isEmpty() { + return rows.isEmpty(); + } + + public int getColumnCount() { + return isEmpty() ? 
-1 : rows.get(0).size(); + } + + public void assertMatches(ResultSet rs) throws SQLException { + int rowNum = 0; + + for (List columnData : rows) { + rowNum++; + rs.next(); + int i = 0; + for (ColumnData data : columnData) { + Object resultSetValue = rs.getObject(++i); + assertEquals(data.getValue(), resultSetValue, "Row [" + rowNum + "], column [" + i + "] value mismatch"); + assertEquals(data.isNull(), rs.wasNull(), + "Row [" + rowNum + "], column [" + i + "] expected to be null: " + data.isNull() + + " but was: " + rs.wasNull()); + } + } + + assertFalse(rs.next(), () -> "ResultSet has more rows than expected. Expected: " + rows.size() + " rows."); + } + + public static class Builder { + List> rows = new ArrayList<>(); + + private ArrayList currentRow; + + private int rowSize = -1; + + private Builder() { + + } + + public Builder row() { + if (rows.size() > 0) { + if (rowSize == -1) { + rowSize = currentRow.size(); + } else { + validateRowSizes(); + } + } + currentRow = new ArrayList<>(); + rows.add(currentRow); + return this; + } + + public Builder column(Object value) { + return column(value, false); + } + + public Builder column(Object value, boolean isNull) { + currentRow.add(new ColumnData(value, isNull)); + return this; + } + + public MockResultSetRows build() { + if (rows.size() > 1) + validateRowSizes(); + return new MockResultSetRows(rows); + } + + private void validateRowSizes() { + if (rowSize != currentRow.size()) { + throw new IllegalArgumentException( + "Expect the row to have " + rowSize + " elements, but only " + currentRow.size() + " were added."); + + } + } + } + + + public static class ColumnData { + private Object value; + private boolean isNull; + + public ColumnData(Object value, boolean isNull) { + this.value = value; + this.isNull = isNull; + } + + public Object getValue() { + return value; + } + + public boolean isNull() { + return isNull; + } + } + + public static MockResultSetRows emptyResultSetRows() { + return 
MockResultSetRows.builder().build(); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/QueryMock.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/QueryMock.java new file mode 100644 index 0000000000..6d28c0a030 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/test/mocks/QueryMock.java @@ -0,0 +1,243 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.test.mocks; + +import com.amazon.opendistroforelasticsearch.jdbc.ElasticsearchConnection; +import com.amazon.opendistroforelasticsearch.jdbc.protocol.http.JsonHttpProtocol; +import com.amazon.opendistroforelasticsearch.jdbc.test.TestResources; +import com.amazon.opendistroforelasticsearch.jdbc.types.ElasticsearchType; +import com.github.tomakehurst.wiremock.WireMockServer; + +import java.io.IOException; +import java.sql.SQLException; +import java.sql.Timestamp; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; + +public abstract class QueryMock { + + public abstract String getSql(); + + public abstract String getResponseResourcePath(); + + public MockResultSet getMockResultSet() { + // overridden in QueryMocks that intend to vend + // a MockResultSet + return null; + } + + public void setupMockServerStub(final WireMockServer mockServer) + throws java.io.IOException { + setupMockServerStub(mockServer, "/", JsonHttpProtocol.DEFAULT_SQL_CONTEXT_PATH+"?format=jdbc"); + } + + public void setupMockServerStub(final WireMockServer mockServer, final String connectionUrl, final String queryUrl) + throws java.io.IOException { + + setupStubForConnect(mockServer, connectionUrl); + + // query response stub + mockServer.stubFor(post(urlEqualTo(queryUrl)) + .withHeader("Accept", equalTo("application/json")) + .withHeader("Content-Type", equalTo("application/json")) + .withRequestBody(matchingJsonPath("$.query", equalTo(getSql()))) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(getResponseBody()))); + } + + protected void setupStubForConnect(final WireMockServer mockServer, final String contextPath) { + // get Connection stub + mockServer.stubFor(get(urlEqualTo(contextPath)) + .withHeader("Accept", equalTo("application/json")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(MockES.INSTANCE.getConnectionResponse()))); + } + 
+ public String getResponseBody() throws IOException { + return TestResources.readResourceAsString(getResponseResourcePath()); + } + + public void assertConnectionResponse(ElasticsearchConnection esConnection) throws SQLException { + MockES.INSTANCE.assertMockESConnectionResponse(esConnection); + } + + public static class NycTaxisQueryMock extends QueryMock { + @Override + public String getSql() { + return "select pickup_datetime, trip_type, passenger_count, " + + "fare_amount, extra, vendor_id from nyc_taxis LIMIT 5"; + } + + @Override + public String getResponseResourcePath() { + return "mock/protocol/json/queryresponse_nyctaxis.json"; + } + + @Override + public MockResultSet getMockResultSet() { + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("pickup_datetime", ElasticsearchType.DATE) + .column("trip_type", ElasticsearchType.KEYWORD) + .column("passenger_count", ElasticsearchType.INTEGER) + .column("fare_amount", ElasticsearchType.SCALED_FLOAT) + .column("extra", ElasticsearchType.SCALED_FLOAT) + .column("vendor_id", ElasticsearchType.KEYWORD) + .build(); + + MockResultSetRows mockResultSetRows = MockResultSetRows.builder() + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:42")) + .column("1") + .column(1) + .column(5D) + .column(0.5D) + .column("2") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:46")) + .column("1") + .column(1) + .column(12D) + .column(0.5D) + .column("2") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:44")) + .column("1") + .column(1) + .column(5D) + .column(0.5D) + .column("1") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:48")) + .column("1") + .column(1) + .column(5D) + .column(0.5D) + .column("2") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:53")) + .column("1") + .column(1) + .column(24.5D) + .column(0.5D) + .column("2") + .build(); + + return new MockResultSet(mockResultSetMetaData, mockResultSetRows); + } + } + + public static class 
NycTaxisQueryWithAliasMock extends QueryMock { + @Override + public String getSql() { + return "select pickup_datetime as pdt, trip_type, passenger_count as pc, " + + "fare_amount, extra, vendor_id from nyc_taxis LIMIT 5"; + } + + @Override + public String getResponseResourcePath() { + return "mock/protocol/json/queryresponse_with_alias_nyctaxis.json"; + } + + @Override + public MockResultSet getMockResultSet() { + MockResultSetMetaData mockResultSetMetaData = MockResultSetMetaData.builder() + .column("pickup_datetime", ElasticsearchType.DATE) + .setColumnLabel("pdt") + .column("trip_type", ElasticsearchType.KEYWORD) + .column("passenger_count", ElasticsearchType.INTEGER) + .setColumnLabel("pc") + .column("fare_amount", ElasticsearchType.SCALED_FLOAT) + .column("extra", ElasticsearchType.SCALED_FLOAT) + .column("vendor_id", ElasticsearchType.KEYWORD) + .build(); + + MockResultSetRows mockResultSetRows = MockResultSetRows.builder() + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:42")) + .column("1") + .column(1) + .column(5D) + .column(0.5D) + .column("2") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:46")) + .column("1") + .column(1) + .column(12D) + .column(0.5D) + .column("2") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:44")) + .column("1") + .column(1) + .column(5D) + .column(0.5D) + .column("1") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:48")) + .column("1") + .column(1) + .column(5D) + .column(0.5D) + .column("2") + .row() + .column(Timestamp.valueOf("2015-01-01 00:34:53")) + .column("1") + .column(1) + .column(24.5D) + .column(0.5D) + .column("2") + .build(); + + return new MockResultSet(mockResultSetMetaData, mockResultSetRows); + } + } + + public static class SoNestedQueryMock extends QueryMock { + @Override + public String getSql() { + return "select user, title, qid, creation_date from sonested LIMIT 5"; + } + + @Override + public String getResponseResourcePath() { + return 
"mock/protocol/json/queryresponse_sonested.json"; + } + } + + public static class NycTaxisQueryInternalErrorMock extends NycTaxisQueryMock { + + @Override + public String getResponseResourcePath() { + return "mock/protocol/json/queryresponse_internal_server_error.json"; + } + } + + public static class NullableFieldsQueryMock extends QueryMock { + @Override + public String getSql() { + return "select * from nullablefields"; + } + + @Override + public String getResponseResourcePath() { + return "mock/protocol/json/queryresponse_nullablefields.json"; + } + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/auth/aws/AWSRequestSigningApacheInterceptorTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/auth/aws/AWSRequestSigningApacheInterceptorTests.java new file mode 100644 index 0000000000..99181b2739 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/transport/http/auth/aws/AWSRequestSigningApacheInterceptorTests.java @@ -0,0 +1,113 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.transport.http.auth.aws; + +import com.amazonaws.SignableRequest; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.AnonymousAWSCredentials; +import com.amazonaws.auth.Signer; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHttpEntityEnclosingRequest; +import org.apache.http.message.BasicHttpRequest; +import org.apache.http.protocol.BasicHttpContext; +import org.apache.http.protocol.HttpCoreContext; +import org.junit.jupiter.api.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class AWSRequestSigningApacheInterceptorTests { + + @Test + public void testSimpleSigner() throws Exception { + HttpEntityEnclosingRequest request = + new BasicHttpEntityEnclosingRequest("GET", "/query?a=b"); + request.setEntity(new StringEntity("I'm an entity")); + request.addHeader("foo", "bar"); + request.addHeader("content-length", "0"); + + HttpCoreContext context = new HttpCoreContext(); + context.setTargetHost(HttpHost.create("localhost")); + + createInterceptor().process(request, context); + + assertEquals("bar", request.getFirstHeader("foo").getValue()); + assertEquals("wuzzle", request.getFirstHeader("Signature").getValue()); + assertNull(request.getFirstHeader("content-length")); + } + + @Test + public void testBadRequest() throws Exception { + + HttpRequest badRequest = new BasicHttpRequest("GET", "?#!@*%"); + assertThrows(IOException.class, + () -> createInterceptor().process(badRequest, new BasicHttpContext())); + } + + private static class AddHeaderSigner implements Signer { + private 
final String name; + private final String value; + + private AddHeaderSigner(String name, String value) { + this.name = name; + this.value = value; + } + + + @Override + public void sign(SignableRequest request, AWSCredentials credentials) { + request.addHeader(name, value); + request.addHeader("resourcePath", request.getResourcePath()); + } + } + + @Test + public void testEncodedUriSigner() throws Exception { + HttpEntityEnclosingRequest request = + new BasicHttpEntityEnclosingRequest("GET", "/foo-2017-02-25%2Cfoo-2017-02-26/_search?a=b"); + request.setEntity(new StringEntity("I'm an entity")); + request.addHeader("foo", "bar"); + request.addHeader("content-length", "0"); + + HttpCoreContext context = new HttpCoreContext(); + context.setTargetHost(HttpHost.create("localhost")); + + createInterceptor().process(request, context); + + assertEquals("bar", request.getFirstHeader("foo").getValue()); + assertEquals("wuzzle", request.getFirstHeader("Signature").getValue()); + assertNull(request.getFirstHeader("content-length")); + assertEquals("/foo-2017-02-25%2Cfoo-2017-02-26/_search", request.getFirstHeader("resourcePath").getValue()); + } + + private static AWSRequestSigningApacheInterceptor createInterceptor() { + AWSCredentialsProvider anonymousCredentialsProvider = + new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()); + return new AWSRequestSigningApacheInterceptor("servicename", + new AddHeaderSigner("Signature", "wuzzle"), + anonymousCredentialsProvider); + + } +} \ No newline at end of file diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/ByteTypeTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/ByteTypeTests.java new file mode 100644 index 0000000000..75652c460d --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/ByteTypeTests.java @@ -0,0 +1,117 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.SQLDataException; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.*; + +public class ByteTypeTests { + @ParameterizedTest + @CsvSource(value = { + "127, 127", // Byte.MAX_VALUE + "-128, -128", // Byte.MIN_VALUE + "100, 100", + "-94, -94", + "45.40, 45", + "-100.95, -101", + "127.2, 127", + "-128.41, -128" + }) + void testByteFromValidString(String stringValue, byte expectedValue) { + byte byteValue = Assertions.assertDoesNotThrow( + () -> ByteType.INSTANCE.fromValue(stringValue, null)); + assertEquals(expectedValue, byteValue); + } + + @ParameterizedTest + @CsvSource(value = { + "128", + "21474836470", + "-129", + "127.6", + "-128.6" + }) + void testByteFromOutOfRangeString(String stringValue) { + assertThrows(SQLDataException.class, + () -> ByteType.INSTANCE.fromValue(stringValue, null)); + } + + @ParameterizedTest + @MethodSource("validRangeNumberProvider") + void testByteFromValidRangeNumber(Number numberValue, byte expectedValue) { + byte byteValue = Assertions.assertDoesNotThrow( + () -> ByteType.INSTANCE.fromValue(numberValue, null)); + assertEquals(expectedValue, byteValue); 
+ } + + + @ParameterizedTest + @MethodSource("outOfRangeNumberProvider") + void testByteFromOutOfRangeNumber(Number numberValue) { + SQLDataException ex = assertThrows(SQLDataException.class, + () -> ByteType.INSTANCE.fromValue(numberValue, null)); + assertTrue(ex.getMessage().contains("out of range")); + } + + private static Stream outOfRangeNumberProvider() { + return Stream.of( + // ints + Arguments.of(128), + Arguments.of(-129), + + // longs + Arguments.of(128L), + Arguments.of(-129L), + + // doubles + Arguments.of(127.6D), + Arguments.of(-128.55D) + ); + } + + + private static Stream validRangeNumberProvider() { + return Stream.of( + // ints + Arguments.of(127, (byte) 127), + Arguments.of(-128, (byte) -128), + + // longs + Arguments.of(127L, (byte) 127), + Arguments.of(125L, (byte) 125), + Arguments.of(-128L, (byte) -128), + + // doubles + Arguments.of(127.20D, (byte) 127), + Arguments.of(-128.20D, (byte) -128), + Arguments.of(125D, (byte) 125), + + // floats + Arguments.of(127.20f, (byte) 127), + Arguments.of(-128.20f, (byte) -128), + Arguments.of(125f, (byte) 125) + ); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/DateTypeTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/DateTypeTests.java new file mode 100644 index 0000000000..5809126e61 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/DateTypeTests.java @@ -0,0 +1,102 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import com.amazon.opendistroforelasticsearch.jdbc.test.UTCTimeZoneTestExtension; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.Date; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.HashMap; +import java.util.Map; +import java.util.TimeZone; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; + +@ExtendWith(UTCTimeZoneTestExtension.class) +public class DateTypeTests { + + @ParameterizedTest + @CsvSource(value = { + "2015-01-01, 1420070400000", + "1972-12-31, 94608000000", + "1950-01-01, -631152000000" + }) + void testDateFromStringDefaultTZ(String stringValue, long longValue) { + Date date = Assertions.assertDoesNotThrow( + () -> DateType.INSTANCE.fromValue(stringValue, null)); + assertEquals(longValue, date.getTime()); + } + + @ParameterizedTest + @CsvSource(value = { + "2015-01-01, PST, 1420099200000", + "1972-12-31, PST, 94636800000", + "1950-01-01, PST, -631123200000" + }) + void testDateFromStringCustomTZ(String stringValue, String timezone, long longValue) { + Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone(timezone)); + Map conversionParams = new HashMap<>(); + conversionParams.put("calendar", calendar); + + Date date = Assertions.assertDoesNotThrow( + () -> DateType.INSTANCE.fromValue(stringValue, conversionParams)); + assertEquals(longValue, date.getTime()); + } + + @ParameterizedTest + @MethodSource("numberProvider") + void testDateFromNumber(Number 
numericValue) { + Timestamp timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(numericValue, null)); + assertEquals(numericValue.longValue(), timestamp.getTime()); + + // timestamp does not matter when converting from numeric value + Map conversionParams = new HashMap<>(); + conversionParams.put("calendar", Calendar.getInstance(TimeZone.getTimeZone("PST"))); + timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(numericValue, conversionParams)); + assertEquals(numericValue.longValue(), timestamp.getTime()); + } + + private static Stream numberProvider() { + return Stream.of( + // longs + Arguments.of(1245137332333L), + Arguments.of(1420101286778L), + Arguments.of(1L), + Arguments.of(0L), + Arguments.of(-10023456L), + + // ints + Arguments.of(1245137332), + Arguments.of(1420101286), + Arguments.of(1), + Arguments.of(0), + Arguments.of(-10023456) + ); + } + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/FloatTypeTest.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/FloatTypeTest.java new file mode 100644 index 0000000000..ffcd3134ba --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/FloatTypeTest.java @@ -0,0 +1,84 @@ +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.SQLDataException; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class FloatTypeTest { + @ParameterizedTest + @CsvSource(value = { + "2147483647, 2147483647", // 
Integer.MAX_VALUE + "-2147483648, -2147483648", // Integer.MIN_VALUE + "9460800, 9460800", + "100, 100", + "-9460800, -9460800", + "100.25, 100.25", + "100.80, 100.80", + "-100.80, -100.80", + "0, 0", + "3.4028234E38, 3.4028234E38" + }) + void testIntegerFromValidString(String stringValue, float expectedValue) { + float floatValue = Assertions.assertDoesNotThrow( + () -> FloatType.INSTANCE.fromValue(stringValue, null)); + assertEquals(expectedValue, floatValue); + } + + @ParameterizedTest + @CsvSource(value = { + "3.4028235E38", + "3.4028235E39", + "-3.4028235E38", + "-3.4028235E39" + }) + void testIntegerFromOutOfRangeString(String stringValue) { + assertThrows( + SQLDataException.class, + () -> FloatType.INSTANCE.fromValue(stringValue, null)); + } + + @ParameterizedTest + @MethodSource("outOfRangeNumberProvider") + void testIntegerFromOutOfRangeNumber(Number numberValue) { + SQLDataException ex = assertThrows(SQLDataException.class, + () -> IntegerType.INSTANCE.fromValue(numberValue, null)); + assertTrue(ex.getMessage().contains("out of range")); + } + + @ParameterizedTest + @MethodSource("validRangeNumberProvider") + void testIntegerFromValidRangeNumber(Number numberValue, float expectedValue) { + float floatValue = Assertions.assertDoesNotThrow( + () -> FloatType.INSTANCE.fromValue(numberValue, null)); + assertEquals(expectedValue, floatValue); + } + + private static Stream outOfRangeNumberProvider() { + return Stream.of( + // floats + Arguments.of(Float.MAX_VALUE), + Arguments.of(-Float.MAX_VALUE) + ); + } + + private static Stream validRangeNumberProvider() { + return Stream.of( + // floats + Arguments.of(3.14f, 3.14f), + Arguments.of(-3.14f, -3.14f), + Arguments.of(0, 0), + Arguments.of(0x1.fffffdP+127f, 0x1.fffffdP+127f), + Arguments.of(-0x1.fffffdP+127f, -0x1.fffffdP+127f) + ); + } + +} \ No newline at end of file diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/IntegerTypeTests.java 
b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/IntegerTypeTests.java new file mode 100644 index 0000000000..96724ba5c4 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/IntegerTypeTests.java @@ -0,0 +1,109 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + + +import java.sql.SQLDataException; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.*; + +public class IntegerTypeTests { + + @ParameterizedTest + @CsvSource(value = { + "2147483647, 2147483647", // Integer.MAX_VALUE + "-2147483648, -2147483648", // Integer.MIN_VALUE + "9460800, 9460800", + "100, 100", + "-9460800, -9460800", + "100.25, 100", + "100.80, 101", + "-100.80, -101" + }) + void testIntegerFromValidString(String stringValue, int expectedValue) { + int integerValue = Assertions.assertDoesNotThrow( + () -> IntegerType.INSTANCE.fromValue(stringValue, null)); + assertEquals(expectedValue, integerValue); + } + + @ParameterizedTest + @CsvSource(value = { + "2147483648", + "21474836470", + "-2147483649", + "2147483647.6" + }) + void 
testIntegerFromOutOfRangeString(String stringValue) { + assertThrows(SQLDataException.class, + () -> IntegerType.INSTANCE.fromValue(stringValue, null)); + } + + @ParameterizedTest + @MethodSource("outOfRangeNumberProvider") + void testIntegerFromOutOfRangeNumber(Number numberValue) { + SQLDataException ex = assertThrows(SQLDataException.class, + () -> IntegerType.INSTANCE.fromValue(numberValue, null)); + assertTrue(ex.getMessage().contains("out of range")); + } + + @ParameterizedTest + @MethodSource("validRangeNumberProvider") + void testIntegerFromValidRangeNumber(Number numberValue, int expectedValue) { + int intValue = Assertions.assertDoesNotThrow( + () -> IntegerType.INSTANCE.fromValue(numberValue, null)); + assertEquals(expectedValue, intValue); + } + + private static Stream outOfRangeNumberProvider() { + return Stream.of( + // longs + Arguments.of(2147483648L), + Arguments.of(-2147483649L), + + // doubles + Arguments.of(21474836400D), + Arguments.of(-21474836400D), + Arguments.of(2147483647.61D) + ); + } + + private static Stream validRangeNumberProvider() { + return Stream.of( + // longs + Arguments.of(2147483647L, 2147483647), + Arguments.of(-2147483648L, -2147483648), + + // doubles + Arguments.of(2147483647.0D, 2147483647), + Arguments.of(-2147483648.0D, -2147483648), + Arguments.of(2147483647.21D, 2147483647), + Arguments.of(2147483646.81D, 2147483647), + + // shorts + Arguments.of((short) 32767, 32767), + Arguments.of((short) -32768, -32768), + Arguments.of((short) 250, 250) + ); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/KeywordTypeTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/KeywordTypeTests.java new file mode 100644 index 0000000000..3fc7a35798 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/KeywordTypeTests.java @@ -0,0 +1,30 @@ +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import 
org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class KeywordTypeTests { + + @ParameterizedTest + @MethodSource("validIpStringProvider") + void testIpFromValidIpString(String stringValue, String expectedValue) { + String result = Assertions.assertDoesNotThrow( + () -> StringType.INSTANCE.fromValue(stringValue, null)); + assertEquals(expectedValue, result); + } + + private static Stream validIpStringProvider() { + return Stream.of( + Arguments.of("199.72.81.55", "199.72.81.55"), + Arguments.of("205.212.115.106", "205.212.115.106"), + Arguments.of("255.255.255.255", "255.255.255.255"), + Arguments.of("255.0.0.0", "255.0.0.0") + ); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/LongTypeTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/LongTypeTests.java new file mode 100644 index 0000000000..8ec29beb44 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/LongTypeTests.java @@ -0,0 +1,95 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.SQLDataException; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.*; + +public class LongTypeTests { + + @ParameterizedTest + @CsvSource(value = { + "9223372036854775807, 9223372036854775807", // Long.MAX_VALUE + "-9223372036854775808, -9223372036854775808", // Long.MIN_VALUE + "9460800, 9460800", + "100, 100", + "-9460800, -9460800", + "100.25, 100", + "100.80, 101", + "-100000123456.80, -100000123457", + "-100000123456.30, -100000123456" + }) + void testLongFromValidString(String stringValue, long expectedValue) { + long longValue = Assertions.assertDoesNotThrow( + () -> LongType.INSTANCE.fromValue(stringValue, null)); + assertEquals(expectedValue, longValue); + } + + @ParameterizedTest + @CsvSource(value = { + "9223372036854775809", + "9223372036854775807.8", + "-9223372036854775809", + "-9223372036854775808.6" + }) + void testLongFromOutOfRangeString(String stringValue) { + assertThrows(SQLDataException.class, + () -> LongType.INSTANCE.fromValue(stringValue, null)); + } + + @ParameterizedTest + @MethodSource("validRangeNumberProvider") + void testLongFromValidRangeNumber(Number numberValue, long expectedValue) { + long longValue = Assertions.assertDoesNotThrow( + () -> LongType.INSTANCE.fromValue(numberValue, null)); + assertEquals(expectedValue, longValue); + } + + private static Stream validRangeNumberProvider() { + return Stream.of( + + // doubles + Arguments.of(2147483647.0D, 2147483647L), + Arguments.of(-2147483648.0D, -2147483648L), + Arguments.of(2147483647.21D, 2147483647L), + Arguments.of(2147483646.81D, 2147483647L), + + // ints + Arguments.of(2147483647, 2147483647L), + 
Arguments.of(-2147483648, -2147483648L), + Arguments.of(9999, 9999L), + + // shorts + Arguments.of((short) 32767, 32767), + Arguments.of((short) -32768, -32768), + Arguments.of((short) 250, 250), + + // floats + Arguments.of(32767.8f, 32768L), + Arguments.of(-32768.2f, -32768L), + Arguments.of(250.1f, 250L) + ); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/ShortTypeTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/ShortTypeTests.java new file mode 100644 index 0000000000..f211ab87c8 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/ShortTypeTests.java @@ -0,0 +1,107 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.SQLDataException; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.*; + +public class ShortTypeTests { + @ParameterizedTest + @CsvSource(value = { + "32767, 32767", // Short.MAX_VALUE + "-32768, -32768", // Short.MIN_VALUE + "100, 100", + "-9460, -9460", + "45.40, 45", + "100.95, 101" + }) + void testShortFromValidString(String stringValue, short expectedValue) { + short shortValue = Assertions.assertDoesNotThrow( + () -> ShortType.INSTANCE.fromValue(stringValue, null)); + assertEquals(expectedValue, shortValue); + } + + @ParameterizedTest + @CsvSource(value = { + "32768", + "21474836470", + "-32769" + }) + void testShortFromOutOfRangeString(String stringValue) { + assertThrows(SQLDataException.class, + () -> ShortType.INSTANCE.fromValue(stringValue, null)); + } + + @ParameterizedTest + @MethodSource("outOfRangeNumberProvider") + void testShortFromOutOfRangeNumber(Number numberValue) { + SQLDataException ex = assertThrows(SQLDataException.class, + () -> ShortType.INSTANCE.fromValue(numberValue, null)); + assertTrue(ex.getMessage().contains("out of range")); + } + + + @ParameterizedTest + @MethodSource("validRangeNumberProvider") + void testShortFromValidRangeNumber(Number numberValue, short expectedValue) { + short shortValue = Assertions.assertDoesNotThrow( + () -> ShortType.INSTANCE.fromValue(numberValue, null)); + assertEquals(expectedValue, shortValue); + } + + private static Stream outOfRangeNumberProvider() { + return Stream.of( + // ints + Arguments.of(32768), + Arguments.of(-32769), + + // longs + Arguments.of(2147483648L), + Arguments.of(-2147483649L), + + // doubles + 
Arguments.of(21474836400D), + Arguments.of(-21474836400D) + ); + } + + private static Stream validRangeNumberProvider() { + return Stream.of( + // ints + Arguments.of(32767, (short) 32767), + Arguments.of(-32768, (short) -32768), + + // longs + Arguments.of(32767L, (short) 32767), + Arguments.of(250L, (short) 250), + Arguments.of(-32768L, (short) -32768), + + // doubles + Arguments.of(32767.20D, (short) 32767), + Arguments.of(-32768.20D, (short) -32768), + Arguments.of(250D, (short) 250) + ); + } +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/TimestampTypeTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/TimestampTypeTests.java new file mode 100644 index 0000000000..9fcd64d260 --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/TimestampTypeTests.java @@ -0,0 +1,125 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +import com.amazon.opendistroforelasticsearch.jdbc.test.UTCTimeZoneTestExtension; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.HashMap; +import java.util.Map; +import java.util.TimeZone; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +@ExtendWith(UTCTimeZoneTestExtension.class) +public class TimestampTypeTests { + + // Test inputs here assume default JVM TimeZone of UTC. + // The UTCTimeZoneTestExtension applied to this class ensures + // the tests see behavior consistent with a JVM running under + // a UTC TimeZone. 
+ + @ParameterizedTest + @CsvSource(value = { + "2009-06-16T07:28:52.333, 1245137332333", + "2015-01-01 00:34:46, 1420072486000", + "2015-01-01 00:34:46.778, 1420072486778", + "2015-01-01 00:34:46.778+00:00, 1420072486778", + "2015-01-01 00:34:46.778Z, 1420072486778", + "2015-01-01T00:34:46.778+01:00, 1420068886778", + "2015-01-01 00:34:46.778-02, 1420079686778", + }) + void testTimestampFromStringDefaultTZ(String stringValue, long longValue) { + Timestamp timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(stringValue, null)); + assertEquals(longValue, timestamp.getTime()); + } + + @ParameterizedTest + @CsvSource(value = { + "2009-06-16T07:28:52.333, UTC, 1245137332333", + "2015-01-01 00:34:46, PST, 1420101286000", + "2015-01-01 00:34:46.778, PST, 1420101286778" + }) + void testTimestampFromStringCustomTZ(String stringValue, String timezone, long longValue) { + Map conversionParams = new HashMap<>(); + conversionParams.put("calendar", Calendar.getInstance(TimeZone.getTimeZone(timezone))); + Timestamp timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(stringValue, conversionParams)); + assertEquals(longValue, timestamp.getTime()); + } + + @ParameterizedTest + @MethodSource("numberProvider") + void testTimestampFromNumber(Number numericValue) { + Timestamp timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(numericValue, null)); + assertEquals(numericValue.longValue(), timestamp.getTime()); + + // timestamp does not matter when converting from numeric value + Map conversionParams = new HashMap<>(); + conversionParams.put("calendar", Calendar.getInstance(TimeZone.getTimeZone("PST"))); + timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(numericValue, conversionParams)); + assertEquals(numericValue.longValue(), timestamp.getTime()); + } + + private static Stream numberProvider() { + return Stream.of( + // longs + 
Arguments.of(1245137332333L), + Arguments.of(1420101286778L), + Arguments.of(1L), + Arguments.of(0L), + Arguments.of(-10023456L), + + // ints + Arguments.of(1245137332), + Arguments.of(1420101286), + Arguments.of(1), + Arguments.of(0), + Arguments.of(-10023456) + ); + } + + @Test + void testTimestampFromNull() { + Timestamp timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(null, null)); + assertNull(timestamp); + + // timestamp does not matter when converting from null value + Map conversionParams = new HashMap<>(); + conversionParams.put("calendar", Calendar.getInstance(TimeZone.getTimeZone("PST"))); + timestamp = Assertions.assertDoesNotThrow( + () -> TimestampType.INSTANCE.fromValue(null, conversionParams)); + assertNull(timestamp); + } + + +} diff --git a/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/TypesTests.java b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/TypesTests.java new file mode 100644 index 0000000000..055cf27f7f --- /dev/null +++ b/sql-jdbc/src/test/java/com/amazon/opendistroforelasticsearch/jdbc/types/TypesTests.java @@ -0,0 +1,24 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.jdbc.types; + +public class TypesTests { + + public void testIntegerTypeConverter() { + TypeConverters.IntegerTypeConverter tc = new TypeConverters.IntegerTypeConverter(); + } +} diff --git a/sql-jdbc/src/test/resources/mock/jks/keystore_with_client_key.jks b/sql-jdbc/src/test/resources/mock/jks/keystore_with_client_key.jks new file mode 100644 index 0000000000..b768b0c504 Binary files /dev/null and b/sql-jdbc/src/test/resources/mock/jks/keystore_with_client_key.jks differ diff --git a/sql-jdbc/src/test/resources/mock/jks/keystore_with_non_localhost_server_key.jks b/sql-jdbc/src/test/resources/mock/jks/keystore_with_non_localhost_server_key.jks new file mode 100644 index 0000000000..6cd7863663 Binary files /dev/null and b/sql-jdbc/src/test/resources/mock/jks/keystore_with_non_localhost_server_key.jks differ diff --git a/sql-jdbc/src/test/resources/mock/jks/keystore_with_server_key.jks b/sql-jdbc/src/test/resources/mock/jks/keystore_with_server_key.jks new file mode 100644 index 0000000000..c59db29b86 Binary files /dev/null and b/sql-jdbc/src/test/resources/mock/jks/keystore_with_server_key.jks differ diff --git a/sql-jdbc/src/test/resources/mock/jks/truststore_with_client_cert.jks b/sql-jdbc/src/test/resources/mock/jks/truststore_with_client_cert.jks new file mode 100644 index 0000000000..7a95cafa0e Binary files /dev/null and b/sql-jdbc/src/test/resources/mock/jks/truststore_with_client_cert.jks differ diff --git a/sql-jdbc/src/test/resources/mock/jks/truststore_with_server_cert.jks b/sql-jdbc/src/test/resources/mock/jks/truststore_with_server_cert.jks new file mode 100644 index 0000000000..a1648dd722 Binary files /dev/null and b/sql-jdbc/src/test/resources/mock/jks/truststore_with_server_cert.jks differ diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_00.json b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_00.json new file mode 100644 
index 0000000000..090cac8eb3 --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_00.json @@ -0,0 +1,30 @@ +{ + "schema": [ + { + "name": "firstname", + "type": "text" + }, + { + "name": "age", + "type": "long" + } + ], + "cursor": "abcde_1", + "total": 20, + "datarows": [ + [ + "Amber", + 32 + ], + [ + "Hattie", + 36 + ], + [ + "Nanette", + 28 + ] + ], + "size": 3, + "status": 200 +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_01.json b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_01.json new file mode 100644 index 0000000000..3df9caa1ea --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_01.json @@ -0,0 +1,17 @@ +{ + "cursor": "abcde_2", + "datarows": [ + [ + "Dale", + 33 + ], + [ + "Elinor", + 36 + ], + [ + "Virginia", + 39 + ] + ] +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_02.json b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_02.json new file mode 100644 index 0000000000..d76d17ade3 --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_02.json @@ -0,0 +1,17 @@ +{ + "cursor": "abcde_3", + "datarows": [ + [ + "Dillard", + 34 + ], + [ + "Mcgee", + 39 + ], + [ + "Aurelia", + 37 + ] + ] +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_03.json b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_03.json new file mode 100644 index 0000000000..99b4448df7 --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/cursor/queryresponse_accounts_03.json @@ -0,0 +1,16 @@ +{ + "datarows": [ + [ + "Fulton", + 23 + ], + [ + "Burton", + 31 + ], + [ + "Josie", + 32 + ] + ] +} \ No newline at end of file diff --git 
a/sql-jdbc/src/test/resources/mock/protocol/json/nyctaxis_queryrequest.json b/sql-jdbc/src/test/resources/mock/protocol/json/nyctaxis_queryrequest.json new file mode 100644 index 0000000000..6a3210782e --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/nyctaxis_queryrequest.json @@ -0,0 +1,3 @@ +{ + "query": "select pickup_datetime, trip_type, passenger_count, fare_amount, extra, vendor_id from nyc_taxis LIMIT 5" +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_internal_server_error.json b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_internal_server_error.json new file mode 100644 index 0000000000..4fa8d9154b --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_internal_server_error.json @@ -0,0 +1,8 @@ +{ + "error": { + "reason": "error reason", + "details": "java.lang.NullPointerException\n\tat org.elasticsearch.plugin.nlpcn.Schema.getTypeFromMetaData(Schema.java:156)\n\tat org.elasticsearch.plugin.nlpcn.Schema.populateColumns(Schema.java:146)\n\tat java.base/java.lang.Thread.run(Thread.java:844)\n", + "type": "java.lang.NullPointerException" + }, + "status": 500 +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_nullablefields.json b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_nullablefields.json new file mode 100644 index 0000000000..780a075594 --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_nullablefields.json @@ -0,0 +1,85 @@ +{ + "schema": [ + { + "name": "testBoolean", + "type": "boolean" + }, + { + "name": "docId", + "type": "text" + }, + { + "name": "testByte", + "type": "byte" + }, + { + "name": "testFloat", + "type": "float" + }, + { + "name": "testLong", + "type": "long" + }, + { + "name": "testShort", + "type": "short" + }, + { + "name": "testHalfFloat", + "type": "half_float" + }, + { + "name": "testTimeStamp", + "type": "date" + }, + 
{ + "name": "testScaledFloat", + "type": "scaled_float" + }, + { + "name": "testKeyword", + "type": "keyword" + }, + { + "name": "testText", + "type": "text" + }, + { + "name": "testDouble", + "type": "double" + } + ], + "total": 2, + "datarows": [ + [ + null, + "2", + null, + 22.145135459218345, + null, + null, + 24.324234543532153, + "2015-01-01 12:10:30", + 24.324234543532153, + "Test String", + "document3", + null + ], + [ + true, + "1", + 126, + null, + 32000320003200030, + 29000, + null, + null, + null, + null, + null, + 22.312423148903218 + ] + ], + "size": 2, + "status": 200 +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_nyctaxis.json b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_nyctaxis.json new file mode 100644 index 0000000000..7f2db8ac70 --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_nyctaxis.json @@ -0,0 +1,73 @@ +{ + "schema": [ + { + "name": "pickup_datetime", + "type": "date" + }, + { + "name": "trip_type", + "type": "keyword" + }, + { + "name": "passenger_count", + "type": "integer" + }, + { + "name": "fare_amount", + "type": "scaled_float" + }, + { + "name": "extra", + "type": "scaled_float" + }, + { + "name": "vendor_id", + "type": "keyword" + } + ], + "total": 1000, + "datarows": [ + [ + "2015-01-01 00:34:42", + "1", + 1, + 5, + 0.5, + "2" + ], + [ + "2015-01-01 00:34:46", + "1", + 1, + 12, + 0.5, + "2" + ], + [ + "2015-01-01 00:34:44", + "1", + 1, + 5, + 0.5, + "1" + ], + [ + "2015-01-01 00:34:48", + "1", + 1, + 5, + 0.5, + "2" + ], + [ + "2015-01-01 00:34:53", + "1", + 1, + 24.5, + 0.5, + "2" + ] + ], + "size": 5, + "status": 200 +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_sonested.json b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_sonested.json new file mode 100644 index 0000000000..8f252243c4 --- /dev/null +++ 
b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_sonested.json @@ -0,0 +1,55 @@ +{ + "schema": [ + { + "name": "user", + "type": "keyword" + }, + { + "name": "title", + "type": "text" + }, + { + "name": "qid", + "type": "keyword" + }, + { + "name": "creationDate", + "type": "date" + } + ], + "total": 20000, + "datarows": [ + [ + "Jash", + "Display Progress Bar at the Time of Processing", + "1000000", + "2009-06-16T07:28:42.770" + ], + [ + "Michael Ecklund (804104)", + "PHP Sort array by field?", + "10000005", + "2012-04-03T19:25:46.213" + ], + [ + "farley (1311218)", + "Arrays in PHP seems to drop elements", + "10000007", + "2012-04-03T19:26:05.400" + ], + [ + "John Strickler (292614)", + "RESTful servlet URLs - servlet-mapping in web.xml", + "10000008", + "2012-04-03T19:26:09.137" + ], + [ + "rahulm (123536)", + "Descriptor conversion problem", + "1000001", + "2009-06-16T07:28:52.333" + ] + ], + "size": 5, + "status": 200 +} \ No newline at end of file diff --git a/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_with_alias_nyctaxis.json b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_with_alias_nyctaxis.json new file mode 100644 index 0000000000..153c9cb66b --- /dev/null +++ b/sql-jdbc/src/test/resources/mock/protocol/json/queryresponse_with_alias_nyctaxis.json @@ -0,0 +1,75 @@ +{ + "schema": [ + { + "name": "pickup_datetime", + "alias": "pdt", + "type": "date" + }, + { + "name": "trip_type", + "type": "keyword" + }, + { + "name": "passenger_count", + "alias": "pc", + "type": "integer" + }, + { + "name": "fare_amount", + "type": "scaled_float" + }, + { + "name": "extra", + "type": "scaled_float" + }, + { + "name": "vendor_id", + "type": "keyword" + } + ], + "total": 1000, + "datarows": [ + [ + "2015-01-01 00:34:42", + "1", + 1, + 5, + 0.5, + "2" + ], + [ + "2015-01-01 00:34:46", + "1", + 1, + 12, + 0.5, + "2" + ], + [ + "2015-01-01 00:34:44", + "1", + 1, + 5, + 0.5, + "1" + ], + [ + "2015-01-01 00:34:48", + "1", + 1, + 5, 
+ 0.5, + "2" + ], + [ + "2015-01-01 00:34:53", + "1", + 1, + 24.5, + 0.5, + "2" + ] + ], + "size": 5, + "status": 200 +} \ No newline at end of file diff --git a/sql-odbc/.clang-format b/sql-odbc/.clang-format new file mode 100644 index 0000000000..b8fd6433b6 --- /dev/null +++ b/sql-odbc/.clang-format @@ -0,0 +1,16 @@ +# Common settings +BasedOnStyle: Google +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: 'false' +AllowShortLoopsOnASingleLine: 'false' +BreakBeforeBinaryOperators: NonAssignment +IndentWidth: '4' +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: 'false' +SpacesInAngles: 'true' +SpacesInParentheses: 'false' +SpacesInSquareBrackets: 'false' +TabWidth: '4' +UseTab: 'false' + +# See https://zed0.co.uk/clang-format-configurator/ for generating this file. \ No newline at end of file diff --git a/sql-odbc/.gitignore b/sql-odbc/.gitignore new file mode 100644 index 0000000000..3dd5ca542e --- /dev/null +++ b/sql-odbc/.gitignore @@ -0,0 +1,58 @@ +*.aps +*.obj +*.iobj +*.ipdb +*.pdb +*.exp +*.lib +*.res +*.idb +*.res +*.db +*.opendb +*.suo +*.ipch +*.cache +*.log +bin32/* +bin64/* +lib32/* +lib64/* +*.tlog +include/* +packages/* +# Visual Studio +.vs/* +# Visual Studio Code +.vscode/* +src/psqlodbc/psqlodbcBuilder/x64_* +src/*/x64 +src/*/Win32 +src/*/*/x64 +src/*/*/Win32 +src/IntegrationTests/ITODBCResults/test_odbc_results1.cpp +src/IntegrationTests/ITODBCResults/memory_leak_report.txt +src/IntegrationTests/ITODBCHelper/ITODBCHelper.vcxproj.user +src/IntegrationTests/ITODBCExecution/memory_leak_report.txt +src/IntegrationTests/ITODBCResults/ITODBCResults.vcxproj.user +LICENSE.txt +/AWSSDK/ +/sdk-build/ +/CMakeFiles/ +/googletest/ +/installer/ +/IntegrationTests/ +/odfeenlist/ +/odfesqlodbc/ +/PerformanceTests/ +/UnitTests/ +*.filters +*.vcxproj +*.sln +cmake_install.cmake +CMakeCache.txt +CPackConfig.cmake +CPackSourceConfig.cmake +CTestTestfile.cmake +/sdk-build64/ +/cmake-build64/ diff --git 
a/sql-odbc/BUILD_INSTRUCTIONS.md b/sql-odbc/BUILD_INSTRUCTIONS.md new file mode 100644 index 0000000000..cefda0e6a5 --- /dev/null +++ b/sql-odbc/BUILD_INSTRUCTIONS.md @@ -0,0 +1,360 @@ +# Elasticsearch ODBC Driver Build Instructions + +The ElasticsearchODBC driver can be build on Windows and Mac. + +## Setting up Dependencies + +The driver [source code](https://github.com/opendistro-for-elasticsearch/sql-odbc) must be downloaded onto the system to build it. + +### Windows Dependencies + +Windows requires the following dependencies + +* [cmake](https://cmake.org/install/) +* [Visual Studio 2019](https://visualstudio.microsoft.com/vs/) (Other versions may work, but only 2019 has been tested) + +### Mac Dependencies + +Homebrew must be installed to manage packages, to install homebrew see the [homebrew homepage](https://brew.sh/). +Using homebrew, install the following packages using the command provided: +>brew install [package] +> +>* curl +>* cmake +>* libiodbc + +## Building the Driver + +Before building the driver, the build files for the system must be generated, this is done using cmake. + +### Providing AWS Credentials + +As project uses AWS services for AWS SIGV4 authentication, you must [provide AWS credentials](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html). + +### Setting up AWS SDK + +#### Windows +* Open Developer PowerShell for VS. +* Run aws_sdk_cpp_setup.ps1 script from the project's root directory. + +#### Mac +* Run aws_sdk_cpp_setup.sh script from the project's root directory. + +### Generating the Build Files + +Open the project's root directory in a command line interface of your choice. Execute +>**cmake ./src -D CMAKE_INSTALL_PREFIX=\/AWSSDK/** + +**Note:** It is desirable to not run cmake directly in the 'src' directory, because it will generate build files inline with code. 
+ +### General CMake Options + +**BUILD_WITH_TESTS** + +(Defaults to ON) If disabled, all tests and and test dependencies will be excluded from build which will optimize the installer package size. This option can set with the command line (using `-D`). + +### Building with Windows + +Building the driver on Windows is done using **Visual Studio**. +>Open **global_make_list.sln** with **Visual Studio 2019**. +> +>* Set the **Solution Configuration** to **Release** +>* Set the **Solution Platform** to **x64** +> +>**Build the solution** by right clicking it in the **Solution Explorer** and selecting **Build Solution** + +### Building with Mac + +Building the driver on Mac is done using make. Using the CLI, enter: +>**make** + +## Output Files + +Building the driver will yield the driver, tests, and a library files (Windows only). + +### Output Location on Windows + +Compiling on Windows will output the tests and the driver to **bin64/Release** and the driver library file to **lib64/Release** directory. There are also some additional test infrastructure files which output to the **bin64/Release** directory and the **lib64/Release** directory. + +The driver can be consumed by linking to it using the library file (elasticodbc.lib in lib64/Release). BI tools can consume the driver by specifying the location of the dll (elasticodbc.dll in bin64/Release) in the [DSN](#setting-up-a-dsn). + +### Output Location on Mac + +Compiling on Mac will output the tests to **bin64** and the driver to **lib64**. There are also some additional test infrastructure files which output to the **lib64** directory. + +## Packaging installer + +Build the driver with `BUILD_WITH_TESTS` option disabled. + +#### Windows + +Open the project's build directory in Developer PowerShell for VS. +> msbuild .\PACKAGE.vcxproj -p:Configuration=Release + +Installer named as `Open Distro for Elasticsearch SQL ODBC Driver--Windows.msi` will be generated in the build directory. 
+ +#### Mac + +Run below command from the project's build directory. +>cpack . + +Installer named as `Open Distro for Elasticsearch SQL ODBC Driver--Darwin.pkg` will be generated in the build directory. + +## Running Tests + +Tests can be **executed directly**, or by using the **Test Runner**. + +**NOTES:** + +* A test DSN named `test_dsn` must be set up in order for certain tests in ITODBCConnection to pass. To configure the DSN, see the instructions, below. +* Datasets must be loaded into Elasticsearch using [kibana](https://www.elastic.co/guide/en/kibana/current/connect-to-elasticsearch.html). See the section on loading datasets below. + +### Windows Test DSN Setup + +1. Open `src/IntegrationTests/ITODBCConnection/test_dsn.reg`. + * This contains the registry entries needed for setting up `test_dsn`. +2. Do one of the following: + * As an Administrator, run a command prompt or Powershell and run `reg import <.reg-file>` to add the entries to your registry. + * Manually add the entries to your registry using Registry Editor. + +### Mac Test DSN Setup + +1. Open `src/IntegrationTests/ITODBCConnection/test_odbc.ini` and `src/IntegrationTests/ITODBCConnection/test_odbcinst.ini` + * These contain the minimal configuration necessary for setting up `test_dsn`. +2. Do one of the following: + * Add the following lines to your .bash_profile to point the driver to these files. + * `export ODBCINI=/src/IntegrationTests/ITODBCConnection/test_odbc.ini` + * `export ODBCINSTINI=/src/IntegrationTests/ITODBCConnection/test_odbcinst.ini` + * Manually add the entries to your existing `odbc.ini` and `odbcinst.ini` entries. (normally found at `~/.odbc.ini` and `~/.odbcinst.ini`) + +### Loading Test Datasets + +Loading a dataset requires an [elasticsearch](https://opendistro.github.io/for-elasticsearch-docs/docs/install/) service running with [kibana](https://opendistro.github.io/for-elasticsearch-docs/docs/kibana/). 
If either of these are missing, please refer to the documentation on how to set them up. + +Note, if you wish to work with SSL/TLS, you need to configure Elasticsearch and Kibana to support it. See Working With SSL/TLS below. + +First load the sample datasets provided by kibana. + +1. Select home (top left corner) +2. Select 'Load a data set and a Kibana dashboard' +3. Select 'Add data' under 'Sample flight data' +4. Select 'Add data' under 'Sample eCommerce orders' +5. Select 'Add data' under 'Sample web logs' + +Then load custom data sets using the kibana console. +Select the wrench on the left control panel. Enter the following commands into the console and hit the play button after each one. + +```json +PUT /kibana_sample_data_types + { + "mappings": { + "properties": { + "type_boolean" : { "type": "boolean"}, + "type_byte" : { "type": "byte"}, + "type_short" : { "type": "short"}, + "type_integer" : { "type": "integer"}, + "type_long" : { "type": "long"}, + "type_half_float" : { "type": "half_float"}, + "type_float" : { "type": "float"}, + "type_double" : { "type": "double"}, + "type_scaled_float" : { "type": "scaled_float", "scaling_factor": 100 }, + "type_keyword" : { "type": "keyword"}, + "type_text" : { "type": "text"}, + "type_date" : { "type": "date"}, + "type_object" : { "type": "object"}, + "type_nested" : { "type": "nested"} + } + } + } +``` + +```json +POST /kibana_sample_data_types/_doc +{ + "type_boolean": true, + "type_byte" : -120, + "type_short" : -2000, + "type_integer" :-350000000, + "type_long" : -8010000000, + "type_half_float" : -2.115, + "type_float" : -3.1512, + "type_double" : -5335.2215, + "type_scaled_float" : -100.1, + "type_keyword" : "goodbye", + "type_text" : "planet", + "type_date" : "2016-02-21T12:23:52.803Z", + "type_object" : { "foo" : "bar" }, + "type_nested" : {"foo":"bar"} +} +``` + +```json +POST /kibana_sample_data_types/_doc +{ + "type_boolean": false, + "type_byte" : 100, + "type_short" : 1000, + "type_integer" : 
250000000, + "type_long" : 8000000000, + "type_half_float" : 1.115, + "type_float" : 2.1512, + "type_double" : 25235.2215, + "type_scaled_float" : 100, + "type_keyword" : "hello", + "type_text" : "world", + "type_date" : "2018-07-22T12:23:52.803Z", + "type_object" : { "foo" : "bar" }, + "type_nested" : {"foo":"bar"} +} +``` + +### Working With SSL/TLS + +To disable SSL/TLS in the tests, the main CMakeLists.txt file must be edited. This can be found in the project 'src' directory. In the 'General compiler definitions' in the CMakeLists.txt file, USE_SSL is set. Remove this from the add_compile_definitions function to stop SSL/TLS from being used in the tests. + +To enable SSL/TLS on Elasticsearch, you must edit the Elasticsearch.yml file, found in the config directory of Elasticsearch. An example Elasticsearch yml file can be found in the dev folder of this project. The certificates specified MUST be in the config directory of the Elasticsearch instance. For more information, please refer to the [Elasticsearch security settings documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html). + +If you plan to use Kibana, as suggested for this project, you must also edit the Kibana settings. Notice, when specifying a certificate for Kibana, you do not need to place it in the Kibana config directory, but instead must provide the absolute path to it. An example Kibana.yml file can be found in the dev folder of this project. For more information, please refer to the [Kibana settings documentation](https://www.elastic.co/guide/en/kibana/current/settings.html). 
+ +### Running Tests directly on Windows + +Tests can be executed directly using **Visual Studio** by setting the desired test as a **Start up Project** + +>* **Right click** the desired test project in the **Solution Explorer** +>* Select **Set as Startup Project** +>* Run the test by selecting **Local Windows Debugger** in the toolbar at the top of the application + +For more information, see the [Visual Studio Console Application documentation](https://docs.microsoft.com/en-us/cpp/build/vscpp-step-2-build?view=vs-2019). + +### Running Tests directly on Mac + +Tests can be executed using a command line interface. From the project root directory, enter: +> **bin64/** + +To execute a test. + +### Running Tests using the Test Runner + +The **Test Runner** requires [python](https://wiki.python.org/moin/BeginnersGuide/Download) to be installed on the system. Running the **Test Runner** will execute all the tests and compile a report with the results. The report indicates the execution status of all tests along with the execution time. To find error details of any failed test, hover over the test. + +#### Running Tests using the Test Runner on Windows + +Open the project's root directory in a command line interface of your choice. Execute +>**.\run_test_runner.bat** + +The **Test Runner** has been tried and tested with [Python3.8](https://www.python.org/downloads/release/python-380/) on **Windows systems**. Other versions of Python may work, but are untested. + +#### Running Tests using the Test Runner on Mac + +Open the project's root directory in a command line interface of your choice. Execute +>**./run_test_runner.sh** + +The **Test Runner** has been tried and tested with [Python3.7.6](https://www.python.org/downloads/release/python-376/) on **Mac systems**. Other versions of Python may work, but are untested. 
+ +### Running Tests with Coverage (Mac only) + +(using a CMake script provided by George Cave (StableCoder) under the Apache 2.0 license, found [here](https://github.com/StableCoder/cmake-scripts/blob/master/code-coverage.cmake)) + +> **NOTE**: Before building with coverage, make sure the following directory is in your PATH environment variable: +> `/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin` + +To build the tests with code coverage enabled, set the `CODE_COVERAGE` variable to `ON` when preparing your CMake build. +```bash +cmake ... -DBUILD_WITH_TESTS=ON -DCODE_COVERAGE=ON +``` + +To get coverage for the driver library, you must use the `ccov-all` target, which runs all test suites and components with coverage. +```bash +make ccov-all +``` + +This will generate an HTML report at `/ccov/all-merged/index.html`, which can be opened in a web browser to view a summary of the overall code coverage, as well as line-by-line coverage for individual files. + +For more information interpreting this report, see https://clang.llvm.org/docs/SourceBasedCodeCoverage.html#interpreting-reports. + +## Setting up a DSN + +A **D**ata **S**ouce **N**ame is used to store driver information in the system. By storing the information in the system, the information does not need to be specified each time the driver connects. + +### Windows + +> To setup DSN, add following keys in the Registry +> + >* **HKEY_LOCAL_MACHINE/SOFTWARE/ ODBC/ODBC.INI** : Contains a key for each Data Source Name (DSN) + >* **HKEY_LOCAL_MACHINE/SOFTWARE/ ODBC/ODBC.INI/ODBC Data Sources** : Lists the data sources + >* **HKEY_LOCAL_MACHINE/SOFTWARE/ODBC/ODBCINST.INI** : Define each driver's name and setup location + >* **HKEY_LOCAL_MACHINE/SOFTWARE/ODBC/ODBCINST.INI/ODBC Drivers** : Lists the installed drivers. +> +>These keys can be added manually in the Registry Editor (Start > Run > Regedit) one by one. Alternatively, keys can be added together as follows: +> +>1. 
Modify the appropriate values for these keys in `src/IntegrationTests/ITODBCConnection/test_dsn.reg` +>2. Double click on the `test_dsn.reg` file. +>3. Click `Yes` on the confirmation window to add keys in the registry. + +### Mac + +**iODBC Administrator** can be used to setup a **DSN** on Mac. + +> 1. Open **iODBC Administrator** + > * **iODBC Administrator** is installed with **iODBC Driver Manager** and can be found by searching the **Spotlight** (or found in **/Applications**) +> 2. Go to the **ODBC Drivers** tab +> 3. Click **Add a Driver** + > * **Description of the Driver**: The driver name used for the **ODBC connections** (ex. *ElasticsearchODBC*) + > * **Driver File Name**: The path to the **driver file** (*< Project Directory >/lib64/libelasticodbc.dylib*) + > * **Setup File Name**: The path to the **driver file** (*< Project Directory >/lib64/libelasticodbc.dylib*) + > * Set as a **User** driver + > * Select **OK** to save the options +> 4. Go to the **User DSN** tab +> 5. Select **Add** + > * Choose the driver that was added in **Step 3** + > * **Data Source Name (DSN)**: The name of the DSN used to store connection options (ex. *ElasticsearchODBC*) + > * **Comment**: Not required + > * Add the following **key-value pairs** using the **'+'** button + > * **Host** | **localhost** // Or a different server endpoint + > * **Port** | **9200** // Or whatever your endpoints port is + > * **Username** | **admin** // Or whatever your endpoints username is + > * **Password** | **admin** // Or whatever your endpoints password is + > * Select **OK** to **save options** +> 6. Select **OK** to exit the **Administrator** + +If “General installer error” is encountered when saving the ODBC Driver, see Troubleshooting, below. + +## Working with Tableau + +[Tableau Desktop](https://www.tableau.com/products/desktop) must be installed on the target machine. + + 1. Open **Tableau Desktop** + 2. Select **More…** + 3. Select **Other Databases (ODBC)** + 4. 
In the **DSN drop-down**, select the *Elasticsearch DSN* you set up in the previous set of steps + 5. The options you added will *automatically* be filled into the **Connection Attributes** + 6. Select **Sign In** + 7. After a few seconds, Tableau will connect to your Elasticsearch server + +## Troubleshooting + +### iODBC Administrator: “General installer error” when saving new ODBC Driver + +Try the following: + +1. Create the folder ~/Library/ODBC, then try again +2. Create two files in ~/Library/ODBC, then open iODBC Administrator and verify the contents of **odbcinst.ini** and **odbc.ini** align with the format below. + * **odbcinst.ini** (will be found in **ODBC Drivers**) + >[ODBC Drivers] + \ = Installed + > + >[\] + Driver = \/lib64/libelasticodbc.dylib + Setup = \/lib64/libelasticodbc.dylib + + * **odbc.ini** (will be found in **User DSNs**) + >[ODBC Data Sources] + \ = \ + > + >[\] + Driver = \/lib64/libelasticodbc.dylib + Description = + Host = localhost + Port = 9200 + Username = admin + Password = admin diff --git a/sql-odbc/CODE_OF_CONDUCT.md b/sql-odbc/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..ec98f2b76e --- /dev/null +++ b/sql-odbc/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +## Code of Conduct + +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/sql-odbc/CONTRIBUTING.md b/sql-odbc/CONTRIBUTING.md new file mode 100644 index 0000000000..f25870380c --- /dev/null +++ b/sql-odbc/CONTRIBUTING.md @@ -0,0 +1,61 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. 
+ +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check [existing open](https://github.com/OpenDistro/elasticsearch-security-tlstool/issues), or [recently closed](https://github.com/OpenDistro/elasticsearch-security-tlstool/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *master* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 
GitHub provides additional documentation on
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/sql-odbc/NOTICE b/sql-odbc/NOTICE new file mode 100644 index 0000000000..3dec9cbc81 --- /dev/null +++ b/sql-odbc/NOTICE @@ -0,0 +1,2 @@ +Open Distro for Elasticsearch ODBC +Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. \ No newline at end of file diff --git a/sql-odbc/README.md b/sql-odbc/README.md new file mode 100644 index 0000000000..86fc8353b7 --- /dev/null +++ b/sql-odbc/README.md @@ -0,0 +1,95 @@ +# Open Distro for Elasticsearch ODBC Driver + +ElasticsearchODBC is a read-only ODBC driver for Windows and Mac for connecting to Open Distro for Elasticsearch SQL support. + +## Specifications + +The driver is compatible with ODBC 3.51. + +## Supported Versions + + + | Operating System | Version | Supported Bitness | + | ------------- |-------------| ----------------- | + | Windows | Windows 10 | 32-bit, 64-bit | + | MacOS | Catalina 10.15.4, Mojave 10.14.6 | 64-bit | + +## Installing the Driver + +You can use the installers generated as part of the most recent release. + +### Windows + +1. Run the `.msi` installer to install the Open Distro for Elasticsearch SQL ODBC Driver. + +To use the driver with Tableau: +1. Copy the `.tdc` file from `/resources` to `/Documents/My Tableau Repository/Datasources`. + +This will customize the connection from Tableau to Open Distro for Elasticsearch, ensuring that the correct forms of queries are used. + +### Mac + +iODBC Driver Manager should be installed before installing the Open Distro for Elasticsearch SQL ODBC Driver on Mac. + +1. Run the `.pkg` installer to install the Open Distro for Elasticsearch SQL ODBC Driver. +2. Configure a Driver and DSN entry for the Open Distro for Elasticsearch SQL ODBC Driver, following the instructions [here](./docs/user/mac_configure_dsn.md). + +To use the driver with Tableau: +1. Copy the `.tdc` file from `/resources` to `/Documents/My Tableau Repository/Datasources`. 
+ +This will customize the connection from Tableau to Open Distro for Elasticsearch, ensuring that the correct forms of queries are used. + +## Using the Driver + +The driver comes in the form of a library file: +* Windows: `odfesqlodbc.dll` +* Mac: `libodfesqlodbc.dylib` + +If using with ODBC compatible BI tools, refer to the tool documentation on configuring a new ODBC driver. In most cases, you will need to make the tool aware of the location of the driver library file and then use it to setup Open Distro for Elasticsearch database connections. + +### Connection Strings and Configuring the Driver + +A list of options available for configuring driver behaviour is available [here](./docs/user/configuration_options.md). + +To setup a connection, the driver uses an ODBC connection string. Connection strings are semicolon-delimited strings specifying the set of options to use for a connection. Typically, a connection string will either: + +1. specify a Data Source Name containing a pre-configured set of options (`DSN=xxx;User=xxx;Password=xxx;`) +2. or configure options explicitly using the string (`Host=xxx;Port=xxx;LogLevel=ES_DEBUG;...`) + +## Building from source + +### Building + +Please refer to the [build instructions](./BUILD_INSTRUCTIONS.md) for detailed build instructions on your platform. +If your PC is already setup to build the library, you can simply invoke cmake using + +> cmake ./src + +From the projects root directory, then build the project using Visual Studio (Windows) or make (Mac). + +* Visual Studio: Open **./global_make_list.sln** +* Make: Run `make` from the build root. + +### Testing + +**NOTE**: Some tests in ITODBCConnection will fail if a test DSN (Data Source Name) is not configured on your system. Refer to "Running Tests" in the [build instructions](./BUILD_INSTRUCTIONS.md) for more information on configuring this. 
+ +## Documentation + +Please refer to the [documentation](https://opendistro.github.io/for-elasticsearch-docs/) for detailed information on installing and configuring Open Distro for Elasticsearch. + +## Code of Conduct + +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). + +## Security issue notifications + +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. + +## Licensing + +See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + +## Copyright + +Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/sql-odbc/THIRD-PARTY b/sql-odbc/THIRD-PARTY new file mode 100644 index 0000000000..bb1ea3bb64 --- /dev/null +++ b/sql-odbc/THIRD-PARTY @@ -0,0 +1,1775 @@ +** aws-cpp-sdk-core -- https://github.com/aws/aws-sdk-cpp +** rapidjson -- https://github.com/Tencent/rapidjson +** rabbit -- https://github.com/mashiro/rabbit +** Visual Leak Detector -- https://github.com/KindDragon/vld +** PostgreSQL v12.0 -- https://www.postgresql.org/ftp/source/v12.0/ +** Googletest -- https://github.com/google/googletest +** StableCoder - code-coverage.cmake -- https://github.com/StableCoder/cmake-scripts/blob/master/code-coverage.cmake + +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND +DISTRIBUTION + +1. Definitions. + + + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this +document. + + + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ + + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + + + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + + + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and +configuration files. + + + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object +code, generated documentation, and conversions to other media +types. + + + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that is +included in or attached to the work (an example is provided in the Appendix +below). + + + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works +thereof. 
+ + + +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. For the purposes +of this definition, "submitted" means any form of electronic, verbal, or +written communication sent to the Licensor or its representatives, including +but not limited to communication on electronic mailing lists, source code +control systems, and issue tracking systems that are managed by, or on behalf +of, the Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise designated in +writing by the copyright owner as "Not a Contribution." + + + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this +section) patent license to make, have made, use, offer to sell, sell, import, +and otherwise transfer the Work, where such license applies only to those +patent claims licensable by such Contributor that are necessarily infringed by +their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. If You institute patent +litigation against any entity (including a cross-claim or counterclaim in a +lawsuit) alleging that the Work or a Contribution incorporated within the Work +constitutes direct or contributory patent infringement, then any patent +licenses granted to You under this License for that Work shall terminate as of +the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and in +Source or Object form, provided that You meet the following conditions: + +(a) You must give any other recipients of the Work or Derivative Works a copy +of this License; and + +(b) You must cause any modified files to carry prominent notices stating that +You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from the +Source form of the Work, excluding those notices that do not pertain to any +part of the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its distribution, then +any Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE 
text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a +whole, provided Your use, reproduction, and distribution of the Work otherwise +complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to the +Licensor shall be under the terms and conditions of this License, without any +additional terms or conditions. Notwithstanding the above, nothing herein shall +supersede or modify the terms of any separate license agreement you may have +executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, +trademarks, service marks, or product names of the Licensor, except as required +for reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in +writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in +tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to in +writing, shall any Contributor be liable to You for damages, including any +direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or inability to +use the Work (including but not limited to damages for loss of goodwill, work +stoppage, computer failure or malfunction, or any and all other commercial +damages or losses), even if such Contributor has been advised of the +possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or +Derivative Works thereof, You may choose to offer, and charge a fee for, +acceptance of support, warranty, indemnity, or other liability obligations +and/or rights consistent with this License. However, in accepting such +obligations, You may act only on Your own behalf and on Your sole +responsibility, not on behalf of any other Contributor, and only if You agree +to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
END OF TERMS AND +CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification +within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software + +distributed under the License is distributed on an "AS IS" BASIS, + +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +See the License for the specific language governing permissions and + +limitations under the License. + + +* For aws-cpp-sdk-core see also this required NOTICE: +Copyright 2013-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +=============================================================================== + +* For rapidjson see also this required NOTICE: + +Tencent is pleased to support the open source community by making RapidJSON available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License. 
+If you have downloaded a copy of the RapidJSON source code from Tencent, please note that RapidJSON source code is licensed under the MIT License, except for the third-party components listed below which are subject to different license terms. Your integration of RapidJSON into your own projects may require compliance with the MIT License, as well as the other licenses applicable to the third-party components included within RapidJSON. To avoid the problematic JSON license in your own projects, it's sufficient to exclude the bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. + +Other dependencies and licenses: + +Open Source Software Licensed Under the BSD License: +-------------------------------------------------------------------- + +The msinttypes r29 +Copyright (c) 2006-2013 Alexander Chemeris +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Open Source Software Licensed Under the JSON License: +-------------------------------------------------------------------- + +json.org +Copyright (c) 2002 JSON.org +All Rights Reserved. + +JSON_checker +Copyright (c) 2002 JSON.org +All Rights Reserved. + + +Terms of the JSON License: +--------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +Terms of the MIT License: +-------------------------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +=============================================================================== + +* For rabbit see also this required NOTICE: + +The MIT License (MIT) + +Copyright (c) 2013-2014 mashiro + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +=============================================================================== + +* For Visual Leak Detector see also this required NOTICE: + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. 
Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. 
Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. 
+ + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. 
(Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. 
+ + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. 
+ +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". 
Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. 
+ + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. 
+ + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. 
You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. 
For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. 
+Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + +=============================================================================== + +* For PostgreSQL v12.0 see also this required NOTICE: + + GNU LIBRARY GENERAL PUBLIC LICENSE + + Version 2, June 1991 + + + + Copyright (C) 1991 Free Software Foundation, Inc. + + 675 Mass Ave, Cambridge, MA 02139, USA + + Everyone is permitted to copy and distribute verbatim copies + + of this license document, but changing it is not allowed. + + + +[This is the first released version of the library GPL. It is + + numbered 2 because it goes with version 2 of the ordinary GPL.] + + + + Preamble + + + + The licenses for most software are designed to take away your + +freedom to share and change it. By contrast, the GNU General Public + +Licenses are intended to guarantee your freedom to share and change + +free software--to make sure the software is free for all its users. + + + + This license, the Library General Public License, applies to some + +specially designated Free Software Foundation software, and to any + +other libraries whose authors decide to use it. You can use it for + +your libraries, too. + + + + When we speak of free software, we are referring to freedom, not + +price. 
Our General Public Licenses are designed to make sure that you + +have the freedom to distribute copies of free software (and charge for + +this service if you wish), that you receive source code or can get it + +if you want it, that you can change the software or use pieces of it + +in new free programs; and that you know you can do these things. + + + + To protect your rights, we need to make restrictions that forbid + +anyone to deny you these rights or to ask you to surrender the rights. + +These restrictions translate to certain responsibilities for you if + +you distribute copies of the library, or if you modify it. + + + + For example, if you distribute copies of the library, whether gratis + +or for a fee, you must give the recipients all the rights that we gave + +you. You must make sure that they, too, receive or can get the source + +code. If you link a program with the library, you must provide + +complete object files to the recipients so that they can relink them + +with the library, after making changes to the library and recompiling + +it. And you must show them these terms so they know their rights. + + + + Our method of protecting your rights has two steps: (1) copyright + +the library, and (2) offer you this license which gives you legal + +permission to copy, distribute and/or modify the library. + + + + Also, for each distributor's protection, we want to make certain + +that everyone understands that there is no warranty for this free + +library. If the library is modified by someone else and passed on, we + +want its recipients to know that what they have is not the original + +version, so that any problems introduced by others will not reflect on + +the original authors' reputations. + + + + Finally, any free program is threatened constantly by software + +patents. 
We wish to avoid the danger that companies distributing free + +software will individually obtain patent licenses, thus in effect + +transforming the program into proprietary software. To prevent this, + +we have made it clear that any patent must be licensed for everyone's + +free use or not licensed at all. + + + + Most GNU software, including some libraries, is covered by the ordinary + +GNU General Public License, which was designed for utility programs. This + +license, the GNU Library General Public License, applies to certain + +designated libraries. This license is quite different from the ordinary + +one; be sure to read it in full, and don't assume that anything in it is + +the same as in the ordinary license. + + + + The reason we have a separate public license for some libraries is that + +they blur the distinction we usually make between modifying or adding to a + +program and simply using it. Linking a program with a library, without + +changing the library, is in some sense simply using the library, and is + +analogous to running a utility program or application program. However, in + +a textual and legal sense, the linked executable is a combined work, a + +derivative of the original library, and the ordinary General Public License + +treats it as such. + + + + Because of this blurred distinction, using the ordinary General + +Public License for libraries did not effectively promote software + +sharing, because most developers did not use the libraries. We + +concluded that weaker conditions might promote sharing better. + + + + However, unrestricted linking of non-free programs would deprive the + +users of those programs of all benefit from the free status of the + +libraries themselves. This Library General Public License is intended to + +permit developers of non-free programs to use free libraries, while + +preserving your freedom as a user of such programs to change the free + +libraries that are incorporated in them. 
(We have not seen how to achieve + +this as regards changes in header files, but we have achieved it as regards + +changes in the actual functions of the Library.) The hope is that this + +will lead to faster development of free libraries. + + + + The precise terms and conditions for copying, distribution and + +modification follow. Pay close attention to the difference between a + +"work based on the library" and a "work that uses the library". The + +former contains code derived from the library, while the latter only + +works together with the library. + + + + Note that it is possible for a library to be covered by the ordinary + +General Public License rather than by this special one. + + + + GNU LIBRARY GENERAL PUBLIC LICENSE + + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + + + 0. This License Agreement applies to any software library which + +contains a notice placed by the copyright holder or other authorized + +party saying it may be distributed under the terms of this Library + +General Public License (also called "this License"). Each licensee is + +addressed as "you". + + + + A "library" means a collection of software functions and/or data + +prepared so as to be conveniently linked with application programs + +(which use some of those functions and data) to form executables. + + + + The "Library", below, refers to any such software library or work + +which has been distributed under these terms. A "work based on the + +Library" means either the Library or any derivative work under + +copyright law: that is to say, a work containing the Library or a + +portion of it, either verbatim or with modifications and/or translated + +straightforwardly into another language. (Hereinafter, translation is + +included without limitation in the term "modification".) + + + + "Source code" for a work means the preferred form of the work for + +making modifications to it. 
For a library, complete source code means + +all the source code for all modules it contains, plus any associated + +interface definition files, plus the scripts used to control compilation + +and installation of the library. + + + + Activities other than copying, distribution and modification are not + +covered by this License; they are outside its scope. The act of + +running a program using the Library is not restricted, and output from + +such a program is covered only if its contents constitute a work based + +on the Library (independent of the use of the Library in a tool for + +writing it). Whether that is true depends on what the Library does + +and what the program that uses the Library does. + + + + 1. You may copy and distribute verbatim copies of the Library's + +complete source code as you receive it, in any medium, provided that + +you conspicuously and appropriately publish on each copy an + +appropriate copyright notice and disclaimer of warranty; keep intact + +all the notices that refer to this License and to the absence of any + +warranty; and distribute a copy of this License along with the + +Library. + + + + You may charge a fee for the physical act of transferring a copy, + +and you may at your option offer warranty protection in exchange for a + +fee. + + + + 2. You may modify your copy or copies of the Library or any portion + +of it, thus forming a work based on the Library, and copy and + +distribute such modifications or work under the terms of Section 1 + +above, provided that you also meet all of these conditions: + + + + a) The modified work must itself be a software library. + + + + b) You must cause the files modified to carry prominent notices + + stating that you changed the files and the date of any change. + + + + c) You must cause the whole of the work to be licensed at no + + charge to all third parties under the terms of this License. 
+ + + + d) If a facility in the modified Library refers to a function or a + + table of data to be supplied by an application program that uses + + the facility, other than as an argument passed when the facility + + is invoked, then you must make a good faith effort to ensure that, + + in the event an application does not supply such function or + + table, the facility still operates, and performs whatever part of + + its purpose remains meaningful. + + + + (For example, a function in a library to compute square roots has + + a purpose that is entirely well-defined independent of the + + application. Therefore, Subsection 2d requires that any + + application-supplied function or table used by this function must + + be optional: if the application does not supply it, the square + + root function must still compute square roots.) + + + +These requirements apply to the modified work as a whole. If + +identifiable sections of that work are not derived from the Library, + +and can be reasonably considered independent and separate works in + +themselves, then this License, and its terms, do not apply to those + +sections when you distribute them as separate works. But when you + +distribute the same sections as part of a whole which is a work based + +on the Library, the distribution of the whole must be on the terms of + +this License, whose permissions for other licensees extend to the + +entire whole, and thus to each and every part regardless of who wrote + +it. + + + +Thus, it is not the intent of this section to claim rights or contest + +your rights to work written entirely by you; rather, the intent is to + +exercise the right to control the distribution of derivative or + +collective works based on the Library. + + + +In addition, mere aggregation of another work not based on the Library + +with the Library (or with a work based on the Library) on a volume of + +a storage or distribution medium does not bring the other work under + +the scope of this License. 
+ + + + 3. You may opt to apply the terms of the ordinary GNU General Public + +License instead of this License to a given copy of the Library. To do + +this, you must alter all the notices that refer to this License, so + +that they refer to the ordinary GNU General Public License, version 2, + +instead of to this License. (If a newer version than version 2 of the + +ordinary GNU General Public License has appeared, then you can specify + +that version instead if you wish.) Do not make any other change in + +these notices. + + + + Once this change is made in a given copy, it is irreversible for + +that copy, so the ordinary GNU General Public License applies to all + +subsequent copies and derivative works made from that copy. + + + + This option is useful when you wish to copy part of the code of + +the Library into a program that is not a library. + + + + 4. You may copy and distribute the Library (or a portion or + +derivative of it, under Section 2) in object code or executable form + +under the terms of Sections 1 and 2 above provided that you accompany + +it with the complete corresponding machine-readable source code, which + +must be distributed under the terms of Sections 1 and 2 above on a + +medium customarily used for software interchange. + + + + If distribution of object code is made by offering access to copy + +from a designated place, then offering equivalent access to copy the + +source code from the same place satisfies the requirement to + +distribute the source code, even though third parties are not + +compelled to copy the source along with the object code. + + + + 5. A program that contains no derivative of any portion of the + +Library, but is designed to work with the Library by being compiled or + +linked with it, is called a "work that uses the Library". Such a + +work, in isolation, is not a derivative work of the Library, and + +therefore falls outside the scope of this License. 
+ + + + However, linking a "work that uses the Library" with the Library + +creates an executable that is a derivative of the Library (because it + +contains portions of the Library), rather than a "work that uses the + +library". The executable is therefore covered by this License. + +Section 6 states terms for distribution of such executables. + + + + When a "work that uses the Library" uses material from a header file + +that is part of the Library, the object code for the work may be a + +derivative work of the Library even though the source code is not. + +Whether this is true is especially significant if the work can be + +linked without the Library, or if the work is itself a library. The + +threshold for this to be true is not precisely defined by law. + + + + If such an object file uses only numerical parameters, data + +structure layouts and accessors, and small macros and small inline + +functions (ten lines or less in length), then the use of the object + +file is unrestricted, regardless of whether it is legally a derivative + +work. (Executables containing this object code plus portions of the + +Library will still fall under Section 6.) + + + + Otherwise, if the work is a derivative of the Library, you may + +distribute the object code for the work under the terms of Section 6. + +Any executables containing that work also fall under Section 6, + +whether or not they are linked directly with the Library itself. + + + + 6. As an exception to the Sections above, you may also compile or + +link a "work that uses the Library" with the Library to produce a + +work containing portions of the Library, and distribute that work + +under terms of your choice, provided that the terms permit + +modification of the work for the customer's own use and reverse + +engineering for debugging such modifications. 
+ + + + You must give prominent notice with each copy of the work that the + +Library is used in it and that the Library and its use are covered by + +this License. You must supply a copy of this License. If the work + +during execution displays copyright notices, you must include the + +copyright notice for the Library among them, as well as a reference + +directing the user to the copy of this License. Also, you must do one + +of these things: + + + + a) Accompany the work with the complete corresponding + + machine-readable source code for the Library including whatever + + changes were used in the work (which must be distributed under + + Sections 1 and 2 above); and, if the work is an executable linked + + with the Library, with the complete machine-readable "work that + + uses the Library", as object code and/or source code, so that the + + user can modify the Library and then relink to produce a modified + + executable containing the modified Library. (It is understood + + that the user who changes the contents of definitions files in the + + Library will not necessarily be able to recompile the application + + to use the modified definitions.) + + + + b) Accompany the work with a written offer, valid for at + + least three years, to give the same user the materials + + specified in Subsection 6a, above, for a charge no more + + than the cost of performing this distribution. + + + + c) If distribution of the work is made by offering access to copy + + from a designated place, offer equivalent access to copy the above + + specified materials from the same place. + + + + d) Verify that the user has already received a copy of these + + materials or that you have already sent this user a copy. + + + + For an executable, the required form of the "work that uses the + +Library" must include any data and utility programs needed for + +reproducing the executable from it. 
However, as a special exception, + +the source code distributed need not include anything that is normally + +distributed (in either source or binary form) with the major + +components (compiler, kernel, and so on) of the operating system on + +which the executable runs, unless that component itself accompanies + +the executable. + + + + It may happen that this requirement contradicts the license + +restrictions of other proprietary libraries that do not normally + +accompany the operating system. Such a contradiction means you cannot + +use both them and the Library together in an executable that you + +distribute. + + + + 7. You may place library facilities that are a work based on the + +Library side-by-side in a single library together with other library + +facilities not covered by this License, and distribute such a combined + +library, provided that the separate distribution of the work based on + +the Library and of the other library facilities is otherwise + +permitted, and provided that you do these two things: + + + + a) Accompany the combined library with a copy of the same work + + based on the Library, uncombined with any other library + + facilities. This must be distributed under the terms of the + + Sections above. + + + + b) Give prominent notice with the combined library of the fact + + that part of it is a work based on the Library, and explaining + + where to find the accompanying uncombined form of the same work. + + + + 8. You may not copy, modify, sublicense, link with, or distribute + +the Library except as expressly provided under this License. Any + +attempt otherwise to copy, modify, sublicense, link with, or + +distribute the Library is void, and will automatically terminate your + +rights under this License. However, parties who have received copies, + +or rights, from you under this License will not have their licenses + +terminated so long as such parties remain in full compliance. + + + + 9. 
You are not required to accept this License, since you have not + +signed it. However, nothing else grants you permission to modify or + +distribute the Library or its derivative works. These actions are + +prohibited by law if you do not accept this License. Therefore, by + +modifying or distributing the Library (or any work based on the + +Library), you indicate your acceptance of this License to do so, and + +all its terms and conditions for copying, distributing or modifying + +the Library or works based on it. + + + + 10. Each time you redistribute the Library (or any work based on the + +Library), the recipient automatically receives a license from the + +original licensor to copy, distribute, link with or modify the Library + +subject to these terms and conditions. You may not impose any further + +restrictions on the recipients' exercise of the rights granted herein. + +You are not responsible for enforcing compliance by third parties to + +this License. + + + + 11. If, as a consequence of a court judgment or allegation of patent + +infringement or for any other reason (not limited to patent issues), + +conditions are imposed on you (whether by court order, agreement or + +otherwise) that contradict the conditions of this License, they do not + +excuse you from the conditions of this License. If you cannot + +distribute so as to satisfy simultaneously your obligations under this + +License and any other pertinent obligations, then as a consequence you + +may not distribute the Library at all. For example, if a patent + +license would not permit royalty-free redistribution of the Library by + +all those who receive copies directly or indirectly through you, then + +the only way you could satisfy both it and this License would be to + +refrain entirely from distribution of the Library. 
+ + + +If any portion of this section is held invalid or unenforceable under any + +particular circumstance, the balance of the section is intended to apply, + +and the section as a whole is intended to apply in other circumstances. + + + +It is not the purpose of this section to induce you to infringe any + +patents or other property right claims or to contest validity of any + +such claims; this section has the sole purpose of protecting the + +integrity of the free software distribution system which is + +implemented by public license practices. Many people have made + +generous contributions to the wide range of software distributed + +through that system in reliance on consistent application of that + +system; it is up to the author/donor to decide if he or she is willing + +to distribute software through any other system and a licensee cannot + +impose that choice. + + + +This section is intended to make thoroughly clear what is believed to + +be a consequence of the rest of this License. + + + + 12. If the distribution and/or use of the Library is restricted in + +certain countries either by patents or by copyrighted interfaces, the + +original copyright holder who places the Library under this License may add + +an explicit geographical distribution limitation excluding those countries, + +so that distribution is permitted only in or among countries not thus + +excluded. In such case, this License incorporates the limitation as if + +written in the body of this License. + + + + 13. The Free Software Foundation may publish revised and/or new + +versions of the Library General Public License from time to time. + +Such new versions will be similar in spirit to the present version, + +but may differ in detail to address new problems or concerns. + + + +Each version is given a distinguishing version number. 
If the Library + +specifies a version number of this License which applies to it and + +"any later version", you have the option of following the terms and + +conditions either of that version or of any later version published by + +the Free Software Foundation. If the Library does not specify a + +license version number, you may choose any version ever published by + +the Free Software Foundation. + + + + 14. If you wish to incorporate parts of the Library into other free + +programs whose distribution conditions are incompatible with these, + +write to the author to ask for permission. For software which is + +copyrighted by the Free Software Foundation, write to the Free + +Software Foundation; we sometimes make exceptions for this. Our + +decision will be guided by the two goals of preserving the free status + +of all derivatives of our free software and of promoting the sharing + +and reuse of software generally. + + + + NO WARRANTY + + + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO + +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. + +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR + +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY + +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE + +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE + +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME + +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN + +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY + +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU + +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR + +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE + +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING + +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A + +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF + +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + +DAMAGES. + + + + END OF TERMS AND CONDITIONS + + + + Appendix: How to Apply These Terms to Your New Libraries + + + + If you develop a new library, and you want it to be of the greatest + +possible use to the public, we recommend making it free software that + +everyone can redistribute and change. You can do so by permitting + +redistribution under these terms (or, alternatively, under the terms of the + +ordinary General Public License). + + + + To apply these terms, attach the following notices to the library. It is + +safest to attach them to the start of each source file to most effectively + +convey the exclusion of warranty; and each file should have at least the + +"copyright" line and a pointer to where the full notice is found. + + + + + + Copyright (C) + + + + This library is free software; you can redistribute it and/or + + modify it under the terms of the GNU Library General Public + + License as published by the Free Software Foundation; either + + version 2 of the License, or (at your option) any later version. + + + + This library is distributed in the hope that it will be useful, + + but WITHOUT ANY WARRANTY; without even the implied warranty of + + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + + Library General Public License for more details. 
+ + + + You should have received a copy of the GNU Library General Public + + License along with this library; if not, write to the Free + + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + + +Also add information on how to contact you by electronic and paper mail. + + + +You should also get your employer (if you work as a programmer) or your + +school, if any, to sign a "copyright disclaimer" for the library, if + +necessary. Here is a sample; alter the names: + + + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + + + , 1 April 1990 + + Ty Coon, President of Vice + + + +That's all there is to it! + +=============================================================================== + +* For Googletest see also this required NOTICE: + +Copyright 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/sql-odbc/aws_sdk_cpp_setup.ps1 b/sql-odbc/aws_sdk_cpp_setup.ps1 new file mode 100644 index 0000000000..ecc4aa6b65 --- /dev/null +++ b/sql-odbc/aws_sdk_cpp_setup.ps1 @@ -0,0 +1,30 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +git clone "https://github.com/aws/aws-sdk-cpp.git" + +$prefix_path = (pwd).path + +mkdir sdk-build + +cd sdk-build + +cmake ..\\aws-sdk-cpp\\ -D CMAKE_INSTALL_PREFIX=$prefix_path\AWSSDK\ -D CMAKE_BUILD_TYPE=Release -D BUILD_ONLY="core" -D CUSTOM_MEMORY_MANAGEMENT="OFF" -D ENABLE_RTTI="OFF" -D ENABLE_TESTING="OFF" + +msbuild ALL_BUILD.vcxproj /p:Configuration=Release + +msbuild INSTALL.vcxproj /p:Configuration=Release + +cd .. 
\ No newline at end of file diff --git a/sql-odbc/aws_sdk_cpp_setup.sh b/sql-odbc/aws_sdk_cpp_setup.sh new file mode 100755 index 0000000000..7ded1ee467 --- /dev/null +++ b/sql-odbc/aws_sdk_cpp_setup.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +cd src +git clone -b "1.7.329" "https://github.com/aws/aws-sdk-cpp.git" +cd .. \ No newline at end of file diff --git a/sql-odbc/build_win_debug32.ps1 b/sql-odbc/build_win_debug32.ps1 new file mode 100644 index 0000000000..2c64f9c413 --- /dev/null +++ b/sql-odbc/build_win_debug32.ps1 @@ -0,0 +1,26 @@ +# Build AWS SDK +$BITNESS=32 + +# Compare Bitness for 32 +# $ARCH="Win32" + +mkdir sdk-build${BITNESS} +cd sdk-build${BITNESS} + +git clone "https://github.com/aws/aws-sdk-cpp.git" + +$prefix_path = (pwd).path +cmake .\aws-sdk-cpp -A Win32 -D CMAKE_INSTALL_PREFIX=${prefix_path}\AWSSDK\ -D CMAKE_BUILD_TYPE=Debug -D BUILD_ONLY="core" -D CUSTOM_MEMORY_MANAGEMENT="OFF" -D ENABLE_RTTI="OFF" -D ENABLE_TESTING="OFF" + +msbuild ALL_BUILD.vcxproj /p:Configuration=Debug +msbuild INSTALL.vcxproj /p:Configuration=Debug + +cd .. 
+ +# # Configure Project +cmake -S src -B cmake-build${BITNESS} -A Win32 -D CMAKE_INSTALL_PREFIX=sdk-build${BITNESS}\AWSSDK\ -D BUILD_WITH_TESTS=ON + +# # Build Project +cmake --build .\cmake-build${BITNESS} --config Debug + +msbuild cmake-build32\PACKAGE.vcxproj -p:Configuration=Debug \ No newline at end of file diff --git a/sql-odbc/build_win_debug64.ps1 b/sql-odbc/build_win_debug64.ps1 new file mode 100644 index 0000000000..2e38670790 --- /dev/null +++ b/sql-odbc/build_win_debug64.ps1 @@ -0,0 +1,26 @@ +# Build AWS SDK +$BITNESS = 64 + +# Compare Bitness for 32 +# $ARCH="x64" + +mkdir sdk-build${BITNESS} +cd sdk-build${BITNESS} + +git clone "https://github.com/aws/aws-sdk-cpp.git" + +$prefix_path = (pwd).path +cmake .\aws-sdk-cpp -A x64 -D CMAKE_INSTALL_PREFIX=${prefix_path}\AWSSDK\ -D CMAKE_BUILD_TYPE=Debug -D BUILD_ONLY="core" -D CUSTOM_MEMORY_MANAGEMENT="OFF" -D ENABLE_RTTI="OFF" -D ENABLE_TESTING="OFF" + +msbuild ALL_BUILD.vcxproj /p:Configuration=Debug +msbuild INSTALL.vcxproj /p:Configuration=Debug + +cd .. 
+ +# # Configure Project +cmake -S src -B cmake-build${BITNESS} -A x64 -D CMAKE_INSTALL_PREFIX=sdk-build${BITNESS}\AWSSDK\ -D BUILD_WITH_TESTS=ON + +# # Build Project +cmake --build .\cmake-build${BITNESS} --config Debug + +cp .\sdk-build64\bin\Debug\* .\bin64\Debug \ No newline at end of file diff --git a/sql-odbc/build_win_release32.ps1 b/sql-odbc/build_win_release32.ps1 new file mode 100644 index 0000000000..ebf707e167 --- /dev/null +++ b/sql-odbc/build_win_release32.ps1 @@ -0,0 +1,24 @@ +# Build AWS SDK +$BITNESS=32 + +# Compare Bitness for 32 +# $ARCH="Win32" + +mkdir sdk-build${BITNESS} +cd sdk-build${BITNESS} + +git clone "https://github.com/aws/aws-sdk-cpp.git" + +$prefix_path = (pwd).path +cmake .\aws-sdk-cpp -A Win32 -D CMAKE_INSTALL_PREFIX=${prefix_path}\AWSSDK\ -D CMAKE_BUILD_TYPE=Release -D BUILD_ONLY="core" -D CUSTOM_MEMORY_MANAGEMENT="OFF" -D ENABLE_RTTI="OFF" -D ENABLE_TESTING="OFF" + +msbuild ALL_BUILD.vcxproj /p:Configuration=Release +msbuild INSTALL.vcxproj /p:Configuration=Release + +cd .. + +# # Configure Project +cmake -S src -B cmake-build${BITNESS} -A Win32 -D CMAKE_INSTALL_PREFIX=sdk-build${BITNESS}\AWSSDK\ -D BUILD_WITH_TESTS=ON + +# # Build Project +cmake --build .\cmake-build${BITNESS} --config Release diff --git a/sql-odbc/build_win_release64.ps1 b/sql-odbc/build_win_release64.ps1 new file mode 100644 index 0000000000..c39d0f9757 --- /dev/null +++ b/sql-odbc/build_win_release64.ps1 @@ -0,0 +1,26 @@ +# Build AWS SDK +$BITNESS = 64 + +# Compare Bitness for 32 +# $ARCH="x64" + +mkdir sdk-build64 +cd .\sdk-build64 + +git clone "https://github.com/aws/aws-sdk-cpp.git" + +$prefix_path = (pwd).path +cmake .\aws-sdk-cpp -A x64 -D CMAKE_INSTALL_PREFIX=${prefix_path}\AWSSDK\ -D CMAKE_BUILD_TYPE=Release -D BUILD_ONLY="core" -D CUSTOM_MEMORY_MANAGEMENT="OFF" -D ENABLE_RTTI="OFF" -D ENABLE_TESTING="OFF" + +msbuild ALL_BUILD.vcxproj /p:Configuration=Release +msbuild INSTALL.vcxproj /p:Configuration=Release + +cd .. 
+ +# # Configure Project +cmake -S src -B cmake-build64 -A x64 -D CMAKE_INSTALL_PREFIX=sdk-build64\AWSSDK\ -D BUILD_WITH_TESTS=ON + +# # Build Project +cmake --build .\cmake-build64 --config Release + +cp .\sdk-build64\bin\Release\* .\bin64\Release \ No newline at end of file diff --git a/sql-odbc/docs/dev/Pagination.md b/sql-odbc/docs/dev/Pagination.md new file mode 100644 index 0000000000..10a937e39c --- /dev/null +++ b/sql-odbc/docs/dev/Pagination.md @@ -0,0 +1,28 @@ +# Opendistro ODBC Driver Cursor (Pagination) Support Design Documentation + +## Overview +Elasticsearch ODBC Driver supports forward-only cursor. This document illustrates how the cursor(pagination) is handled in the driver. + +For information on how the pagination is supported on Elasticsearch server, check [Opendistro SQL Cursor (Pagination) Support](https://github.com/opendistro-for-elasticsearch/sql/blob/master/docs/dev/Pagination.md). + +## Data Flow +

    + +

    + +* Steps 3 & 4 will repeatedly get datarows until entire result is retrieved. +* Step 5 will send a request to close cursor whenever the connection is closed. +* ODBC Driver will provide an option to define fetch size as a connection parameter. + * If fetch size is zero, query will fall back to non-cursor behavior. + * If fetch size is not given then the number of rows per request will be as per server-defined [default fetch size](https://github.com/opendistro-for-elasticsearch/sql/blob/master/docs/dev/Pagination.md#42-salient-points). +* ODBC Driver will send the request to close cursor whenever the connection is closed. + +## Detailed Design + +The ODBC Driver will use std::promise and std::future to retrieve the result asynchronously. + +* Thread to send queries is designed to get the next set of results. +* Thread to process data is designed to parse the datarows and add the results in the resultset. + + + diff --git a/sql-odbc/docs/dev/example_config_files/elasticsearch.yml b/sql-odbc/docs/dev/example_config_files/elasticsearch.yml new file mode 100644 index 0000000000..8f578210e3 --- /dev/null +++ b/sql-odbc/docs/dev/example_config_files/elasticsearch.yml @@ -0,0 +1,5 @@ +xpack.security.http.ssl.enabled: true +xpack.security.http.ssl.client_authentication: none +xpack.security.http.ssl.key_passphrase: password_if_applicable +xpack.security.http.ssl.certificate: certificate.pem +xpack.security.http.ssl.key: key.pem \ No newline at end of file diff --git a/sql-odbc/docs/dev/example_config_files/example_tdc_file.tdc b/sql-odbc/docs/dev/example_config_files/example_tdc_file.tdc new file mode 100644 index 0000000000..629b2b924d --- /dev/null +++ b/sql-odbc/docs/dev/example_config_files/example_tdc_file.tdc @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/sql-odbc/docs/dev/example_config_files/kibana.yml b/sql-odbc/docs/dev/example_config_files/kibana.yml new file mode 100644 index 0000000000..db311013ae --- /dev/null +++ 
b/sql-odbc/docs/dev/example_config_files/kibana.yml @@ -0,0 +1,9 @@ +# The URLs of the Elasticsearch instances to use for all your queries. +elasticsearch.hosts: ["https://localhost:9200"] + +elasticsearch.ssl.verificationMode: none + +server.ssl.enabled: true +server.ssl.certificate: /certificate.pem +server.ssl.key: /key.pem +server.ssl.clientAuthentication: none diff --git a/sql-odbc/docs/dev/img/async_result_retrieval.png b/sql-odbc/docs/dev/img/async_result_retrieval.png new file mode 100644 index 0000000000..c28087af1e Binary files /dev/null and b/sql-odbc/docs/dev/img/async_result_retrieval.png differ diff --git a/sql-odbc/docs/dev/img/data_flow.png b/sql-odbc/docs/dev/img/data_flow.png new file mode 100644 index 0000000000..987e8d830c Binary files /dev/null and b/sql-odbc/docs/dev/img/data_flow.png differ diff --git a/sql-odbc/docs/dev/sequence_diagrams/SQLDriverConnect.png b/sql-odbc/docs/dev/sequence_diagrams/SQLDriverConnect.png new file mode 100644 index 0000000000..6810835abe Binary files /dev/null and b/sql-odbc/docs/dev/sequence_diagrams/SQLDriverConnect.png differ diff --git a/sql-odbc/docs/dev/sequence_diagrams/SQLDriverConnect.txt b/sql-odbc/docs/dev/sequence_diagrams/SQLDriverConnect.txt new file mode 100644 index 0000000000..0d8bc880ec --- /dev/null +++ b/sql-odbc/docs/dev/sequence_diagrams/SQLDriverConnect.txt @@ -0,0 +1,14 @@ +ODBCTest->psqlodbc Driver:SQLDriverConnect or SQLConnect +psqlodbc Driver->libes (C interface):DB Parameters +libes (C interface)->Connection Factory:ESConn +Connection Factory->ESConn:Initialize ESConn +ESConn->libes (C interface):ESConn object +libes (C interface)->ESConn:DB Parameters +ESConn->libes (C interface):DB Parameters Validation Result +libes (C interface)->ESConn:Connect to DB +database ElasticSearch +ESConn->ElasticSearch:Get +ElasticSearch->ESConn:data +ESConn->libes (C interface):DB Connection Result +libes (C interface)->psqlodbc Driver:ESConn object with DB Connection +psqlodbc Driver->ODBCTest:SQL Code 
Result \ No newline at end of file diff --git a/sql-odbc/docs/dev/sequence_diagrams/SQLExecDirect.png b/sql-odbc/docs/dev/sequence_diagrams/SQLExecDirect.png new file mode 100644 index 0000000000..cbb4ba714b Binary files /dev/null and b/sql-odbc/docs/dev/sequence_diagrams/SQLExecDirect.png differ diff --git a/sql-odbc/docs/dev/sequence_diagrams/SQLExecDirect.txt b/sql-odbc/docs/dev/sequence_diagrams/SQLExecDirect.txt new file mode 100644 index 0000000000..9917433ac2 --- /dev/null +++ b/sql-odbc/docs/dev/sequence_diagrams/SQLExecDirect.txt @@ -0,0 +1,12 @@ +Title SQLExecDirect Sequence Diagram +ODBCTest->psqlodbc Driver:SQLExecDirect +psqlodbc Driver->libes (C interface):Query +libes (C interface)->libes (C interface):Active Connection +libes (C interface)->ESConn:Connection, Query +ESConn->ESConn:Connection, Query validation +database ElasticSearch +ESConn->ElasticSearch:Post +ElasticSearch->ESConn:Data +ESConn->libes (C interface):Data +libes (C interface)->psqlodbc Driver:Data +psqlodbc Driver->ODBCTest:Query Execution Status \ No newline at end of file diff --git a/sql-odbc/docs/dev/sign_installers.md b/sql-odbc/docs/dev/sign_installers.md new file mode 100644 index 0000000000..7f02c6fd8b --- /dev/null +++ b/sql-odbc/docs/dev/sign_installers.md @@ -0,0 +1,49 @@ +# Signing Installers + +## Steps to sign odbc driver windows installer + +- Get code signing certificate. Certificate must meet some [criteria](https://docs.microsoft.com/en-us/windows/win32/appxpkg/how-to-sign-a-package-using-signtool). Some options are listed below. + - Buy [Extended Validation (EV) Code Signing Certificate](https://docs.microsoft.com/en-us/windows-hardware/drivers/dashboard/get-a-code-signing-certificate#step-2-buy-a-new-code-signing-certificate) + - Create a [self-signed certificate](https://docs.microsoft.com/en-us/windows/msix/package/create-certificate-package-signing#create-a-self-signed-certificate) (For testing purpose only). 
+- Install the certificate that you want to sign the file with. + - [Import-Certificate](https://docs.microsoft.com/en-us/powershell/module/pkiclient/import-certificate?view=win10-ps) can be used for this purpose + +``` +// Import certificate as Trusted Publisher +Import-Certificate -FilePath .\code_signing.crt -Cert Cert:\CurrentUser\TrustedPublisher + +// Import certificate as a Root certificate authority. +Import-Certificate -FilePath .\code_signing.crt -Cert Cert:\CurrentUser\Root +``` + +- Sign the .msi file. + - Sign installer using [SignTool](https://docs.microsoft.com/en-us/windows/msix/package/sign-app-package-using-signtool) + +``` + signtool sign /sha1 '.\Open Distro for Elasticsearch SQL ODBC Driver--Windows.msi' +``` + + - Alternatively, [Set-AuthenticodeSignature](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.security/set-authenticodesignature?view=powershell-7) can be used for adding digital signature. + +``` + Set-AuthenticodeSignature '.\Open Distro for Elasticsearch SQL ODBC Driver--Windows.msi' -Certificate (Get-ChildItem Cert:\CurrentUser\My -CodeSigningCert) -TimestampServer "http://timestamp.verisign.com/scripts/timstamp.dll" +``` + +[Note](https://stackoverflow.com/questions/50956108/codesign-software-still-gives-a-warning-on-windows-10): If you have a standard code signing certificate, some time will be needed for your application to build trust. Microsoft affirms that an Extended Validation (EV) Code Signing Certificate allows to skip this period of trust building. According to Microsoft, extended validation certificates allow the developer to immediately establish reputation with SmartScreen. Otherwise, the users will see a warning like "Windows Defender Smartscreen prevented an unrecognized app from starting. Running this app might put your PC at risk.", with the two buttons: "Run anyway" and "Don't run". 
+ + +## Steps to sign odbc driver macOS installer + +- Get a [Developer ID Installer signing certificate](https://help.apple.com/xcode/mac/current/#/dev154b28f09) +- Sign the installer package using `productsign`. Do not use `Developer ID Application certificate`. + +``` +productsign -sign "Developer ID Installer: Your Apple Account Name (**********)" "~/Desktop/Open Distro for Elasticsearch SQL ODBC Driver--Darwin.pkg" "~/Desktop/signed-Open Distro for Elasticsearch SQL ODBC Driver--Darwin.pkg" +``` + +- Test installer package using [spctl](http://www.manpagez.com/man/8/spctl/) +``` + spctl -a -v --type install "Desktop/Open Distro for Elasticsearch SQL ODBC Driver--Darwin.pkg" +``` + +Reference: https://help.apple.com/xcode/mac/current/#/deve51ce7c3d \ No newline at end of file diff --git a/sql-odbc/docs/test/excel_connection.md b/sql-odbc/docs/test/excel_connection.md new file mode 100644 index 0000000000..1514ba6181 --- /dev/null +++ b/sql-odbc/docs/test/excel_connection.md @@ -0,0 +1,14 @@ +# Testing Microsoft Excel Connection + +## Prerequisites +* [Download and install](../../README.md) Open Distro for Elasticsearch SQL ODBC Driver. +* [Install and configure](https://opendistro.github.io/for-elasticsearch-docs/docs/install/) Open Distro for Elasticsearch. +* Open ODBC Data Source Administrator. Click on **System DSN** > **ODFE SQL ODBC DSN** > **Configure**. +* Set all connection options & Click on **Test**. Connection test should return `Connection Successful`. + +## Microsoft Excel Connectivity + +There are multiple ways to load data from Elasticsearch in Microsoft Excel. 
+* [ODBC as Data Source](odbc_data_source_connection.md) +* [Microsoft Query](microsoft_query_connection.md) +* [Query Wizard](query_wizard_connection.md) diff --git a/sql-odbc/docs/test/img/from_odbc_advanced_options.png b/sql-odbc/docs/test/img/from_odbc_advanced_options.png new file mode 100644 index 0000000000..b011326bc0 Binary files /dev/null and b/sql-odbc/docs/test/img/from_odbc_advanced_options.png differ diff --git a/sql-odbc/docs/test/img/from_odbc_advanced_options_auth.png b/sql-odbc/docs/test/img/from_odbc_advanced_options_auth.png new file mode 100644 index 0000000000..15496f6e1a Binary files /dev/null and b/sql-odbc/docs/test/img/from_odbc_advanced_options_auth.png differ diff --git a/sql-odbc/docs/test/img/from_odbc_advanced_options_load_data.png b/sql-odbc/docs/test/img/from_odbc_advanced_options_load_data.png new file mode 100644 index 0000000000..320a9f9122 Binary files /dev/null and b/sql-odbc/docs/test/img/from_odbc_advanced_options_load_data.png differ diff --git a/sql-odbc/docs/test/img/from_odbc_auth.png b/sql-odbc/docs/test/img/from_odbc_auth.png new file mode 100644 index 0000000000..c392f8b55a Binary files /dev/null and b/sql-odbc/docs/test/img/from_odbc_auth.png differ diff --git a/sql-odbc/docs/test/img/from_odbc_dsn.png b/sql-odbc/docs/test/img/from_odbc_dsn.png new file mode 100644 index 0000000000..58598b18a9 Binary files /dev/null and b/sql-odbc/docs/test/img/from_odbc_dsn.png differ diff --git a/sql-odbc/docs/test/img/from_odbc_loaded_data.png b/sql-odbc/docs/test/img/from_odbc_loaded_data.png new file mode 100644 index 0000000000..f8a4c98afb Binary files /dev/null and b/sql-odbc/docs/test/img/from_odbc_loaded_data.png differ diff --git a/sql-odbc/docs/test/img/from_odbc_table_list.png b/sql-odbc/docs/test/img/from_odbc_table_list.png new file mode 100644 index 0000000000..7fea36c26b Binary files /dev/null and b/sql-odbc/docs/test/img/from_odbc_table_list.png differ diff --git a/sql-odbc/docs/test/img/microsoft_query_add_tables.png 
b/sql-odbc/docs/test/img/microsoft_query_add_tables.png new file mode 100644 index 0000000000..6bc517f038 Binary files /dev/null and b/sql-odbc/docs/test/img/microsoft_query_add_tables.png differ diff --git a/sql-odbc/docs/test/img/microsoft_query_disable_use_the_query_wizard_option.png b/sql-odbc/docs/test/img/microsoft_query_disable_use_the_query_wizard_option.png new file mode 100644 index 0000000000..737323ce94 Binary files /dev/null and b/sql-odbc/docs/test/img/microsoft_query_disable_use_the_query_wizard_option.png differ diff --git a/sql-odbc/docs/test/img/microsoft_query_import_data.png b/sql-odbc/docs/test/img/microsoft_query_import_data.png new file mode 100644 index 0000000000..c9dcdcda09 Binary files /dev/null and b/sql-odbc/docs/test/img/microsoft_query_import_data.png differ diff --git a/sql-odbc/docs/test/img/microsoft_query_loaded_data.png b/sql-odbc/docs/test/img/microsoft_query_loaded_data.png new file mode 100644 index 0000000000..de02b5818c Binary files /dev/null and b/sql-odbc/docs/test/img/microsoft_query_loaded_data.png differ diff --git a/sql-odbc/docs/test/img/microsoft_query_select_colums.png b/sql-odbc/docs/test/img/microsoft_query_select_colums.png new file mode 100644 index 0000000000..164d2c0dc0 Binary files /dev/null and b/sql-odbc/docs/test/img/microsoft_query_select_colums.png differ diff --git a/sql-odbc/docs/test/img/microsoft_query_select_tables.png b/sql-odbc/docs/test/img/microsoft_query_select_tables.png new file mode 100644 index 0000000000..6c4404bb1b Binary files /dev/null and b/sql-odbc/docs/test/img/microsoft_query_select_tables.png differ diff --git a/sql-odbc/docs/test/img/microsoft_query_table_options.png b/sql-odbc/docs/test/img/microsoft_query_table_options.png new file mode 100644 index 0000000000..cd673ac584 Binary files /dev/null and b/sql-odbc/docs/test/img/microsoft_query_table_options.png differ diff --git a/sql-odbc/docs/test/img/odbc_data_source.png b/sql-odbc/docs/test/img/odbc_data_source.png new file mode 
100644 index 0000000000..6aeec58cf0 Binary files /dev/null and b/sql-odbc/docs/test/img/odbc_data_source.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_choose_coulms.png b/sql-odbc/docs/test/img/query_wizard_choose_coulms.png new file mode 100644 index 0000000000..073b7b7f00 Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_choose_coulms.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_enable_use_the_query_wizard_option.png b/sql-odbc/docs/test/img/query_wizard_enable_use_the_query_wizard_option.png new file mode 100644 index 0000000000..29cda7a355 Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_enable_use_the_query_wizard_option.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_error_popup.png b/sql-odbc/docs/test/img/query_wizard_error_popup.png new file mode 100644 index 0000000000..2595bbdcfd Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_error_popup.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_filter_data.png b/sql-odbc/docs/test/img/query_wizard_filter_data.png new file mode 100644 index 0000000000..a5a77b65c3 Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_filter_data.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_finish.png b/sql-odbc/docs/test/img/query_wizard_finish.png new file mode 100644 index 0000000000..14186f19d4 Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_finish.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_import_data.png b/sql-odbc/docs/test/img/query_wizard_import_data.png new file mode 100644 index 0000000000..48c538e0de Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_import_data.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_loaded_data.png b/sql-odbc/docs/test/img/query_wizard_loaded_data.png new file mode 100644 index 0000000000..b83c920740 Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_loaded_data.png differ diff --git 
a/sql-odbc/docs/test/img/query_wizard_select_tables.png b/sql-odbc/docs/test/img/query_wizard_select_tables.png new file mode 100644 index 0000000000..830e2d1528 Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_select_tables.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_sort_order.png b/sql-odbc/docs/test/img/query_wizard_sort_order.png new file mode 100644 index 0000000000..93670aefdd Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_sort_order.png differ diff --git a/sql-odbc/docs/test/img/query_wizard_table_options.png b/sql-odbc/docs/test/img/query_wizard_table_options.png new file mode 100644 index 0000000000..30a3aa3586 Binary files /dev/null and b/sql-odbc/docs/test/img/query_wizard_table_options.png differ diff --git a/sql-odbc/docs/test/img/select_microsoft_query.png b/sql-odbc/docs/test/img/select_microsoft_query.png new file mode 100644 index 0000000000..58e01f0053 Binary files /dev/null and b/sql-odbc/docs/test/img/select_microsoft_query.png differ diff --git a/sql-odbc/docs/test/microsoft_query_connection.md b/sql-odbc/docs/test/microsoft_query_connection.md new file mode 100644 index 0000000000..a3deaea431 --- /dev/null +++ b/sql-odbc/docs/test/microsoft_query_connection.md @@ -0,0 +1,36 @@ +## Microsoft Query Connection + +* Open blank workbook in Microsoft Excel. +* Click on **Data** > **Get Data** > **From Other Sources** > **From Microsoft Query** + + + +* Select **Databases** > **ODFE SQL ODBC DSN**. +* Clear the **Use the Query Wizard to create/edit queries** check box, and then click on **OK**. + + + +* Click on **Options** in Add Table window. + + + +* Click **Refresh**. Select checkbox **Tables**. Clear all other checkboxes. Click on **OK**. + + + +* Select tables and click on **Add**. After all required tables are added, click on **Close**. + + + +* Double click on required columns. You can double-click the asterisk (*) to select all the columns from a table. 
+ + + +* You can select different options to define query here. Ensure the query is supported by the [OpenDistro for Elasticsearch SQL plugin](https://github.com/opendistro-for-elasticsearch/sql). After defining query, click on **Return Data** to retrieve the result set. +* Select worksheet and click on **OK**. + + + +* Data will be loaded in the spreadsheet + + diff --git a/sql-odbc/docs/test/odbc_data_source_connection.md b/sql-odbc/docs/test/odbc_data_source_connection.md new file mode 100644 index 0000000000..855a4d49be --- /dev/null +++ b/sql-odbc/docs/test/odbc_data_source_connection.md @@ -0,0 +1,41 @@ +## ODBC as Data Source Connection + +* Open blank workbook in Microsoft Excel. +* Click on **Data** > **Get Data** > **From Other Sources** > **From ODBC** + + + +* Select **ODFE SQL ODBC DSN**. Click **OK**. + + + +* Select **Default or Custom** in connection credentials windows and click on **Connect**. + + + +* Select a table from list to load data preview. Click on **Load**. + + + +#### Test Advanced options + +* Click on **Advanced options** after selecting DSN. +* Add some connection options in **Connection string**. +* Enter any supported **SQL statement**. + + + +* Click on **OK**. +* Select Default option for authentication and Click on **Connect**. + + + +* Data preview should be available. **Load** data in spreadsheet. + + + +* Data will be loaded in the spreadsheet + + + +NOTE: Check driver logs to verify modified connection string options. diff --git a/sql-odbc/docs/test/query_wizard_connection.md b/sql-odbc/docs/test/query_wizard_connection.md new file mode 100644 index 0000000000..4dab086dae --- /dev/null +++ b/sql-odbc/docs/test/query_wizard_connection.md @@ -0,0 +1,48 @@ +## Query Wizard Connection + +* Open blank workbook in Microsoft Excel. +* Click on **Data** > **Get Data** > **From Other Sources** > **From Microsoft Query** + + + +* Select **Databases** > **ODFE SQL ODBC DSN**. 
+* Ensure the **Use the Query Wizard to create/edit queries** check box is selected, and then click **OK**. + + + +* You might get a popup with a message `This data source contains no visible tables`. Click on **OK**. + + + +* Click on **Options** in Query Wizard window. + + + +* Select checkbox **Tables**. Clear all other checkboxes. Click on **OK**. + + + +* You will see list of available tables & columns. Select required tables/columns and click on **>**. +* After selecting all required columns, Click on **Next**. + + + +* Specify conditions to apply filter if needed. Ensure selected operations are supported by Elasticsearch. Click on **Next**. + + + +* Specify sorting options if required. Ensure selected operations are supported by the [OpenDistro for Elasticsearch SQL plugin](https://github.com/opendistro-for-elasticsearch/sql). Click on **Next**. + + + +* Select **Return Data to Microsoft Excel** and click on **Finish**. + + + +* Select worksheet and click on **OK**. + + + +* Data will be loaded in the spreadsheet + + diff --git a/sql-odbc/docs/user/configuration_options.md b/sql-odbc/docs/user/configuration_options.md new file mode 100644 index 0000000000..dca4f4bb4d --- /dev/null +++ b/sql-odbc/docs/user/configuration_options.md @@ -0,0 +1,37 @@ +# Configuration Options + +>**NOTE:** All option names are *case-insensitive*. + +#### Basic Options + +| Option | Description | Type | Default | +|--------|-------------|------|---------------| +| `DSN` | **D**ata **S**ource **N**ame used for configuring the connection. | string | | +| `Host` / `Server` | Hostname or IP address for the target cluster. | string | | +| `Port` | Port number on which the cluster's REST interface is listening. | string | | + +#### Authentication Options + +| Option | Description | Type | Default | +|--------|-------------|------|---------------| +| `Auth` | Authentication mechanism to use. 
| one of `BASIC` (basic HTTP), `AWS_SIGV4` (AWS auth), `NONE` | `NONE` +| `User` / `UID` | [`Auth=BASIC`] Username for the connection. | string | | +| `Password` / `PWD` | [`Auth=BASIC`] Password for the connection. | string | | +| `Region` | [`Auth=AWS_SIGV4`] Region used for signing requests | AWS region (eg. `us-west-1`) | | + +#### Advanced Options + +| Option | Description | Type | Default | +|--------|-------------|------|---------------| +| `UseSSL` | Whether to establish the connection over SSL/TLS | boolean (`0` or `1`) | false (`0`) | +| `HostnameVerification` | Indicate whether certificate hostname verification should be performed for an SSL/TLS connection. | boolean (`0` or `1`) | true (`1`) | +| `ResponseTimeout` | The maximum time to wait for responses from the `Host`, in seconds. | integer | `10` | + +#### Logging Options + +| Option | Description | Type | Default | +|--------|-------------|------|---------------| +| `LogLevel` | Severity level for driver logs. | one of `ES_OFF`, `ES_FATAL`, `ES_ERROR`, `ES_INFO`, `ES_DEBUG`, `ES_TRACE`, `ES_ALL` | `ES_WARNING` | +| `LogOutput` | Location for storing driver logs. | string | WIN: `C:\`, MAC: `/tmp` | + +**NOTE:** Administrative privileges are required to change the value of logging options on Windows. 
\ No newline at end of file diff --git a/sql-odbc/docs/user/img/driver_default_config.png b/sql-odbc/docs/user/img/driver_default_config.png new file mode 100644 index 0000000000..dfc4bbe9a5 Binary files /dev/null and b/sql-odbc/docs/user/img/driver_default_config.png differ diff --git a/sql-odbc/docs/user/img/dsn_default_config.png b/sql-odbc/docs/user/img/dsn_default_config.png new file mode 100644 index 0000000000..465a4adcb5 Binary files /dev/null and b/sql-odbc/docs/user/img/dsn_default_config.png differ diff --git a/sql-odbc/docs/user/mac_configure_dsn.md b/sql-odbc/docs/user/mac_configure_dsn.md new file mode 100644 index 0000000000..0ec3115893 --- /dev/null +++ b/sql-odbc/docs/user/mac_configure_dsn.md @@ -0,0 +1,39 @@ +# Configuring a DSN on Mac + +> NOTE: iODBC Administrator is included with the [iODBC Driver Manager](http://www.iodbc.org/dataspace/doc/iodbc/wiki/iodbcWiki/Downloads), which must be installed in order to use the driver on Mac. + +1. Open **iODBC Administrator** using the following command (this gives the application permissions to save the Driver & DSN configurations): + 1. `sudo /Applications/iODBC/iODBC\ Administrator64.app/Contents/MacOS/iODBC\ Administrator64` + +## Adding a Driver Entry + +Note: In order to use the Open Distro for Elasticsearch SQL ODBC driver with the [Tableau Connector](), the **Description of the Driver** field *must* start with `ODFE SQL ODBC`. + +1. Go to the **ODBC Drivers** tab. +2. Click **Add a Driver**. + * **Description of the Driver**: The driver name used for ODBC connections (ex. `ODFE SQL ODBC Driver`) + * **Driver File Name**: The path to the driver file (default installed path: `/usr/local/lib/odfesqlodbc/bin/libodfesqlodbc.dylib`) + * **Setup File Name**: The path to the driver file (default installed path: `/usr/local/lib/odfesqlodbc/bin/libodfesqlodbc.dylib`) + * Set as a **User** driver + * Below is a screenshot of how the final screen should look. +3. Click **OK** to save the options. + +

    + +

    + +## Adding a DSN Entry + +This is not required if you are using the Tableau Connector, but will help with connecting to your data source through other applications. + +1. Go to the **User DSN** tab +2. Select **Add** on the right side of the window. + * Choose the Driver you added above. + * **Data Source Name (DSN)**: The name of the DSN used to store connection options (ex. `ODFE SQL ODBC DSN`) + * **Comment**: Not required + * Add key-value pairs by using the **'+'** button. Below is a picture of the recommended set of options for a default local Open Distro for Elasticsearch installation. +3. Click **OK** to save the DSN configuration. + +

    + +

    \ No newline at end of file diff --git a/sql-odbc/libraries/VisualLeakDetector/COPYING.txt b/sql-odbc/libraries/VisualLeakDetector/COPYING.txt new file mode 100644 index 0000000000..073dd57f8d --- /dev/null +++ b/sql-odbc/libraries/VisualLeakDetector/COPYING.txt @@ -0,0 +1,458 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. 
+ + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. 
We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. 
+ + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. 
+ + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. 
+ + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. 
+ + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. 
However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. 
+ +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/sql-odbc/libraries/VisualLeakDetector/bin/Microsoft.DTfW.DHL.manifest b/sql-odbc/libraries/VisualLeakDetector/bin/Microsoft.DTfW.DHL.manifest new file mode 100644 index 0000000000..e61222f007 --- /dev/null +++ b/sql-odbc/libraries/VisualLeakDetector/bin/Microsoft.DTfW.DHL.manifest @@ -0,0 +1,7 @@ + + + + + + + diff --git a/sql-odbc/libraries/VisualLeakDetector/bin/dbghelp.dll b/sql-odbc/libraries/VisualLeakDetector/bin/dbghelp.dll new file mode 100644 index 0000000000..62d850863e Binary files /dev/null and b/sql-odbc/libraries/VisualLeakDetector/bin/dbghelp.dll differ diff --git a/sql-odbc/libraries/VisualLeakDetector/bin/vld_x86.dll b/sql-odbc/libraries/VisualLeakDetector/bin/vld_x86.dll new file mode 100644 index 0000000000..08c6b27a0d Binary files /dev/null and b/sql-odbc/libraries/VisualLeakDetector/bin/vld_x86.dll differ diff --git a/sql-odbc/libraries/VisualLeakDetector/bin64/Microsoft.DTfW.DHL.manifest b/sql-odbc/libraries/VisualLeakDetector/bin64/Microsoft.DTfW.DHL.manifest new file mode 100644 index 0000000000..1ea64d5118 --- /dev/null +++ b/sql-odbc/libraries/VisualLeakDetector/bin64/Microsoft.DTfW.DHL.manifest @@ -0,0 +1,7 @@ + + + + + + + diff --git a/sql-odbc/libraries/VisualLeakDetector/bin64/dbghelp.dll b/sql-odbc/libraries/VisualLeakDetector/bin64/dbghelp.dll new file 
mode 100644 index 0000000000..499fa290ae Binary files /dev/null and b/sql-odbc/libraries/VisualLeakDetector/bin64/dbghelp.dll differ diff --git a/sql-odbc/libraries/VisualLeakDetector/bin64/vld_x64.dll b/sql-odbc/libraries/VisualLeakDetector/bin64/vld_x64.dll new file mode 100644 index 0000000000..8c12be108d Binary files /dev/null and b/sql-odbc/libraries/VisualLeakDetector/bin64/vld_x64.dll differ diff --git a/sql-odbc/libraries/VisualLeakDetector/include/vld.h b/sql-odbc/libraries/VisualLeakDetector/include/vld.h new file mode 100644 index 0000000000..b591ff3e2f --- /dev/null +++ b/sql-odbc/libraries/VisualLeakDetector/include/vld.h @@ -0,0 +1,350 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Visual Leak Detector - Import Library Header +// Copyright (c) 2005-2014 VLD Team +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +// +// See COPYING.txt for the full terms of the GNU Lesser General Public License. 
+// +//////////////////////////////////////////////////////////////////////////////// + +#pragma once + +#include "vld_def.h" + +typedef int VLD_BOOL; +typedef unsigned int VLD_UINT; +typedef size_t VLD_SIZET; +typedef void* VLD_HMODULE; + +#if defined _DEBUG || defined VLD_FORCE_ENABLE + +#ifdef __AFXWIN_H__ +#error [VLD COMPILE ERROR] '#include <afxwin.h>' should appear before '#include <vld.h>' in file stdafx.h +#endif + +#pragma comment(lib, "vld.lib") + +// Force a symbolic reference to the global VisualLeakDetector class object from +// the DLL. This ensures that the DLL is loaded and linked with the program, +// even if no code otherwise imports any of the DLL's exports. +#pragma comment(linker, "/include:__imp_?g_vld@@3VVisualLeakDetector@@A") + +//////////////////////////////////////////////////////////////////////////////// +// +// Visual Leak Detector APIs +// + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// VLDDisable - Disables Visual Leak Detector's memory leak detection at +// runtime. If memory leak detection is already disabled, then calling this +// function has no effect. +// +// Note: In multithreaded programs, this function operates on a per-thread +// basis. In other words, if you call this function from one thread, then +// memory leak detection is only disabled for that thread. If memory leak +// detection is enabled for other threads, then it will remain enabled for +// those other threads. It was designed to work this way to insulate you, +// the programmer, from having to ensure thread synchronization when calling +// VLDEnable() and VLDDisable(). Without this, calling these two functions +// unsynchronized could result in unpredictable and unintended behavior. +// But this also means that if you want to disable memory leak detection +// process-wide, then you need to call this function from every thread in +// the process. +// +// Return Value: +// +// None. 
+// +__declspec(dllimport) void VLDDisable (); + +// VLDEnable - Enables Visual Leak Detector's memory leak detection at runtime. +// If memory leak detection is already enabled, which it is by default, then +// calling this function has no effect. +// +// Note: In multithreaded programs, this function operates on a per-thread +// basis. In other words, if you call this function from one thread, then +// memory leak detection is only enabled for that thread. If memory leak +// detection is disabled for other threads, then it will remain disabled for +// those other threads. It was designed to work this way to insulate you, +// the programmer, from having to ensure thread synchronization when calling +// VLDEnable() and VLDDisable(). Without this, calling these two functions +// unsynchronized could result in unpredictable and unintended behavior. +// But this also means that if you want to enable memory leak detection +// process-wide, then you need to call this function from every thread in +// the process. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDEnable (); + +// VLDRestore - Restore Visual Leak Detector's previous state. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDRestore (); + +// VLDGlobalDisable - Disables Visual Leak Detector's memory leak detection at +// runtime in all threads. If memory leak detection is already disabled, +// then calling this function has no effect. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDGlobalDisable (); + +// VLDGlobalEnable - Enables Visual Leak Detector's memory leak detection +// at runtime in all threads. If memory leak detection is already enabled, +// which it is by default, then calling this function has no effect. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDGlobalEnable (); + +// VLDReportLeaks - Report leaks up to the execution point. +// +// Return Value: +// +// None. 
+// +__declspec(dllimport) VLD_UINT VLDReportLeaks (); + +// VLDReportThreadLeaks - Report thread leaks up to the execution point. +// +// threadId: thread Id. +// +// Return Value: +// +// None. +// +__declspec(dllimport) VLD_UINT VLDReportThreadLeaks (VLD_UINT threadId); + +// VLDGetLeaksCount - Return memory leaks count to the execution point. +// +// Return Value: +// +// None. +// +__declspec(dllimport) VLD_UINT VLDGetLeaksCount (); + +// VLDGetThreadLeaksCount - Return thread memory leaks count to the execution point. +// +// threadId: thread Id. +// +// Return Value: +// +// None. +// +__declspec(dllimport) VLD_UINT VLDGetThreadLeaksCount (VLD_UINT threadId); + +// VLDMarkAllLeaksAsReported - Mark all leaks as reported. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDMarkAllLeaksAsReported (); + +// VLDMarkThreadLeaksAsReported - Mark thread leaks as reported. +// +// threadId: thread Id. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDMarkThreadLeaksAsReported (VLD_UINT threadId); + + +// VLDRefreshModules - Look for recently loaded DLLs and patch them if necessary. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDRefreshModules(); + + +// VLDEnableModule - Enable Memory leak checking on the specified module. +// +// module: module handle. +// +// Return Value: +// +// None. +// + +__declspec(dllimport) void VLDEnableModule(VLD_HMODULE module); + + +// VLDDisableModule - Disable Memory leak checking on the specified module. +// +// module: module handle. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDDisableModule(VLD_HMODULE module); + +// VLDGetOptions - Return all current options. +// +// Return Value: +// +// Mask of current options. +// +__declspec(dllimport) VLD_UINT VLDGetOptions(); + +// VLDGetReportFilename - Return current report filename. +// +// filename: current report filename (max characters - MAX_PATH). +// +// Return Value: +// +// None. 
+// +__declspec(dllimport) void VLDGetReportFilename(wchar_t *filename); + +// VLDSetOptions - Update the report options via function call rather than INI file. +// +// option_mask: Only the following flags are checked +// VLD_OPT_AGGREGATE_DUPLICATES +// VLD_OPT_MODULE_LIST_INCLUDE +// VLD_OPT_SAFE_STACK_WALK +// VLD_OPT_SLOW_DEBUGGER_DUMP +// VLD_OPT_TRACE_INTERNAL_FRAMES +// VLD_OPT_START_DISABLED +// VLD_OPT_SKIP_HEAPFREE_LEAKS +// VLD_OPT_VALIDATE_HEAPFREE +// +// maxDataDump: maximum number of user-data bytes to dump for each leaked block. +// +// maxTraceFrames: maximum number of frames per stack trace for each leaked block. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDSetOptions(VLD_UINT option_mask, VLD_SIZET maxDataDump, VLD_UINT maxTraceFrames); + +// VLDSetModulesList - Set list of modules included/excluded in leak detection +// depending on parameter "includeModules". +// +// modules: list of modules to be forcefully included/excluded in leak detection. +// +// includeModules: include or exclude that modules. +// +// Return Value: +// +// None. +// +__declspec(dllimport) void VLDSetModulesList(const wchar_t *modules, VLD_BOOL includeModules); + +// VLDGetModulesList - Return current list of included/excluded modules +// depending on flag VLD_OPT_TRACE_INTERNAL_FRAMES. +// +// modules: destination string for list of included/excluded modules (maximum length 512 characters). +// +// size: maximum string size. +// +// Return Value: +// +// VLD_BOOL: TRUE if include modules, otherwise FALSE. +// +__declspec(dllimport) VLD_BOOL VLDGetModulesList(wchar_t *modules, VLD_UINT size); + +// VLDSetReportOptions - Update the report options via function call rather than INI file. +// +// Only the following flags are checked +// VLD_OPT_REPORT_TO_DEBUGGER +// VLD_OPT_REPORT_TO_FILE +// VLD_OPT_REPORT_TO_STDOUT +// VLD_OPT_UNICODE_REPORT +// +// filename is optional and can be NULL. +// +// Return Value: +// +// None. 
+// +__declspec(dllimport) void VLDSetReportOptions(VLD_UINT option_mask, const wchar_t *filename); + +// VLDSetReportHook - Installs or uninstalls a client-defined reporting function by hooking it +// into the C run-time debug reporting process (debug version only). +// +// mode: The action to take: VLD_RPTHOOK_INSTALL or VLD_RPTHOOK_REMOVE. +// +// pfnNewHook: Report hook to install or remove. +// +// Return Value: +// +// int: 0 if success. +// +__declspec(dllimport) int VLDSetReportHook(int mode, VLD_REPORT_HOOK pfnNewHook); + +// VLDResolveCallstacks - Performs symbol resolution for all saved extent CallStack's that have +// been tracked by Visual Leak Detector. This function is necessary for applications that +// dynamically load and unload modules, and through which memory leaks might be included. +// If this is NOT called, stack traces may have stack frames with no symbol information. This +// happens because the symbol API's cannot look up symbols for a binary / module that has been unloaded +// from the process. +// +// Return Value: +// +// int: 0 if successfully resolved all callstacks. 
+// +__declspec(dllexport) int VLDResolveCallstacks(); + +#ifdef __cplusplus +} +#endif // __cplusplus + +#else // !_DEBUG + +#define VLDEnable() +#define VLDDisable() +#define VLDRestore() +#define VLDGlobalDisable() +#define VLDGlobalEnable() +#define VLDReportLeaks() (0) +#define VLDReportThreadLeaks() (0) +#define VLDGetLeaksCount() (0) +#define VLDGetThreadLeaksCount() (0) +#define VLDMarkAllLeaksAsReported() +#define VLDMarkThreadLeaksAsReported(a) +#define VLDRefreshModules() +#define VLDEnableModule(a) +#define VLDDisableModule(b) +#define VLDGetOptions() (0) +#define VLDGetReportFilename(a) +#define VLDSetOptions(a, b, c) +#define VLDSetReportHook(a, b) +#define VLDSetModulesList(a) +#define VLDGetModulesList(a, b) (FALSE) +#define VLDSetReportOptions(a, b) +#define VLDResolveCallstacks() (0) + +#endif // _DEBUG diff --git a/sql-odbc/libraries/VisualLeakDetector/include/vld_def.h b/sql-odbc/libraries/VisualLeakDetector/include/vld_def.h new file mode 100644 index 0000000000..265de49c4b --- /dev/null +++ b/sql-odbc/libraries/VisualLeakDetector/include/vld_def.h @@ -0,0 +1,49 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Visual Leak Detector - Import Library Header +// Copyright (c) 2005-2014 VLD Team +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +// +// See COPYING.txt for the full terms of the GNU Lesser General Public License. +// +//////////////////////////////////////////////////////////////////////////////// + +#pragma once + +#ifndef _WCHAR_T_DEFINED +# include +#endif + +#define VLD_OPT_AGGREGATE_DUPLICATES 0x0001 // If set, aggregate duplicate leaks in the leak report. +#define VLD_OPT_MODULE_LIST_INCLUDE 0x0002 // If set, modules in the module list are included, all others are excluded. +#define VLD_OPT_REPORT_TO_DEBUGGER 0x0004 // If set, the memory leak report is sent to the debugger. +#define VLD_OPT_REPORT_TO_FILE 0x0008 // If set, the memory leak report is sent to a file. +#define VLD_OPT_SAFE_STACK_WALK 0x0010 // If set, the stack is walked using the "safe" method (StackWalk64). +#define VLD_OPT_SELF_TEST 0x0020 // If set, perform a self-test to verify memory leak self-checking. +#define VLD_OPT_SLOW_DEBUGGER_DUMP 0x0040 // If set, inserts a slight delay between sending output to the debugger. +#define VLD_OPT_START_DISABLED 0x0080 // If set, memory leak detection will initially disabled. +#define VLD_OPT_TRACE_INTERNAL_FRAMES 0x0100 // If set, include useless frames (e.g. internal to VLD) in call stacks. +#define VLD_OPT_UNICODE_REPORT 0x0200 // If set, the leak report will be encoded UTF-16 instead of ASCII. +#define VLD_OPT_VLDOFF 0x0400 // If set, VLD will be completely deactivated. It will not attach to any modules. +#define VLD_OPT_REPORT_TO_STDOUT 0x0800 // If set, the memory leak report is sent to stdout. +#define VLD_OPT_SKIP_HEAPFREE_LEAKS 0x1000 // If set, VLD skip HeapFree memory leaks. +#define VLD_OPT_VALIDATE_HEAPFREE 0x2000 // If set, VLD verifies and reports heap consistency for HeapFree calls. 
+#define VLD_OPT_SKIP_CRTSTARTUP_LEAKS 0x4000 // If set, VLD skip crt srtartup memory leaks. + +#define VLD_RPTHOOK_INSTALL 0 +#define VLD_RPTHOOK_REMOVE 1 + +typedef int (__cdecl * VLD_REPORT_HOOK)(int reportType, wchar_t *message, int *returnValue); diff --git a/sql-odbc/libraries/VisualLeakDetector/lib/vld.lib b/sql-odbc/libraries/VisualLeakDetector/lib/vld.lib new file mode 100644 index 0000000000..96339e2724 Binary files /dev/null and b/sql-odbc/libraries/VisualLeakDetector/lib/vld.lib differ diff --git a/sql-odbc/libraries/VisualLeakDetector/lib64/vld.lib b/sql-odbc/libraries/VisualLeakDetector/lib64/vld.lib new file mode 100644 index 0000000000..878dde2da5 Binary files /dev/null and b/sql-odbc/libraries/VisualLeakDetector/lib64/vld.lib differ diff --git a/sql-odbc/libraries/rabbit/LICENSE b/sql-odbc/libraries/rabbit/LICENSE new file mode 100644 index 0000000000..4676af6df8 --- /dev/null +++ b/sql-odbc/libraries/rabbit/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2014 mashiro + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/sql-odbc/libraries/rabbit/README b/sql-odbc/libraries/rabbit/README new file mode 100644 index 0000000000..765d927dc0 --- /dev/null +++ b/sql-odbc/libraries/rabbit/README @@ -0,0 +1,5 @@ +Downloaded on November 18th, 2019 @ 9:40 am +Master branch +https://github.com/mashiro/rabbit/tree/master + +- lyndonb@bitquilltech.com \ No newline at end of file diff --git a/sql-odbc/libraries/rabbit/include/rabbit.hpp b/sql-odbc/libraries/rabbit/include/rabbit.hpp new file mode 100644 index 0000000000..ea4cddebc8 --- /dev/null +++ b/sql-odbc/libraries/rabbit/include/rabbit.hpp @@ -0,0 +1,1351 @@ +// The MIT License (MIT) +// +// Copyright (c) 2013-2014 mashiro +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// clang-format off + +#ifndef RABBIT_HPP_INCLUDED +#define RABBIT_HPP_INCLUDED + +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wtautological-constant-out-of-range-compare" +#endif + +#ifndef RABBIT_NAMESPACE +#define RABBIT_NAMESPACE rabbit +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace RABBIT_NAMESPACE { + +#define RABBIT_TAG_DEF(name, id) \ + struct name \ + { \ + static const rapidjson::Type native_value = id; \ + static const int value = id; \ + }; \ +/**/ +RABBIT_TAG_DEF(null_tag, rapidjson::kNullType) +RABBIT_TAG_DEF(false_tag, rapidjson::kFalseType) +RABBIT_TAG_DEF(true_tag, rapidjson::kTrueType) +RABBIT_TAG_DEF(object_tag, rapidjson::kObjectType) +RABBIT_TAG_DEF(array_tag, rapidjson::kArrayType) +RABBIT_TAG_DEF(string_tag, rapidjson::kStringType) +RABBIT_TAG_DEF(number_tag, rapidjson::kNumberType) +#undef RABBIT_TAG_DEF + +class type_mismatch : public std::runtime_error +{ +public: + type_mismatch(const std::string& msg) + : std::runtime_error(msg) + {} +}; + +typedef rapidjson::ParseErrorCode parse_error_code; + +class parse_error : public std::runtime_error +{ +private: + parse_error_code code_; + +public: + parse_error(parse_error_code code) + : std::runtime_error(rapidjson::GetParseError_En(code)) + , code_(code) + {} + + parse_error_code code() const { return code_; } +}; + +// fwd +template class basic_value_ref; +template class basic_value; +template class basic_object; +template class basic_array; + +namespace details { + + template + struct enable_if_c + { + typedef T type; + }; + + template + struct enable_if_c + {}; + + template + struct enable_if : enable_if_c + {}; + + template + 
struct disable_if : enable_if_c + {}; + + + template + struct bool_ + { + static const bool value = C; + }; + + typedef bool_ true_; + typedef bool_ false_; + + + template struct remove_reference { typedef T type; }; + template struct remove_reference { typedef T type; }; + + template struct remove_const { typedef T type; }; + template struct remove_const { typedef T type; }; + + + template > + class basic_string_ref + { + public: + typedef Char value_type; + typedef std::size_t size_type; + + private: + const value_type* data_; + size_type length_; + + public: + basic_string_ref() + : data_(0) + , length_(0) + {} + + basic_string_ref(const basic_string_ref& other) + : data_(other.data_) + , length_(other.length_) + {} + + basic_string_ref(const value_type* str) + : data_(str) + , length_(Traits::length(str)) + {} + + basic_string_ref(const value_type* str, size_type length) + : data_(str) + , length_(length) + {} + + template + basic_string_ref(const std::basic_string& other) + : data_(other.data()) + , length_(other.length()) + {} + + size_type size() const { return length_; } + size_type length() const { return length_; } + size_type max_size() const { return length_; } + bool empty() const { return length_ == 0; } + + const value_type* data() const { return data_; } + }; + + + // type traits + template struct is_tag : false_ {}; + template <> struct is_tag : true_ {}; + template <> struct is_tag : true_ {}; + template <> struct is_tag : true_ {}; + template <> struct is_tag : true_ {}; + template <> struct is_tag : true_ {}; + template <> struct is_tag : true_ {}; + template <> struct is_tag : true_ {}; + + template struct is_null : false_ {}; + template <> struct is_null : true_ {}; + + template struct is_false : false_ {}; + template <> struct is_false : true_ {}; + + template struct is_true : false_ {}; + template <> struct is_true : true_ {}; + + template struct is_object : false_ {}; + template <> struct is_object : true_ {}; + template struct is_object< 
basic_object > : true_ {}; + + template struct is_array : false_ {}; + template <> struct is_array : true_ {}; + template struct is_array< basic_array > : true_ {}; + + template struct is_string : false_ {}; + template <> struct is_string : true_ {}; + template struct is_string< std::basic_string > : true_ {}; + template struct is_string< basic_string_ref > : true_ {}; + + template struct is_number : false_ {}; + template <> struct is_number : true_ {}; + + template struct is_bool : false_ {}; + template <> struct is_bool : true_ {}; + + template struct is_int : false_ {}; + template <> struct is_int : true_ {}; + + template struct is_uint : false_ {}; + template <> struct is_uint : true_ {}; + + template struct is_int64 : false_ {}; + template <> struct is_int64 : true_ {}; + + template struct is_uint64 : false_ {}; + template <> struct is_uint64 : true_ {}; + + template struct is_double : false_ {}; + template <> struct is_double : true_ {}; + + template struct is_value_ref : false_ {}; + template struct is_value_ref< basic_value_ref > : true_ {}; + template struct is_value_ref< basic_value > : true_ {}; + template struct is_value_ref< basic_object > : true_ {}; + template struct is_value_ref< basic_array > : true_ {}; + + // type name + template const char* type_name(typename enable_if< is_null >::type* = 0) { return "null"; } + template const char* type_name(typename enable_if< is_false >::type* = 0) { return "false"; } + template const char* type_name(typename enable_if< is_true >::type* = 0) { return "true"; } + template const char* type_name(typename enable_if< is_object >::type* = 0) { return "object"; } + template const char* type_name(typename enable_if< is_array >::type* = 0) { return "array"; } + template const char* type_name(typename enable_if< is_string >::type* = 0) { return "string"; } + template const char* type_name(typename enable_if< is_number >::type* = 0) { return "number"; } + template const char* type_name(typename enable_if< is_bool 
>::type* = 0) { return "bool"; } + template const char* type_name(typename enable_if< is_int >::type* = 0) { return "int"; } + template const char* type_name(typename enable_if< is_uint >::type* = 0) { return "uint"; } + template const char* type_name(typename enable_if< is_int64 >::type* = 0) { return "int64"; } + template const char* type_name(typename enable_if< is_uint64 >::type* = 0) { return "uint64"; } + template const char* type_name(typename enable_if< is_double >::type* = 0) { return "double"; } + template const char* type_name(typename enable_if< is_value_ref >::type* = 0) { return "value_ref"; } + + + template + struct operator_arrow_proxy + { + mutable typename remove_const::type value_; + operator_arrow_proxy(const PseudoReference& value) : value_(value) {} + PseudoReference* operator->() const { return &value_; } + }; + + template + struct operator_arrow_proxy + { + T& value_; + operator_arrow_proxy(T& value) : value_(value) {} + T* operator->() const { return &value_; } + }; + + + template + class transform_iterator + { + typedef std::iterator_traits traits_type; + + public: + typedef transform_iterator this_type; + typedef Function function_type; + + typedef Iterator iterator_type; + typedef typename traits_type::iterator_category iterator_category; + typedef typename traits_type::difference_type difference_type; + + typedef typename Function::result_type result_type; + typedef typename remove_reference::type value_type; + typedef operator_arrow_proxy operator_arrow_proxy_type; + typedef operator_arrow_proxy_type pointer; + typedef result_type reference; + + private: + iterator_type it_; + function_type func_; + + public: + transform_iterator() + : it_() + , func_() + {} + + explicit transform_iterator(const iterator_type& it) + : it_(it) + , func_() + {} + + transform_iterator(const iterator_type& it, const function_type& func) + : it_(it) + , func_(func) + {} + + template + transform_iterator(const transform_iterator& other) + : it_(other.base()) 
+ , func_(other.functor()) + {} + + iterator_type& base() { return it_; } + const iterator_type& base() const { return it_; } + + function_type& functor() { return func_; } + const function_type& functor() const { return func_; } + + result_type dereference() const { return func_(*it_); } + + result_type operator*() const { return dereference(); } + operator_arrow_proxy_type operator->() const { return operator_arrow_proxy_type(dereference()); } + + this_type& operator++() { ++it_; return *this; } + this_type operator++(int) { return this_type(it_++, func_); } + this_type& operator--() { --it_; return *this; } + this_type operator--(int) { return this_type(it_--, func_); } + + this_type operator+(difference_type n) const { return this_type(it_ + n, func_); } + this_type& operator+=(difference_type n) { it_ += n; return *this; } + this_type operator-(difference_type n) const { return this_type(it_ - n, func_); } + this_type& operator-=(difference_type n) { it_ -= n; return *this; } + + result_type operator[](difference_type n) const { return func_(it_[n]); } + + // Subtraction operator overload is required for std::distance + template + size_t operator-(const transform_iterator& other) const { return base() - other.base(); } + + template + bool operator==(const transform_iterator& other) const { return base() == other.base(); } + + template + bool operator!=(const transform_iterator& other) const { return base() != other.base(); } + + template + bool operator<(const transform_iterator& other) const { return base() < other.base(); } + + template + bool operator>(const transform_iterator& other) const { return base() > other.base(); } + + template + bool operator<=(const transform_iterator& other) const { return base() <= other.base(); } + + template + bool operator>=(const transform_iterator& other) const { return base() >= other.base(); } + }; + + template + transform_iterator make_transform_iterator(Iterator it, Function func = Function()) + { + return 
transform_iterator(it, func); + } + + + template + class member_wrapper + { + public: + typedef Member wrapped_type; + typedef ValueRef value_ref_type; + typedef typename ValueRef::string_type string_type; + typedef typename ValueRef::allocator_type allocator_type; + + class proxy + { + wrapped_type& member_; + allocator_type* alloc_; + + public: + proxy(wrapped_type& member, allocator_type* alloc) + : member_(member) + , alloc_(alloc) + {} + + string_type name() const { return value_ref_type(&(member_.name), alloc_).as_string(); } + value_ref_type value() const { return value_ref_type(&(member_.value), alloc_); } + }; + + private: + allocator_type* alloc_; + + public: + member_wrapper(allocator_type* alloc) + : alloc_(alloc) + {} + + template + member_wrapper(const member_wrapper& other) + : alloc_(other.get_allocator_pointer()) + {} + + typedef proxy result_type; + result_type operator()(wrapped_type& member) const + { + return result_type(member, alloc_); + } + + allocator_type* get_allocator_pointer() const { return alloc_; } + }; + + template + class value_wrapper + { + public: + typedef Value wrapped_type; + typedef ValueRef value_ref_type; + typedef typename ValueRef::string_type string_type; + typedef typename ValueRef::allocator_type allocator_type; + + private: + allocator_type* alloc_; + + public: + value_wrapper(allocator_type* alloc) + : alloc_(alloc) + {} + + template + value_wrapper(const value_wrapper& other) + : alloc_(other.get_allocator_pointer()) + {} + + typedef value_ref_type result_type; + result_type operator()(wrapped_type& value) const + { + return result_type(&value, alloc_); + } + + allocator_type* get_allocator_pointer() const { return alloc_; } + }; + + template struct value_ref_traits; + template struct const_value_ref_traits; + + template + struct value_ref_traits + { + typedef Encoding encoding_type; + typedef rapidjson::Type native_type; + typedef rapidjson::GenericDocument native_document_type; + typedef rapidjson::GenericValue 
native_value_type; + typedef typename native_document_type::AllocatorType native_allocator_type; + typedef const_value_ref_traits const_traits; + + template + static void set(ValueRef& ref, Tag tag = Tag()) + { + ref.set(tag); + } + }; + + template + struct const_value_ref_traits + { + typedef Encoding encoding_type; + typedef const rapidjson::Type native_type; + typedef const rapidjson::GenericDocument native_document_type; + typedef const rapidjson::GenericValue native_value_type; + typedef const typename native_document_type::AllocatorType native_allocator_type; + typedef const_value_ref_traits const_traits; + + template + static void set(const ValueRef& ref, Tag tag = Tag()) + {} + }; + + + template + class scoped_ptr + { + private: + T* p_; + + private: + scoped_ptr(const scoped_ptr& other); + scoped_ptr& operator=(const scoped_ptr& other); + + public: + explicit scoped_ptr(T* p = 0) + : p_(p) + {} + + ~scoped_ptr() + { + delete p_; + } + + T* operator->() { return p_; } + const T* operator->() const { return p_; } + + T& operator*() { return *p_; } + const T& operator*() const { return *p_; } + + T* get() { return p_; } + const T* get() const { return p_; } + + void swap(scoped_ptr& other) throw() + { + std::swap(p_, other.p_); + } + }; + +} // details + +template +class basic_value_ref +{ +public: + typedef Traits traits; + typedef typename Traits::const_traits const_traits; + + typedef typename traits::encoding_type encoding_type; + typedef typename traits::native_type native_type; + typedef typename traits::native_document_type native_document_type; + typedef typename traits::native_value_type native_value_type; + typedef typename traits::native_allocator_type native_allocator_type; + + typedef basic_value_ref value_ref_type; + typedef const basic_value_ref const_value_ref_type; + typedef typename encoding_type::Ch char_type; + typedef std::basic_string string_type; + typedef details::basic_string_ref string_ref_type; + typedef native_allocator_type 
allocator_type; + +private: + typedef details::member_wrapper< typename native_value_type::Member, value_ref_type> member_wrapper_type; + typedef details::member_wrapper const_member_wrapper_type; + typedef details::value_wrapper< native_value_type, value_ref_type> value_wrapper_type; + typedef details::value_wrapper const_value_wrapper_type; + +public: + typedef details::transform_iterator< member_wrapper_type, typename native_value_type::MemberIterator> member_iterator; + typedef details::transform_iterator const_member_iterator; + typedef details::transform_iterator< value_wrapper_type, typename native_value_type::ValueIterator> value_iterator; + typedef details::transform_iterator const_value_iterator; + +private: + native_value_type* value_; + allocator_type* alloc_; + +public: + basic_value_ref(native_value_type* value = 0, allocator_type* alloc = 0) + : value_(value) + , alloc_(alloc) + {} + + template + basic_value_ref(const basic_value_ref& other) + : value_(other.get_native_value_pointer()) + , alloc_(other.get_allocator_pointer()) + {} + + native_value_type* get_native_value_pointer() const { return value_; } + allocator_type* get_allocator_pointer() const { return alloc_; } + allocator_type& get_allocator() const { return *alloc_; } + + void set(null_tag) { value_->SetNull(); } + void set(object_tag) { value_->SetObject(); } + void set(array_tag) { value_->SetArray(); } + void set(bool value) { value_->SetBool(value); } + void set(int value) { value_->SetInt(value); } + void set(unsigned value) { value_->SetUint(value); } + void set(int64_t value) { value_->SetInt64(value); } + void set(uint64_t value) { value_->SetUint64(value); } + void set(double value) { value_->SetDouble(value); } + void set(const char_type* value) { value_->SetString(value, *alloc_); } + void set(const string_type& value) { value_->SetString(value.data(), static_cast(value.length()), *alloc_); } + + template + void set(const T& value, typename details::enable_if< 
details::is_value_ref >::type* = 0) + { + if (value.is_null()) set(null_tag()); + else if (value.is_bool()) set(value.as_bool()); + else if (value.is_int()) set(value.as_int()); + else if (value.is_uint()) set(value.as_uint()); + else if (value.is_int64()) set(value.as_int64()); + else if (value.is_uint64()) set(value.as_uint64()); + else if (value.is_double()) set(value.as_double()); + else if (value.is_string()) set(value.as_string()); + else if (value.is_array()) throw std::runtime_error("can not assign array directly. please use insert"); + else if (value.is_object()) throw std::runtime_error("can not assign object directly. please use insert"); + } + + template + value_ref_type& operator=(const T& value) + { + set(value); + return *this; + } + + + template + bool operator==(const basic_value_ref& other) const + { + if (is_null() && other.is_null()) return true; + if (is_bool() && other.is_bool() && as_bool() == other.as_bool()) return true; + if (is_int() && other.is_int() && as_int() == other.as_int()) return true; + if (is_uint() && other.is_uint() && as_uint() == other.as_uint()) return true; + if (is_int64() && other.is_int64() && as_int64() == other.as_int64()) return true; + if (is_uint64() && other.is_uint64() && as_uint64() == other.as_uint64()) return true; + if (is_double() && other.is_double() && as_double() == other.as_double()) return true; + if (is_string() && other.is_string() && as_string() == other.as_string()) return true; + return false; + } + + template + bool operator!=(const basic_value_ref& other) const + { + return !(*this == other); + } + + + int which() const + { + return static_cast(value_->GetType()); + } + +#define RABBIT_IS_DEF(name, base_name) \ + template \ + bool is(typename details::enable_if< details::is_##name >::type* = 0) const \ + { \ + return value_->Is##base_name(); \ + } \ + bool is_##name() const \ + { \ + return value_->Is##base_name(); \ + } \ +/**/ + RABBIT_IS_DEF(null, Null) + RABBIT_IS_DEF(false, False) + 
RABBIT_IS_DEF(true, True) + RABBIT_IS_DEF(object, Object) + RABBIT_IS_DEF(array, Array) + RABBIT_IS_DEF(number, Number) + RABBIT_IS_DEF(bool, Bool) + RABBIT_IS_DEF(int, Int) + RABBIT_IS_DEF(uint, Uint) + RABBIT_IS_DEF(int64, Int64) + RABBIT_IS_DEF(uint64, Uint64) + RABBIT_IS_DEF(double, Double) + RABBIT_IS_DEF(string, String) +#undef RABBIT_IS_DEF + +#define RABBIT_AS_DEF(result_type, name, base_name) \ + template \ + T as(typename details::enable_if< details::is_##name >::type* = 0) const \ + { \ + type_check(); \ + return value_->Get##base_name(); \ + } \ + result_type as_##name() const \ + { \ + type_check(); \ + return value_->Get##base_name(); \ + } \ +/**/ + RABBIT_AS_DEF(bool, bool, Bool) + RABBIT_AS_DEF(int, int, Int) + RABBIT_AS_DEF(unsigned, uint, Uint) + RABBIT_AS_DEF(int64_t, int64, Int64) + RABBIT_AS_DEF(uint64_t, uint64, Uint64) + RABBIT_AS_DEF(double, double, Double) + RABBIT_AS_DEF(string_type, string, String) +#undef RABBIT_AS_DEF + +private: + struct as_t + { + const value_ref_type& ref_; + as_t(const value_ref_type& ref) : ref_(ref) {} + + template + operator Result() const { return ref_.as(); } + }; + +public: + as_t as() const { return as_t(*this); } + + bool has(const string_ref_type& name) const + { + type_check(); + return value_->HasMember(name.data()); + } + + template + void insert(const string_ref_type& name, const T& value, typename details::disable_if< details::is_value_ref >::type* = 0) + { + type_check(); + native_value_type v(value); + value_->AddMember(rapidjson::StringRef(name.data(), name.length()), v, *alloc_); + } + + template + void insert(const string_ref_type& name, const T& value, typename details::enable_if< details::is_value_ref >::type* = 0) + { + type_check(); + value_->AddMember(rapidjson::StringRef(name.data(), name.length()), *value.get_native_value_pointer(), *alloc_); + } + + bool erase(const string_ref_type& name) + { + type_check(); + return value_->RemoveMember(name.data()); + } + + value_ref_type at(const 
string_ref_type& name) + { + type_check(); + + if (!has(name)) + { + native_value_type null; + value_->AddMember(rapidjson::StringRef(name.data(), name.length()), null, *alloc_); + } + + return value_ref_type(&((*value_)[name.data()]), alloc_); + } + + const_value_ref_type at(const string_ref_type& name) const + { + type_check(); + if (!has(name)) + throw std::out_of_range("not found"); + return const_value_ref_type(&((*value_)[name.data()]), alloc_); + } + + value_ref_type operator[](const string_ref_type& name) { return at(name); } + const_value_ref_type operator[](const string_ref_type& name) const { return at(name); } + + member_iterator member_begin() + { + type_check(); + return details::make_transform_iterator(value_->MemberBegin(), member_wrapper_type(alloc_)); + } + + member_iterator member_end() + { + type_check(); + return details::make_transform_iterator(value_->MemberEnd(), member_wrapper_type(alloc_)); + } + + const_member_iterator member_begin() const + { + type_check(); + return details::make_transform_iterator(value_->MemberBegin(), const_member_wrapper_type(alloc_)); + } + + const_member_iterator member_end() const + { + type_check(); + return details::make_transform_iterator(value_->MemberEnd(), const_member_wrapper_type(alloc_)); + } + + const_member_iterator member_cbegin() const { return member_begin(); } + const_member_iterator member_cend() const { return member_end(); } + + + std::size_t size() const + { + type_check(); + return value_->Size(); + } + + std::size_t capacity() const + { + type_check(); + return value_->Capacity(); + } + + bool empty() const + { + type_check(); + return value_->Empty(); + } + + value_ref_type at(std::size_t index) + { + type_check(); + range_check(index); + return value_ref_type(&((*value_)[index]), alloc_); + } + + const_value_ref_type at(std::size_t index) const + { + type_check(); + range_check(index); + return const_value_ref_type(&((*value_)[index]), *alloc_); + } + + value_ref_type operator[](std::size_t 
index) { return at(index); } + const_value_ref_type operator[](std::size_t index) const { return at(index); } + + template + void push_back(const T& value, typename details::disable_if< details::is_value_ref >::type* = 0) + { + type_check(); + native_value_type v(value); + value_->PushBack(v, *alloc_); + } + + template + void push_back(const T& value, typename details::enable_if< details::is_value_ref >::type* = 0) + { + type_check(); + value_->PushBack(*value.get_native_value_pointer(), *alloc_); + } + + void pop_back() + { + type_check(); + value_->PopBack(); + } + + value_iterator value_begin() + { + type_check(); + return details::make_transform_iterator(value_->Begin(), value_wrapper_type(alloc_)); + } + + value_iterator value_end() + { + type_check(); + return details::make_transform_iterator(value_->End(), value_wrapper_type(alloc_)); + } + + const_value_iterator value_begin() const + { + type_check(); + return details::make_transform_iterator(value_->Begin(), const_value_wrapper_type(alloc_)); + } + + const_value_iterator value_end() const + { + type_check(); + return details::make_transform_iterator(value_->End(), const_value_wrapper_type(alloc_)); + } + + const_value_iterator value_cbegin() const { return value_begin(); } + const_value_iterator value_cend() const { return value_end(); } + + + void swap(value_ref_type& other) throw() + { + std::swap(value_, other.value_); + std::swap(alloc_, other.alloc_); + } + + string_type str() const + { + switch (which()) + { + case null_tag::value: + return "null"; + + case false_tag::value: + return "false"; + + case true_tag::value: + return "true"; + + case string_tag::value: + return as_string(); + + case number_tag::value: + { + std::basic_stringstream ss; + if (is_int()) ss << as_int(); + else if (is_uint()) ss << as_uint(); + else if (is_int64()) ss << as_int64(); + else if (is_uint64()) ss << as_uint64(); + else if (is_double()) ss << as_double(); + return ss.str(); + } + + default: + { + typedef 
rapidjson::GenericStringBuffer buffer_t; + typedef rapidjson::Writer writer_t; + buffer_t buffer; + writer_t writer(buffer); + value_->Accept(writer); + return buffer.GetString(); + } + } + } + +private: + template + void type_check() const + { + if (!is()) + { + std::stringstream ss; + ss << "value is not "; + ss << details::type_name(); + ss << " (which is " << which() << ")"; + throw type_mismatch(ss.str()); + } + } + + void range_check(std::size_t index) const + { + if (index >= size()) + { + std::stringstream ss; + ss << "index (which is " << index << ") >= size() (which is " << size() << ")"; + throw std::out_of_range(ss.str()); + } + } +}; + + +template +struct basic_value_base +{ + typedef basic_value_ref base_type; + typedef typename base_type::native_value_type native_value_type; + typedef typename base_type::allocator_type allocator_type; + + details::scoped_ptr value_impl_; + details::scoped_ptr alloc_impl_; + + explicit basic_value_base(native_value_type* value = 0, allocator_type* alloc = 0) + : value_impl_(value) + , alloc_impl_(alloc) + {} +}; + +template +class basic_value + : private basic_value_base + , public basic_value_ref +{ +public: + typedef Traits traits; + typedef typename Traits::const_traits const_traits; + + typedef basic_value_base member_type; + typedef basic_value_ref base_type; + + typedef typename base_type::encoding_type encoding_type; + typedef typename base_type::native_type native_type; + typedef typename base_type::native_document_type native_document_type; + typedef typename base_type::native_value_type native_value_type; + typedef typename base_type::native_allocator_type native_allocator_type; + + typedef typename base_type::value_ref_type value_ref_type; + typedef typename base_type::const_value_ref_type const_value_ref_type; + typedef typename base_type::char_type char_type; + typedef typename base_type::string_type string_type; + typedef typename base_type::string_ref_type string_ref_type; + typedef typename 
base_type::allocator_type allocator_type; + + typedef typename base_type::member_iterator member_iterator; + typedef typename base_type::const_member_iterator const_member_iterator; + typedef typename base_type::value_iterator value_iterator; + typedef typename base_type::const_value_iterator const_value_iterator; + +private: + typedef DefaultTag default_tag; + +public: + basic_value() + : member_type(new native_value_type(DefaultTag::native_value), new allocator_type()) + , base_type(member_type::value_impl_.get(), member_type::alloc_impl_.get()) + {} + + basic_value(allocator_type& alloc) + : member_type(new native_value_type(DefaultTag::native_value)) + , base_type(member_type::value_impl_.get(), &alloc) + {} + + template + basic_value(Tag tag, typename details::enable_if< details::is_tag >::type* = 0) + : member_type(new native_value_type(Tag::native_value), new allocator_type()) + , base_type(member_type::value_impl_.get(), member_type::alloc_impl_.get()) + {} + + template + basic_value(Tag tag, allocator_type& alloc, typename details::enable_if< details::is_tag >::type* = 0) + : member_type(new native_value_type(Tag::native_value), &alloc) + , base_type(member_type::value_impl_.get(), member_type::alloc_impl_.get()) + {} + + template + basic_value(const T& value, typename details::disable_if< details::is_value_ref >::type* = 0) + : member_type(new native_value_type(value), new allocator_type()) + , base_type(member_type::value_impl_.get(), member_type::alloc_impl_.get()) + {} + + template + basic_value(const T& value, allocator_type& alloc, typename details::disable_if< details::is_value_ref >::type* = 0) + : member_type(new native_value_type(value)) + , base_type(member_type::value_impl_.get(), &alloc) + {} + + + basic_value(const basic_value& other) + : member_type() + , base_type(other) + { + if (other.is_root_value()) + throw std::runtime_error("can not copy root value"); + } + + basic_value& operator=(const basic_value& other) + { + if 
(other.is_root_value()) + throw std::runtime_error("can not copy root value"); + basic_value(other).swap(*this); + return *this; + } + + template + basic_value(const basic_value_ref& other) + : member_type() + , base_type(other) + { + if (base_type::is_null()) + traits::set(*this, default_tag()); + } + + template + basic_value& operator=(const basic_value_ref& other) + { + basic_value(other).swap(*this); + return *this; + } + + + template + basic_value& operator=(const T& value) + { + base_type::set(value); + return *this; + } + + void clear() + { + base_type::set(default_tag()); + } + + void swap(basic_value& other) throw() + { + base_type::swap(other); + member_type::value_impl_.swap(other.value_impl_); + member_type::alloc_impl_.swap(other.alloc_impl_); + } + +private: + bool is_root_value() const + { + return member_type::value_impl_.get() != 0 + || member_type::alloc_impl_.get() != 0; + } +}; + +template +class basic_object : public basic_value +{ +public: + typedef basic_value base_type; + + typedef typename base_type::encoding_type encoding_type; + typedef typename base_type::native_type native_type; + typedef typename base_type::native_document_type native_document_type; + typedef typename base_type::native_value_type native_value_type; + typedef typename base_type::native_allocator_type native_allocator_type; + + typedef typename base_type::value_ref_type value_ref_type; + typedef typename base_type::const_value_ref_type const_value_ref_type; + typedef typename base_type::char_type char_type; + typedef typename base_type::string_type string_type; + typedef typename base_type::string_ref_type string_ref_type; + typedef typename base_type::allocator_type allocator_type; + + typedef typename base_type::member_iterator iterator; + typedef typename base_type::const_member_iterator const_iterator; + +public: + basic_object() + : base_type() + {} + + basic_object(allocator_type& alloc) + : base_type(alloc) + {} + + basic_object(const basic_object& other) + : 
base_type(other) + {} + + template + basic_object(const basic_value_ref& other) + : base_type(other) + {} + + iterator begin() { return base_type::member_begin(); } + iterator end() { return base_type::member_end(); } + const_iterator begin() const { return base_type::member_begin(); } + const_iterator end() const { return base_type::member_end(); } + const_iterator cbegin() const { return base_type::member_begin(); } + const_iterator cend() const { return base_type::member_end(); } +}; + +template +class basic_array : public basic_value +{ +public: + typedef basic_value base_type; + + typedef typename base_type::encoding_type encoding_type; + typedef typename base_type::native_type native_type; + typedef typename base_type::native_document_type native_document_type; + typedef typename base_type::native_value_type native_value_type; + typedef typename base_type::native_allocator_type native_allocator_type; + + typedef typename base_type::value_ref_type value_ref_type; + typedef typename base_type::const_value_ref_type const_value_ref_type; + typedef typename base_type::char_type char_type; + typedef typename base_type::string_type string_type; + typedef typename base_type::string_ref_type string_ref_type; + typedef typename base_type::allocator_type allocator_type; + + typedef typename base_type::value_iterator iterator; + typedef typename base_type::const_value_iterator const_iterator; + +public: + basic_array() + : base_type() + {} + + basic_array(allocator_type& alloc) + : base_type(alloc) + {} + + basic_array(const basic_array& other) + : base_type(other) + {} + + template + basic_array(const basic_value_ref& other) + : base_type(other) + {} + + iterator begin() { return base_type::value_begin(); } + iterator end() { return base_type::value_end(); } + const_iterator begin() const { return base_type::value_begin(); } + const_iterator end() const { return base_type::value_end(); } + const_iterator cbegin() const { return base_type::value_begin(); } + 
const_iterator cend() const { return base_type::value_end(); } +}; + + +template +struct basic_document_base +{ + typedef basic_value_ref base_type; + typedef typename base_type::native_document_type native_document_type; + + details::scoped_ptr document_impl_; + + explicit basic_document_base(native_document_type* document = 0) + : document_impl_(document) + {} +}; + +template +class basic_document + : private basic_document_base + , public basic_value_ref +{ +public: + typedef basic_document_base member_type; + typedef basic_value_ref base_type; + + typedef typename base_type::encoding_type encoding_type; + typedef typename base_type::native_type native_type; + typedef typename base_type::native_document_type native_document_type; + typedef typename base_type::native_value_type native_value_type; + typedef typename base_type::native_allocator_type native_allocator_type; + + typedef typename base_type::value_ref_type value_ref_type; + typedef typename base_type::const_value_ref_type const_value_ref_type; + typedef typename base_type::char_type char_type; + typedef typename base_type::string_type string_type; + typedef typename base_type::string_ref_type string_ref_type; + typedef typename base_type::allocator_type allocator_type; + +private: + basic_document(const basic_document&); + basic_document& operator=(const basic_document&); + +public: + basic_document() + : member_type(new native_document_type()) + , base_type(member_type::document_impl_.get(), &(member_type::document_impl_->GetAllocator())) + {} + + void swap(basic_document& other) + { + base_type::swap(other); + member_type::document_impl_.swap(other.document_impl_); + } + + void parse(const string_ref_type& str) + { + parse<0>(str); + } + + template + void parse(const string_ref_type& str) + { + member_type::document_impl_->template Parse(str.data()); + if (member_type::document_impl_->HasParseError()) + throw parse_error(member_type::document_impl_->GetParseError()); + } + + // Additional functions - 
not from original port. These are added with the intention of supporting schema validation + void parse(const string_ref_type& str, const string_ref_type& schema) + { + parse<0>(str, schema); + } + + template + void parse(const string_ref_type& str, const string_ref_type& schema) + { + // Parse schema + rapidjson::Document doc; + doc.Parse(schema.data()); + if(doc.HasParseError()) + throw parse_error(doc.GetParseError()); + + // Construct schema validator + rapidjson::SchemaDocument schema_document(doc); + rapidjson::SchemaValidator schema_validator(schema_document); + + // Parse using rabbit style parse call + member_type::document_impl_->template Parse(str.data()); + if (member_type::document_impl_->HasParseError()) + throw parse_error(member_type::document_impl_->GetParseError()); + + // Validate using rapidjson schema validator + if(!member_type::document_impl_->Accept(schema_validator)) { + if (member_type::document_impl_->HasParseError()) + throw parse_error(member_type::document_impl_->GetParseError()); + else + throw parse_error(rapidjson::kParseErrorUnspecificSyntaxError); + } + } +}; + + +template +void swap(basic_value_ref& a, basic_value_ref& b) +{ + a.swap(b); +} + +template +void swap(basic_value& a, basic_value& b) +{ + a.swap(b); +} + +template +void swap(basic_object& a, basic_object& b) +{ + a.swap(b); +} + +template +void swap(basic_array& a, basic_array& b) +{ + a.swap(b); +} + +template +void swap(basic_document& a, basic_document& b) +{ + a.swap(b); +} + + +template +typename basic_value_ref::string_type str(const basic_value_ref& value) +{ + return value.str(); +} + +template +std::basic_ostream& operator<<(std::basic_ostream& os, const basic_value_ref& value) +{ + os << str(value); + return os; +} + + +typedef rapidjson::UTF8<> default_encoding; + +template +struct types +{ + typedef details::value_ref_traits traits; + typedef details::const_value_ref_traits const_traits; + + typedef basic_value_ref value_ref; + typedef const basic_value_ref 
const_value_ref; + typedef basic_value value; + typedef const basic_value const_value; + typedef basic_object object; + typedef const basic_object const_object; + typedef basic_array array; + typedef const basic_array const_array; + typedef basic_document document; + typedef const basic_document const_document; + typedef typename document::allocator_type allocator; +}; + +typedef types<>::value_ref value_ref; +typedef types<>::const_value_ref const_value_ref; +typedef types<>::value value; +typedef types<>::const_value const_value; +typedef types<>::object object; +typedef types<>::const_object const_object; +typedef types<>::array array; +typedef types<>::const_array const_array; +typedef types<>::document document; +typedef types<>::const_document const_document; +typedef types<>::allocator allocator; + +} + +#endif + diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/allocators.h b/sql-odbc/libraries/rapidjson/include/rapidjson/allocators.h new file mode 100644 index 0000000000..98affe03fb --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/allocators.h @@ -0,0 +1,271 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_ALLOCATORS_H_ +#define RAPIDJSON_ALLOCATORS_H_ + +#include "rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Allocator + +/*! \class rapidjson::Allocator + \brief Concept for allocating, resizing and freeing memory block. + + Note that Malloc() and Realloc() are non-static but Free() is static. + + So if an allocator need to support Free(), it needs to put its pointer in + the header of memory block. + +\code +concept Allocator { + static const bool kNeedFree; //!< Whether this allocator needs to call Free(). + + // Allocate a memory block. + // \param size of the memory block in bytes. + // \returns pointer to the memory block. + void* Malloc(size_t size); + + // Resize a memory block. + // \param originalPtr The pointer to current memory block. Null pointer is permitted. + // \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.) + // \param newSize the new size in bytes. + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize); + + // Free a memory block. + // \param pointer to the memory block. Null pointer is permitted. + static void Free(void *ptr); +}; +\endcode +*/ + +/////////////////////////////////////////////////////////////////////////////// +// CrtAllocator + +//! C-runtime library allocator. +/*! This class is just wrapper for standard C library memory routines. + \note implements Allocator concept +*/ +class CrtAllocator { +public: + static const bool kNeedFree = true; + void* Malloc(size_t size) { + if (size) // behavior of malloc(0) is implementation defined. + return std::malloc(size); + else + return NULL; // standardize to returning NULL. 
+ } + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { + (void)originalSize; + if (newSize == 0) { + std::free(originalPtr); + return NULL; + } + return std::realloc(originalPtr, newSize); + } + static void Free(void *ptr) { std::free(ptr); } +}; + +/////////////////////////////////////////////////////////////////////////////// +// MemoryPoolAllocator + +//! Default memory allocator used by the parser and DOM. +/*! This allocator allocate memory blocks from pre-allocated memory chunks. + + It does not free memory blocks. And Realloc() only allocate new memory. + + The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default. + + User may also supply a buffer as the first chunk. + + If the user-buffer is full then additional chunks are allocated by BaseAllocator. + + The user-buffer is not deallocated by this allocator. + + \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator. + \note implements Allocator concept +*/ +template +class MemoryPoolAllocator { +public: + static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator) + + //! Constructor with chunkSize. + /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. + \param baseAllocator The allocator for allocating memory chunks. + */ + MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : + chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0) + { + } + + //! Constructor with user-supplied buffer. + /*! The user buffer will be used firstly. When it is full, memory pool allocates new chunk with chunk size. + + The user buffer will not be deallocated when this allocator is destructed. + + \param buffer User supplied buffer. + \param size Size of the buffer in bytes. It must at least larger than sizeof(ChunkHeader). 
+ \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. + \param baseAllocator The allocator for allocating memory chunks. + */ + MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : + chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0) + { + RAPIDJSON_ASSERT(buffer != 0); + RAPIDJSON_ASSERT(size > sizeof(ChunkHeader)); + chunkHead_ = reinterpret_cast(buffer); + chunkHead_->capacity = size - sizeof(ChunkHeader); + chunkHead_->size = 0; + chunkHead_->next = 0; + } + + //! Destructor. + /*! This deallocates all memory chunks, excluding the user-supplied buffer. + */ + ~MemoryPoolAllocator() { + Clear(); + RAPIDJSON_DELETE(ownBaseAllocator_); + } + + //! Deallocates all memory chunks, excluding the user-supplied buffer. + void Clear() { + while (chunkHead_ && chunkHead_ != userBuffer_) { + ChunkHeader* next = chunkHead_->next; + baseAllocator_->Free(chunkHead_); + chunkHead_ = next; + } + if (chunkHead_ && chunkHead_ == userBuffer_) + chunkHead_->size = 0; // Clear user buffer + } + + //! Computes the total capacity of allocated memory chunks. + /*! \return total capacity in bytes. + */ + size_t Capacity() const { + size_t capacity = 0; + for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) + capacity += c->capacity; + return capacity; + } + + //! Computes the memory blocks allocated. + /*! \return total used bytes. + */ + size_t Size() const { + size_t size = 0; + for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) + size += c->size; + return size; + } + + //! Allocates a memory block. (concept Allocator) + void* Malloc(size_t size) { + if (!size) + return NULL; + + size = RAPIDJSON_ALIGN(size); + if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity) + if (!AddChunk(chunk_capacity_ > size ? 
chunk_capacity_ : size)) + return NULL; + + void *buffer = reinterpret_cast(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size; + chunkHead_->size += size; + return buffer; + } + + //! Resizes a memory block (concept Allocator) + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { + if (originalPtr == 0) + return Malloc(newSize); + + if (newSize == 0) + return NULL; + + originalSize = RAPIDJSON_ALIGN(originalSize); + newSize = RAPIDJSON_ALIGN(newSize); + + // Do not shrink if new size is smaller than original + if (originalSize >= newSize) + return originalPtr; + + // Simply expand it if it is the last allocation and there is sufficient space + if (originalPtr == reinterpret_cast(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) { + size_t increment = static_cast(newSize - originalSize); + if (chunkHead_->size + increment <= chunkHead_->capacity) { + chunkHead_->size += increment; + return originalPtr; + } + } + + // Realloc process: allocate and copy memory, do not free original buffer. + if (void* newBuffer = Malloc(newSize)) { + if (originalSize) + std::memcpy(newBuffer, originalPtr, originalSize); + return newBuffer; + } + else + return NULL; + } + + //! Frees a memory block (concept Allocator) + static void Free(void *ptr) { (void)ptr; } // Do nothing + +private: + //! Copy constructor is not permitted. + MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */; + //! Copy assignment operator is not permitted. + MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */; + + //! Creates a new chunk. + /*! \param capacity Capacity of the chunk in bytes. + \return true if success. 
+ */ + bool AddChunk(size_t capacity) { + if (!baseAllocator_) + ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator()); + if (ChunkHeader* chunk = reinterpret_cast(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) { + chunk->capacity = capacity; + chunk->size = 0; + chunk->next = chunkHead_; + chunkHead_ = chunk; + return true; + } + else + return false; + } + + static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity. + + //! Chunk header for perpending to each chunk. + /*! Chunks are stored as a singly linked list. + */ + struct ChunkHeader { + size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself). + size_t size; //!< Current size of allocated memory in bytes. + ChunkHeader *next; //!< Next chunk in the linked list. + }; + + ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation. + size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated. + void *userBuffer_; //!< User supplied buffer. + BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks. + BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object. +}; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/document.h b/sql-odbc/libraries/rapidjson/include/rapidjson/document.h new file mode 100644 index 0000000000..dda56fdf0c --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/document.h @@ -0,0 +1,2575 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_DOCUMENT_H_ +#define RAPIDJSON_DOCUMENT_H_ + +/*! \file document.h */ + +#include "reader.h" +#include "internal/meta.h" +#include "internal/strfunc.h" +#include "memorystream.h" +#include "encodedstream.h" +#include // placement new +#include + +RAPIDJSON_DIAG_PUSH +#ifdef _MSC_VER +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_OFF(effc++) +#if __GNUC__ >= 6 +RAPIDJSON_DIAG_OFF(terminate) // ignore throwing RAPIDJSON_ASSERT in RAPIDJSON_NOEXCEPT functions +#endif +#endif // __GNUC__ + +#ifndef RAPIDJSON_NOMEMBERITERATORCLASS +#include // std::iterator, std::random_access_iterator_tag +#endif + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS +#include // std::move +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +// Forward declaration. +template +class GenericValue; + +template +class GenericDocument; + +//! Name-value pair in a JSON object value. +/*! + This class was internal to GenericValue. It used to be a inner struct. + But a compiler (IBM XL C/C++ for AIX) have reported to have problem with that so it moved as a namespace scope struct. + https://code.google.com/p/rapidjson/issues/detail?id=64 +*/ +template +struct GenericMember { + GenericValue name; //!< name of member (must be a string) + GenericValue value; //!< value of member. 
+}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericMemberIterator + +#ifndef RAPIDJSON_NOMEMBERITERATORCLASS + +//! (Constant) member iterator for a JSON object value +/*! + \tparam Const Is this a constant iterator? + \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document) + \tparam Allocator Allocator type for allocating memory of object, array and string. + + This class implements a Random Access Iterator for GenericMember elements + of a GenericValue, see ISO/IEC 14882:2003(E) C++ standard, 24.1 [lib.iterator.requirements]. + + \note This iterator implementation is mainly intended to avoid implicit + conversions from iterator values to \c NULL, + e.g. from GenericValue::FindMember. + + \note Define \c RAPIDJSON_NOMEMBERITERATORCLASS to fall back to a + pointer-based implementation, if your platform doesn't provide + the C++ header. + + \see GenericMember, GenericValue::MemberIterator, GenericValue::ConstMemberIterator + */ +template +class GenericMemberIterator + : public std::iterator >::Type> { + + friend class GenericValue; + template friend class GenericMemberIterator; + + typedef GenericMember PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef std::iterator BaseType; + +public: + //! Iterator type itself + typedef GenericMemberIterator Iterator; + //! Constant iterator type + typedef GenericMemberIterator ConstIterator; + //! Non-constant iterator type + typedef GenericMemberIterator NonConstIterator; + + //! Pointer to (const) GenericMember + typedef typename BaseType::pointer Pointer; + //! Reference to (const) GenericMember + typedef typename BaseType::reference Reference; + //! Signed integer type (e.g. \c ptrdiff_t) + typedef typename BaseType::difference_type DifferenceType; + + //! Default constructor (singular value) + /*! Creates an iterator pointing to no element. 
+ \note All operations, except for comparisons, are undefined on such values. + */ + GenericMemberIterator() : ptr_() {} + + //! Iterator conversions to more const + /*! + \param it (Non-const) iterator to copy from + + Allows the creation of an iterator from another GenericMemberIterator + that is "less const". Especially, creating a non-constant iterator + from a constant iterator are disabled: + \li const -> non-const (not ok) + \li const -> const (ok) + \li non-const -> const (ok) + \li non-const -> non-const (ok) + + \note If the \c Const template parameter is already \c false, this + constructor effectively defines a regular copy-constructor. + Otherwise, the copy constructor is implicitly defined. + */ + GenericMemberIterator(const NonConstIterator & it) : ptr_(it.ptr_) {} + Iterator& operator=(const NonConstIterator & it) { ptr_ = it.ptr_; return *this; } + + //! @name stepping + //@{ + Iterator& operator++(){ ++ptr_; return *this; } + Iterator& operator--(){ --ptr_; return *this; } + Iterator operator++(int){ Iterator old(*this); ++ptr_; return old; } + Iterator operator--(int){ Iterator old(*this); --ptr_; return old; } + //@} + + //! @name increment/decrement + //@{ + Iterator operator+(DifferenceType n) const { return Iterator(ptr_+n); } + Iterator operator-(DifferenceType n) const { return Iterator(ptr_-n); } + + Iterator& operator+=(DifferenceType n) { ptr_+=n; return *this; } + Iterator& operator-=(DifferenceType n) { ptr_-=n; return *this; } + //@} + + //! @name relations + //@{ + bool operator==(ConstIterator that) const { return ptr_ == that.ptr_; } + bool operator!=(ConstIterator that) const { return ptr_ != that.ptr_; } + bool operator<=(ConstIterator that) const { return ptr_ <= that.ptr_; } + bool operator>=(ConstIterator that) const { return ptr_ >= that.ptr_; } + bool operator< (ConstIterator that) const { return ptr_ < that.ptr_; } + bool operator> (ConstIterator that) const { return ptr_ > that.ptr_; } + //@} + + //! 
@name dereference + //@{ + Reference operator*() const { return *ptr_; } + Pointer operator->() const { return ptr_; } + Reference operator[](DifferenceType n) const { return ptr_[n]; } + //@} + + //! Distance + DifferenceType operator-(ConstIterator that) const { return ptr_-that.ptr_; } + +private: + //! Internal constructor from plain pointer + explicit GenericMemberIterator(Pointer p) : ptr_(p) {} + + Pointer ptr_; //!< raw pointer +}; + +#else // RAPIDJSON_NOMEMBERITERATORCLASS + +// class-based member iterator implementation disabled, use plain pointers + +template +struct GenericMemberIterator; + +//! non-const GenericMemberIterator +template +struct GenericMemberIterator { + //! use plain pointer as iterator type + typedef GenericMember* Iterator; +}; +//! const GenericMemberIterator +template +struct GenericMemberIterator { + //! use plain const pointer as iterator type + typedef const GenericMember* Iterator; +}; + +#endif // RAPIDJSON_NOMEMBERITERATORCLASS + +/////////////////////////////////////////////////////////////////////////////// +// GenericStringRef + +//! Reference to a constant string (not taking a copy) +/*! + \tparam CharType character type of the string + + This helper class is used to automatically infer constant string + references for string literals, especially from \c const \b (!) + character arrays. + + The main use is for creating JSON string values without copying the + source string via an \ref Allocator. This requires that the referenced + string pointers have a sufficient lifetime, which exceeds the lifetime + of the associated GenericValue. 
+ + \b Example + \code + Value v("foo"); // ok, no need to copy & calculate length + const char foo[] = "foo"; + v.SetString(foo); // ok + + const char* bar = foo; + // Value x(bar); // not ok, can't rely on bar's lifetime + Value x(StringRef(bar)); // lifetime explicitly guaranteed by user + Value y(StringRef(bar, 3)); // ok, explicitly pass length + \endcode + + \see StringRef, GenericValue::SetString +*/ +template +struct GenericStringRef { + typedef CharType Ch; //!< character type of the string + + //! Create string reference from \c const character array +#ifndef __clang__ // -Wdocumentation + /*! + This constructor implicitly creates a constant string reference from + a \c const character array. It has better performance than + \ref StringRef(const CharType*) by inferring the string \ref length + from the array length, and also supports strings containing null + characters. + + \tparam N length of the string, automatically inferred + + \param str Constant character array, lifetime assumed to be longer + than the use of the string in e.g. a GenericValue + + \post \ref s == str + + \note Constant complexity. + \note There is a hidden, private overload to disallow references to + non-const character arrays to be created via this constructor. + By this, e.g. function-scope arrays used to be filled via + \c snprintf are excluded from consideration. + In such cases, the referenced string should be \b copied to the + GenericValue instead. + */ +#endif + template + GenericStringRef(const CharType (&str)[N]) RAPIDJSON_NOEXCEPT + : s(str), length(N-1) {} + + //! Explicitly create string reference from \c const character pointer +#ifndef __clang__ // -Wdocumentation + /*! + This constructor can be used to \b explicitly create a reference to + a constant string pointer. + + \see StringRef(const CharType*) + + \param str Constant character pointer, lifetime assumed to be longer + than the use of the string in e.g. 
a GenericValue + + \post \ref s == str + + \note There is a hidden, private overload to disallow references to + non-const character arrays to be created via this constructor. + By this, e.g. function-scope arrays used to be filled via + \c snprintf are excluded from consideration. + In such cases, the referenced string should be \b copied to the + GenericValue instead. + */ +#endif + explicit GenericStringRef(const CharType* str) + : s(str), length(internal::StrLen(str)){ RAPIDJSON_ASSERT(s != 0); } + + //! Create constant string reference from pointer and length +#ifndef __clang__ // -Wdocumentation + /*! \param str constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \param len length of the string, excluding the trailing NULL terminator + + \post \ref s == str && \ref length == len + \note Constant complexity. + */ +#endif + GenericStringRef(const CharType* str, SizeType len) + : s(str), length(len) { RAPIDJSON_ASSERT(s != 0); } + + GenericStringRef(const GenericStringRef& rhs) : s(rhs.s), length(rhs.length) {} + + GenericStringRef& operator=(const GenericStringRef& rhs) { s = rhs.s; length = rhs.length; } + + //! implicit conversion to plain CharType pointer + operator const Ch *() const { return s; } + + const Ch* const s; //!< plain CharType pointer + const SizeType length; //!< length of the string (excluding the trailing NULL terminator) + +private: + //! Disallow construction from non-const array + template + GenericStringRef(CharType (&str)[N]) /* = delete */; +}; + +//! Mark a character pointer as constant string +/*! Mark a plain character pointer as a "string literal". This function + can be used to avoid copying a character string to be referenced as a + value in a JSON GenericValue object, if the string's lifetime is known + to be valid long enough. + \tparam CharType Character type of the string + \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. 
a GenericValue + \return GenericStringRef string reference object + \relatesalso GenericStringRef + + \see GenericValue::GenericValue(StringRefType), GenericValue::operator=(StringRefType), GenericValue::SetString(StringRefType), GenericValue::PushBack(StringRefType, Allocator&), GenericValue::AddMember +*/ +template +inline GenericStringRef StringRef(const CharType* str) { + return GenericStringRef(str, internal::StrLen(str)); +} + +//! Mark a character pointer as constant string +/*! Mark a plain character pointer as a "string literal". This function + can be used to avoid copying a character string to be referenced as a + value in a JSON GenericValue object, if the string's lifetime is known + to be valid long enough. + + This version has better performance with supplied length, and also + supports string containing null characters. + + \tparam CharType character type of the string + \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \param length The length of source string. + \return GenericStringRef string reference object + \relatesalso GenericStringRef +*/ +template +inline GenericStringRef StringRef(const CharType* str, size_t length) { + return GenericStringRef(str, SizeType(length)); +} + +#if RAPIDJSON_HAS_STDSTRING +//! Mark a string object as constant string +/*! Mark a string object (e.g. \c std::string) as a "string literal". + This function can be used to avoid copying a string to be referenced as a + value in a JSON GenericValue object, if the string's lifetime is known + to be valid long enough. + + \tparam CharType character type of the string + \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \return GenericStringRef string reference object + \relatesalso GenericStringRef + \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. 
+*/ +template +inline GenericStringRef StringRef(const std::basic_string& str) { + return GenericStringRef(str.data(), SizeType(str.size())); +} +#endif + +/////////////////////////////////////////////////////////////////////////////// +// GenericValue type traits +namespace internal { + +template +struct IsGenericValueImpl : FalseType {}; + +// select candidates according to nested encoding and allocator types +template struct IsGenericValueImpl::Type, typename Void::Type> + : IsBaseOf, T>::Type {}; + +// helper to match arbitrary GenericValue instantiations, including derived classes +template struct IsGenericValue : IsGenericValueImpl::Type {}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// TypeHelper + +namespace internal { + +template +struct TypeHelper {}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsBool(); } + static bool Get(const ValueType& v) { return v.GetBool(); } + static ValueType& Set(ValueType& v, bool data) { return v.SetBool(data); } + static ValueType& Set(ValueType& v, bool data, typename ValueType::AllocatorType&) { return v.SetBool(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt(); } + static int Get(const ValueType& v) { return v.GetInt(); } + static ValueType& Set(ValueType& v, int data) { return v.SetInt(data); } + static ValueType& Set(ValueType& v, int data, typename ValueType::AllocatorType&) { return v.SetInt(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint(); } + static unsigned Get(const ValueType& v) { return v.GetUint(); } + static ValueType& Set(ValueType& v, unsigned data) { return v.SetUint(data); } + static ValueType& Set(ValueType& v, unsigned data, typename ValueType::AllocatorType&) { return v.SetUint(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt64(); } + static 
int64_t Get(const ValueType& v) { return v.GetInt64(); } + static ValueType& Set(ValueType& v, int64_t data) { return v.SetInt64(data); } + static ValueType& Set(ValueType& v, int64_t data, typename ValueType::AllocatorType&) { return v.SetInt64(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint64(); } + static uint64_t Get(const ValueType& v) { return v.GetUint64(); } + static ValueType& Set(ValueType& v, uint64_t data) { return v.SetUint64(data); } + static ValueType& Set(ValueType& v, uint64_t data, typename ValueType::AllocatorType&) { return v.SetUint64(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsDouble(); } + static double Get(const ValueType& v) { return v.GetDouble(); } + static ValueType& Set(ValueType& v, double data) { return v.SetDouble(data); } + static ValueType& Set(ValueType& v, double data, typename ValueType::AllocatorType&) { return v.SetDouble(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsFloat(); } + static float Get(const ValueType& v) { return v.GetFloat(); } + static ValueType& Set(ValueType& v, float data) { return v.SetFloat(data); } + static ValueType& Set(ValueType& v, float data, typename ValueType::AllocatorType&) { return v.SetFloat(data); } +}; + +template +struct TypeHelper { + typedef const typename ValueType::Ch* StringType; + static bool Is(const ValueType& v) { return v.IsString(); } + static StringType Get(const ValueType& v) { return v.GetString(); } + static ValueType& Set(ValueType& v, const StringType data) { return v.SetString(typename ValueType::StringRefType(data)); } + static ValueType& Set(ValueType& v, const StringType data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } +}; + +#if RAPIDJSON_HAS_STDSTRING +template +struct TypeHelper > { + typedef std::basic_string StringType; + static bool Is(const ValueType& v) { return v.IsString(); } + 
static StringType Get(const ValueType& v) { return StringType(v.GetString(), v.GetStringLength()); } + static ValueType& Set(ValueType& v, const StringType& data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } +}; +#endif + +template +struct TypeHelper { + typedef typename ValueType::Array ArrayType; + static bool Is(const ValueType& v) { return v.IsArray(); } + static ArrayType Get(ValueType& v) { return v.GetArray(); } + static ValueType& Set(ValueType& v, ArrayType data) { return v = data; } + static ValueType& Set(ValueType& v, ArrayType data, typename ValueType::AllocatorType&) { return v = data; } +}; + +template +struct TypeHelper { + typedef typename ValueType::ConstArray ArrayType; + static bool Is(const ValueType& v) { return v.IsArray(); } + static ArrayType Get(const ValueType& v) { return v.GetArray(); } +}; + +template +struct TypeHelper { + typedef typename ValueType::Object ObjectType; + static bool Is(const ValueType& v) { return v.IsObject(); } + static ObjectType Get(ValueType& v) { return v.GetObject(); } + static ValueType& Set(ValueType& v, ObjectType data) { return v = data; } + static ValueType& Set(ValueType& v, ObjectType data, typename ValueType::AllocatorType&) { v = data; } +}; + +template +struct TypeHelper { + typedef typename ValueType::ConstObject ObjectType; + static bool Is(const ValueType& v) { return v.IsObject(); } + static ObjectType Get(const ValueType& v) { return v.GetObject(); } +}; + +} // namespace internal + +// Forward declarations +template class GenericArray; +template class GenericObject; + +/////////////////////////////////////////////////////////////////////////////// +// GenericValue + +//! Represents a JSON value. Use Value for UTF8 encoding and default allocator. +/*! + A JSON value can be one of 7 types. This class is a variant type supporting + these types. + + Use the Value if UTF8 and default allocator + + \tparam Encoding Encoding of the value. 
(Even non-string values need to have the same encoding in a document) + \tparam Allocator Allocator type for allocating memory of object, array and string. +*/ +template > +class GenericValue { +public: + //! Name-value pair in an object. + typedef GenericMember Member; + typedef Encoding EncodingType; //!< Encoding type from template parameter. + typedef Allocator AllocatorType; //!< Allocator type from template parameter. + typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding. + typedef GenericStringRef StringRefType; //!< Reference to a constant string + typedef typename GenericMemberIterator::Iterator MemberIterator; //!< Member iterator for iterating in object. + typedef typename GenericMemberIterator::Iterator ConstMemberIterator; //!< Constant member iterator for iterating in object. + typedef GenericValue* ValueIterator; //!< Value iterator for iterating in array. + typedef const GenericValue* ConstValueIterator; //!< Constant value iterator for iterating in array. + typedef GenericValue ValueType; //!< Value type of itself. + typedef GenericArray Array; + typedef GenericArray ConstArray; + typedef GenericObject Object; + typedef GenericObject ConstObject; + + //!@name Constructors and destructor. + //@{ + + //! Default constructor creates a null value. + GenericValue() RAPIDJSON_NOEXCEPT : data_() { data_.f.flags = kNullFlag; } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move constructor in C++11 + GenericValue(GenericValue&& rhs) RAPIDJSON_NOEXCEPT : data_(rhs.data_) { + rhs.data_.f.flags = kNullFlag; // give up contents + } +#endif + +private: + //! Copy constructor is not permitted. + GenericValue(const GenericValue& rhs); + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Moving from a GenericDocument is not permitted. + template + GenericValue(GenericDocument&& rhs); + + //! Move assignment from a GenericDocument is not permitted. + template + GenericValue& operator=(GenericDocument&& rhs); +#endif + +public: + + //! 
Constructor with JSON value type. + /*! This creates a Value of specified type with default content. + \param type Type of the value. + \note Default content for number is zero. + */ + explicit GenericValue(Type type) RAPIDJSON_NOEXCEPT : data_() { + static const uint16_t defaultFlags[7] = { + kNullFlag, kFalseFlag, kTrueFlag, kObjectFlag, kArrayFlag, kShortStringFlag, + kNumberAnyFlag + }; + RAPIDJSON_ASSERT(type <= kNumberType); + data_.f.flags = defaultFlags[type]; + + // Use ShortString to store empty string. + if (type == kStringType) + data_.ss.SetLength(0); + } + + //! Explicit copy constructor (with allocator) + /*! Creates a copy of a Value by using the given Allocator + \tparam SourceAllocator allocator of \c rhs + \param rhs Value to copy from (read-only) + \param allocator Allocator for allocating copied elements and buffers. Commonly use GenericDocument::GetAllocator(). + \see CopyFrom() + */ + template< typename SourceAllocator > + GenericValue(const GenericValue& rhs, Allocator & allocator); + + //! Constructor for boolean value. + /*! \param b Boolean value + \note This constructor is limited to \em real boolean values and rejects + implicitly converted types like arbitrary pointers. Use an explicit cast + to \c bool, if you want to construct a boolean JSON value in such cases. + */ +#ifndef RAPIDJSON_DOXYGEN_RUNNING // hide SFINAE from Doxygen + template + explicit GenericValue(T b, RAPIDJSON_ENABLEIF((internal::IsSame))) RAPIDJSON_NOEXCEPT // See #472 +#else + explicit GenericValue(bool b) RAPIDJSON_NOEXCEPT +#endif + : data_() { + // safe-guard against failing SFINAE + RAPIDJSON_STATIC_ASSERT((internal::IsSame::Value)); + data_.f.flags = b ? kTrueFlag : kFalseFlag; + } + + //! Constructor for int value. + explicit GenericValue(int i) RAPIDJSON_NOEXCEPT : data_() { + data_.n.i64 = i; + data_.f.flags = (i >= 0) ? (kNumberIntFlag | kUintFlag | kUint64Flag) : kNumberIntFlag; + } + + //! Constructor for unsigned value. 
+ explicit GenericValue(unsigned u) RAPIDJSON_NOEXCEPT : data_() { + data_.n.u64 = u; + data_.f.flags = (u & 0x80000000) ? kNumberUintFlag : (kNumberUintFlag | kIntFlag | kInt64Flag); + } + + //! Constructor for int64_t value. + explicit GenericValue(int64_t i64) RAPIDJSON_NOEXCEPT : data_() { + data_.n.i64 = i64; + data_.f.flags = kNumberInt64Flag; + if (i64 >= 0) { + data_.f.flags |= kNumberUint64Flag; + if (!(static_cast(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) + data_.f.flags |= kUintFlag; + if (!(static_cast(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + else if (i64 >= static_cast(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + + //! Constructor for uint64_t value. + explicit GenericValue(uint64_t u64) RAPIDJSON_NOEXCEPT : data_() { + data_.n.u64 = u64; + data_.f.flags = kNumberUint64Flag; + if (!(u64 & RAPIDJSON_UINT64_C2(0x80000000, 0x00000000))) + data_.f.flags |= kInt64Flag; + if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) + data_.f.flags |= kUintFlag; + if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + + //! Constructor for double value. + explicit GenericValue(double d) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = d; data_.f.flags = kNumberDoubleFlag; } + + //! Constructor for constant string (i.e. do not make a copy of string) + GenericValue(const Ch* s, SizeType length) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(StringRef(s, length)); } + + //! Constructor for constant string (i.e. do not make a copy of string) + explicit GenericValue(StringRefType s) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(s); } + + //! Constructor for copy-string (i.e. do make a copy of string) + GenericValue(const Ch* s, SizeType length, Allocator& allocator) : data_() { SetStringRaw(StringRef(s, length), allocator); } + + //! Constructor for copy-string (i.e. 
do make a copy of string) + GenericValue(const Ch*s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } + +#if RAPIDJSON_HAS_STDSTRING + //! Constructor for copy-string from a string object (i.e. do make a copy of string) + /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + GenericValue(const std::basic_string& s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } +#endif + + //! Constructor for Array. + /*! + \param a An array obtained by \c GetArray(). + \note \c Array is always pass-by-value. + \note the source array is moved into this value and the sourec array becomes empty. + */ + GenericValue(Array a) RAPIDJSON_NOEXCEPT : data_(a.value_.data_) { + a.value_.data_ = Data(); + a.value_.data_.f.flags = kArrayFlag; + } + + //! Constructor for Object. + /*! + \param o An object obtained by \c GetObject(). + \note \c Object is always pass-by-value. + \note the source object is moved into this value and the sourec object becomes empty. + */ + GenericValue(Object o) RAPIDJSON_NOEXCEPT : data_(o.value_.data_) { + o.value_.data_ = Data(); + o.value_.data_.f.flags = kObjectFlag; + } + + //! Destructor. + /*! Need to destruct elements of array, members of object, or copy-string. + */ + ~GenericValue() { + if (Allocator::kNeedFree) { // Shortcut by Allocator's trait + switch(data_.f.flags) { + case kArrayFlag: + { + GenericValue* e = GetElementsPointer(); + for (GenericValue* v = e; v != e + data_.a.size; ++v) + v->~GenericValue(); + Allocator::Free(e); + } + break; + + case kObjectFlag: + for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) + m->~Member(); + Allocator::Free(GetMembersPointer()); + break; + + case kCopyStringFlag: + Allocator::Free(const_cast(GetStringPointer())); + break; + + default: + break; // Do nothing for other types. + } + } + } + + //@} + + //!@name Assignment operators + //@{ + + //! Assignment with move semantics. + /*! 
\param rhs Source of the assignment. It will become a null value after assignment. + */ + GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT { + RAPIDJSON_ASSERT(this != &rhs); + this->~GenericValue(); + RawAssign(rhs); + return *this; + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move assignment in C++11 + GenericValue& operator=(GenericValue&& rhs) RAPIDJSON_NOEXCEPT { + return *this = rhs.Move(); + } +#endif + + //! Assignment of constant string reference (no copy) + /*! \param str Constant string reference to be assigned + \note This overload is needed to avoid clashes with the generic primitive type assignment overload below. + \see GenericStringRef, operator=(T) + */ + GenericValue& operator=(StringRefType str) RAPIDJSON_NOEXCEPT { + GenericValue s(str); + return *this = s; + } + + //! Assignment with primitive types. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param value The value to be assigned. + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref SetString(const Ch*, Allocator&) (for copying) or + \ref StringRef() (to explicitly mark the pointer as constant) instead. + All other pointer types would implicitly convert to \c bool, + use \ref SetBool() instead. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::IsPointer), (GenericValue&)) + operator=(T value) { + GenericValue v(value); + return *this = v; + } + + //! Deep-copy assignment from Value + /*! 
Assigns a \b copy of the Value to the current Value object + \tparam SourceAllocator Allocator type of \c rhs + \param rhs Value to copy from (read-only) + \param allocator Allocator to use for copying + */ + template + GenericValue& CopyFrom(const GenericValue& rhs, Allocator& allocator) { + RAPIDJSON_ASSERT(static_cast(this) != static_cast(&rhs)); + this->~GenericValue(); + new (this) GenericValue(rhs, allocator); + return *this; + } + + //! Exchange the contents of this value with those of other. + /*! + \param other Another value. + \note Constant complexity. + */ + GenericValue& Swap(GenericValue& other) RAPIDJSON_NOEXCEPT { + GenericValue temp; + temp.RawAssign(*this); + RawAssign(other); + other.RawAssign(temp); + return *this; + } + + //! free-standing swap function helper + /*! + Helper function to enable support for common swap implementation pattern based on \c std::swap: + \code + void swap(MyClass& a, MyClass& b) { + using std::swap; + swap(a.value, b.value); + // ... + } + \endcode + \see Swap() + */ + friend inline void swap(GenericValue& a, GenericValue& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } + + //! Prepare Value for move semantics + /*! \return *this */ + GenericValue& Move() RAPIDJSON_NOEXCEPT { return *this; } + //@} + + //!@name Equal-to and not-equal-to operators + //@{ + //! Equal-to operator + /*! + \note If an object contains duplicated named member, comparing equality with any object is always \c false. + \note Linear time complexity (number of all values in the subtree and total lengths of all strings). 
+ */ + template + bool operator==(const GenericValue& rhs) const { + typedef GenericValue RhsType; + if (GetType() != rhs.GetType()) + return false; + + switch (GetType()) { + case kObjectType: // Warning: O(n^2) inner-loop + if (data_.o.size != rhs.data_.o.size) + return false; + for (ConstMemberIterator lhsMemberItr = MemberBegin(); lhsMemberItr != MemberEnd(); ++lhsMemberItr) { + typename RhsType::ConstMemberIterator rhsMemberItr = rhs.FindMember(lhsMemberItr->name); + if (rhsMemberItr == rhs.MemberEnd() || lhsMemberItr->value != rhsMemberItr->value) + return false; + } + return true; + + case kArrayType: + if (data_.a.size != rhs.data_.a.size) + return false; + for (SizeType i = 0; i < data_.a.size; i++) + if ((*this)[i] != rhs[i]) + return false; + return true; + + case kStringType: + return StringEqual(rhs); + + case kNumberType: + if (IsDouble() || rhs.IsDouble()) { + double a = GetDouble(); // May convert from integer to double. + double b = rhs.GetDouble(); // Ditto + return a >= b && a <= b; // Prevent -Wfloat-equal + } + else + return data_.n.u64 == rhs.data_.n.u64; + + default: + return true; + } + } + + //! Equal-to operator with const C-string pointer + bool operator==(const Ch* rhs) const { return *this == GenericValue(StringRef(rhs)); } + +#if RAPIDJSON_HAS_STDSTRING + //! Equal-to operator with string object + /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + bool operator==(const std::basic_string& rhs) const { return *this == GenericValue(StringRef(rhs)); } +#endif + + //! Equal-to operator with primitive types + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c true, \c false + */ + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr,internal::IsGenericValue >), (bool)) operator==(const T& rhs) const { return *this == GenericValue(rhs); } + + //! Not-equal-to operator + /*! 
\return !(*this == rhs) + */ + template + bool operator!=(const GenericValue& rhs) const { return !(*this == rhs); } + + //! Not-equal-to operator with const C-string pointer + bool operator!=(const Ch* rhs) const { return !(*this == rhs); } + + //! Not-equal-to operator with arbitrary types + /*! \return !(*this == rhs) + */ + template RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator!=(const T& rhs) const { return !(*this == rhs); } + + //! Equal-to operator with arbitrary types (symmetric version) + /*! \return (rhs == lhs) + */ + template friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator==(const T& lhs, const GenericValue& rhs) { return rhs == lhs; } + + //! Not-Equal-to operator with arbitrary types (symmetric version) + /*! \return !(rhs == lhs) + */ + template friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator!=(const T& lhs, const GenericValue& rhs) { return !(rhs == lhs); } + //@} + + //!@name Type + //@{ + + Type GetType() const { return static_cast(data_.f.flags & kTypeMask); } + bool IsNull() const { return data_.f.flags == kNullFlag; } + bool IsFalse() const { return data_.f.flags == kFalseFlag; } + bool IsTrue() const { return data_.f.flags == kTrueFlag; } + bool IsBool() const { return (data_.f.flags & kBoolFlag) != 0; } + bool IsObject() const { return data_.f.flags == kObjectFlag; } + bool IsArray() const { return data_.f.flags == kArrayFlag; } + bool IsNumber() const { return (data_.f.flags & kNumberFlag) != 0; } + bool IsInt() const { return (data_.f.flags & kIntFlag) != 0; } + bool IsUint() const { return (data_.f.flags & kUintFlag) != 0; } + bool IsInt64() const { return (data_.f.flags & kInt64Flag) != 0; } + bool IsUint64() const { return (data_.f.flags & kUint64Flag) != 0; } + bool IsDouble() const { return (data_.f.flags & kDoubleFlag) != 0; } + bool IsString() const { return (data_.f.flags & kStringFlag) != 0; } + + // Checks whether a number can be losslessly 
converted to a double. + bool IsLosslessDouble() const { + if (!IsNumber()) return false; + if (IsUint64()) { + uint64_t u = GetUint64(); + volatile double d = static_cast(u); + return (d >= 0.0) + && (d < static_cast((std::numeric_limits::max)())) + && (u == static_cast(d)); + } + if (IsInt64()) { + int64_t i = GetInt64(); + volatile double d = static_cast(i); + return (d >= static_cast((std::numeric_limits::min)())) + && (d < static_cast((std::numeric_limits::max)())) + && (i == static_cast(d)); + } + return true; // double, int, uint are always lossless + } + + // Checks whether a number is a float (possible lossy). + bool IsFloat() const { + if ((data_.f.flags & kDoubleFlag) == 0) + return false; + double d = GetDouble(); + return d >= -3.4028234e38 && d <= 3.4028234e38; + } + // Checks whether a number can be losslessly converted to a float. + bool IsLosslessFloat() const { + if (!IsNumber()) return false; + double a = GetDouble(); + if (a < static_cast(-(std::numeric_limits::max)()) + || a > static_cast((std::numeric_limits::max)())) + return false; + double b = static_cast(static_cast(a)); + return a >= b && a <= b; // Prevent -Wfloat-equal + } + + //@} + + //!@name Null + //@{ + + GenericValue& SetNull() { this->~GenericValue(); new (this) GenericValue(); return *this; } + + //@} + + //!@name Bool + //@{ + + bool GetBool() const { RAPIDJSON_ASSERT(IsBool()); return data_.f.flags == kTrueFlag; } + //!< Set boolean value + /*! \post IsBool() == true */ + GenericValue& SetBool(bool b) { this->~GenericValue(); new (this) GenericValue(b); return *this; } + + //@} + + //!@name Object + //@{ + + //! Set this value as an empty object. + /*! \post IsObject() == true */ + GenericValue& SetObject() { this->~GenericValue(); new (this) GenericValue(kObjectType); return *this; } + + //! Get the number of members in the object. + SizeType MemberCount() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size; } + + //! Check whether the object is empty. 
+ bool ObjectEmpty() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size == 0; } + + //! Get a value from an object associated with the name. + /*! \pre IsObject() == true + \tparam T Either \c Ch or \c const \c Ch (template used for disambiguation with \ref operator[](SizeType)) + \note In version 0.1x, if the member is not found, this function returns a null value. This makes issue 7. + Since 0.2, if the name is not correct, it will assert. + If user is unsure whether a member exists, user should use HasMember() first. + A better approach is to use FindMember(). + \note Linear time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >),(GenericValue&)) operator[](T* name) { + GenericValue n(StringRef(name)); + return (*this)[n]; + } + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >),(const GenericValue&)) operator[](T* name) const { return const_cast(*this)[name]; } + + //! Get a value from an object associated with the name. + /*! \pre IsObject() == true + \tparam SourceAllocator Allocator of the \c name value + + \note Compared to \ref operator[](T*), this version is faster because it does not need a StrLen(). + And it can also handle strings with embedded null characters. + + \note Linear time complexity. + */ + template + GenericValue& operator[](const GenericValue& name) { + MemberIterator member = FindMember(name); + if (member != MemberEnd()) + return member->value; + else { + RAPIDJSON_ASSERT(false); // see above note + + // This will generate -Wexit-time-destructors in clang + // static GenericValue NullValue; + // return NullValue; + + // Use static buffer and placement-new to prevent destruction + static char buffer[sizeof(GenericValue)]; + return *new (buffer) GenericValue(); + } + } + template + const GenericValue& operator[](const GenericValue& name) const { return const_cast(*this)[name]; } + +#if RAPIDJSON_HAS_STDSTRING + //! 
Get a value from an object associated with name (string object). + GenericValue& operator[](const std::basic_string& name) { return (*this)[GenericValue(StringRef(name))]; } + const GenericValue& operator[](const std::basic_string& name) const { return (*this)[GenericValue(StringRef(name))]; } +#endif + + //! Const member iterator + /*! \pre IsObject() == true */ + ConstMemberIterator MemberBegin() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer()); } + //! Const \em past-the-end member iterator + /*! \pre IsObject() == true */ + ConstMemberIterator MemberEnd() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer() + data_.o.size); } + //! Member iterator + /*! \pre IsObject() == true */ + MemberIterator MemberBegin() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer()); } + //! \em Past-the-end member iterator + /*! \pre IsObject() == true */ + MemberIterator MemberEnd() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer() + data_.o.size); } + + //! Check whether a member exists in the object. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + bool HasMember(const Ch* name) const { return FindMember(name) != MemberEnd(); } + +#if RAPIDJSON_HAS_STDSTRING + //! Check whether a member exists in the object with string object. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + bool HasMember(const std::basic_string& name) const { return FindMember(name) != MemberEnd(); } +#endif + + //! Check whether a member exists in the object with GenericValue name. + /*! 
+ This version is faster because it does not need a StrLen(). It can also handle string with null character. + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + template + bool HasMember(const GenericValue& name) const { return FindMember(name) != MemberEnd(); } + + //! Find member by name. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + + \note Earlier versions of Rapidjson returned a \c NULL pointer, in case + the requested member doesn't exist. For consistency with e.g. + \c std::map, this has been changed to MemberEnd() now. + \note Linear time complexity. + */ + MemberIterator FindMember(const Ch* name) { + GenericValue n(StringRef(name)); + return FindMember(n); + } + + ConstMemberIterator FindMember(const Ch* name) const { return const_cast(*this).FindMember(name); } + + //! Find member by name. + /*! + This version is faster because it does not need a StrLen(). It can also handle string with null character. + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + + \note Earlier versions of Rapidjson returned a \c NULL pointer, in case + the requested member doesn't exist. For consistency with e.g. + \c std::map, this has been changed to MemberEnd() now. + \note Linear time complexity. 
+ */ + template + MemberIterator FindMember(const GenericValue& name) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(name.IsString()); + MemberIterator member = MemberBegin(); + for ( ; member != MemberEnd(); ++member) + if (name.StringEqual(member->name)) + break; + return member; + } + template ConstMemberIterator FindMember(const GenericValue& name) const { return const_cast(*this).FindMember(name); } + +#if RAPIDJSON_HAS_STDSTRING + //! Find member by string object name. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + */ + MemberIterator FindMember(const std::basic_string& name) { return FindMember(GenericValue(StringRef(name))); } + ConstMemberIterator FindMember(const std::basic_string& name) const { return FindMember(GenericValue(StringRef(name))); } +#endif + + //! Add a member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value Value of any type. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note The ownership of \c name and \c value will be transferred to this object on success. + \pre IsObject() && name.IsString() + \post name.IsNull() && value.IsNull() + \note Amortized Constant time complexity. 
+ */ + GenericValue& AddMember(GenericValue& name, GenericValue& value, Allocator& allocator) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(name.IsString()); + + ObjectData& o = data_.o; + if (o.size >= o.capacity) { + if (o.capacity == 0) { + o.capacity = kDefaultObjectCapacity; + SetMembersPointer(reinterpret_cast(allocator.Malloc(o.capacity * sizeof(Member)))); + } + else { + SizeType oldCapacity = o.capacity; + o.capacity += (oldCapacity + 1) / 2; // grow by factor 1.5 + SetMembersPointer(reinterpret_cast(allocator.Realloc(GetMembersPointer(), oldCapacity * sizeof(Member), o.capacity * sizeof(Member)))); + } + } + Member* members = GetMembersPointer(); + members[o.size].name.RawAssign(name); + members[o.size].value.RawAssign(value); + o.size++; + return *this; + } + + //! Add a constant string value as member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(GenericValue& name, StringRefType value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Add a string object as member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. 
+ \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(GenericValue& name, std::basic_string& value, Allocator& allocator) { + GenericValue v(value, allocator); + return AddMember(name, v, allocator); + } +#endif + + //! Add any primitive value as member (name-value pair) to the object. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param name A string value as name of member. + \param value Value of primitive type \c T as value of member + \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref + AddMember(StringRefType, StringRefType, Allocator&). + All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized Constant time complexity. 
+ */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + AddMember(GenericValue& name, T value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericValue& AddMember(GenericValue&& name, GenericValue&& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(GenericValue&& name, GenericValue& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(GenericValue& name, GenericValue&& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(StringRefType name, GenericValue&& value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + + + //! Add a member (name-value pair) to the object. + /*! \param name A constant string reference as name of member. + \param value Value of any type. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note The ownership of \c value will be transferred to this object on success. + \pre IsObject() + \post value.IsNull() + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(StringRefType name, GenericValue& value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } + + //! Add a constant string value as member (name-value pair) to the object. + /*! \param name A constant string reference as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. 
+ \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(StringRefType,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(StringRefType name, StringRefType value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + + //! Add any primitive value as member (name-value pair) to the object. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param name A constant string reference as name of member. + \param value Value of primitive type \c T as value of member + \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref + AddMember(StringRefType, StringRefType, Allocator&). + All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized Constant time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + AddMember(StringRefType name, T value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } + + //! Remove all members in the object. + /*! This function do not deallocate memory in the object, i.e. the capacity is unchanged. + \note Linear time complexity. + */ + void RemoveAllMembers() { + RAPIDJSON_ASSERT(IsObject()); + for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) + m->~Member(); + data_.o.size = 0; + } + + //! Remove a member in object by its name. + /*! \param name Name of member to be removed. + \return Whether the member existed. 
+ \note This function may reorder the object members. Use \ref + EraseMember(ConstMemberIterator) if you need to preserve the + relative order of the remaining members. + \note Linear time complexity. + */ + bool RemoveMember(const Ch* name) { + GenericValue n(StringRef(name)); + return RemoveMember(n); + } + +#if RAPIDJSON_HAS_STDSTRING + bool RemoveMember(const std::basic_string& name) { return RemoveMember(GenericValue(StringRef(name))); } +#endif + + template + bool RemoveMember(const GenericValue& name) { + MemberIterator m = FindMember(name); + if (m != MemberEnd()) { + RemoveMember(m); + return true; + } + else + return false; + } + + //! Remove a member in object by iterator. + /*! \param m member iterator (obtained by FindMember() or MemberBegin()). + \return the new iterator after removal. + \note This function may reorder the object members. Use \ref + EraseMember(ConstMemberIterator) if you need to preserve the + relative order of the remaining members. + \note Constant time complexity. + */ + MemberIterator RemoveMember(MemberIterator m) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(data_.o.size > 0); + RAPIDJSON_ASSERT(GetMembersPointer() != 0); + RAPIDJSON_ASSERT(m >= MemberBegin() && m < MemberEnd()); + + MemberIterator last(GetMembersPointer() + (data_.o.size - 1)); + if (data_.o.size > 1 && m != last) + *m = *last; // Move the last one to this place + else + m->~Member(); // Only one left, just destroy + --data_.o.size; + return m; + } + + //! Remove a member from an object by iterator. + /*! \param pos iterator to the member to remove + \pre IsObject() == true && \ref MemberBegin() <= \c pos < \ref MemberEnd() + \return Iterator following the removed element. + If the iterator \c pos refers to the last element, the \ref MemberEnd() iterator is returned. + \note This function preserves the relative order of the remaining object + members. If you do not need this, use the more efficient \ref RemoveMember(MemberIterator). 
+ \note Linear time complexity. + */ + MemberIterator EraseMember(ConstMemberIterator pos) { + return EraseMember(pos, pos +1); + } + + //! Remove members in the range [first, last) from an object. + /*! \param first iterator to the first member to remove + \param last iterator following the last member to remove + \pre IsObject() == true && \ref MemberBegin() <= \c first <= \c last <= \ref MemberEnd() + \return Iterator following the last removed element. + \note This function preserves the relative order of the remaining object + members. + \note Linear time complexity. + */ + MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(data_.o.size > 0); + RAPIDJSON_ASSERT(GetMembersPointer() != 0); + RAPIDJSON_ASSERT(first >= MemberBegin()); + RAPIDJSON_ASSERT(first <= last); + RAPIDJSON_ASSERT(last <= MemberEnd()); + + MemberIterator pos = MemberBegin() + (first - MemberBegin()); + for (MemberIterator itr = pos; itr != last; ++itr) + itr->~Member(); + std::memmove(&*pos, &*last, static_cast(MemberEnd() - last) * sizeof(Member)); + data_.o.size -= static_cast(last - first); + return pos; + } + + //! Erase a member in object by its name. + /*! \param name Name of member to be removed. + \return Whether the member existed. + \note Linear time complexity. 
+ */ + bool EraseMember(const Ch* name) { + GenericValue n(StringRef(name)); + return EraseMember(n); + } + +#if RAPIDJSON_HAS_STDSTRING + bool EraseMember(const std::basic_string& name) { return EraseMember(GenericValue(StringRef(name))); } +#endif + + template + bool EraseMember(const GenericValue& name) { + MemberIterator m = FindMember(name); + if (m != MemberEnd()) { + EraseMember(m); + return true; + } + else + return false; + } + + Object GetObject() { RAPIDJSON_ASSERT(IsObject()); return Object(*this); } + ConstObject GetObject() const { RAPIDJSON_ASSERT(IsObject()); return ConstObject(*this); } + + //@} + + //!@name Array + //@{ + + //! Set this value as an empty array. + /*! \post IsArray == true */ + GenericValue& SetArray() { this->~GenericValue(); new (this) GenericValue(kArrayType); return *this; } + + //! Get the number of elements in array. + SizeType Size() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size; } + + //! Get the capacity of array. + SizeType Capacity() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.capacity; } + + //! Check whether the array is empty. + bool Empty() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size == 0; } + + //! Remove all elements in the array. + /*! This function do not deallocate memory in the array, i.e. the capacity is unchanged. + \note Linear time complexity. + */ + void Clear() { + RAPIDJSON_ASSERT(IsArray()); + GenericValue* e = GetElementsPointer(); + for (GenericValue* v = e; v != e + data_.a.size; ++v) + v->~GenericValue(); + data_.a.size = 0; + } + + //! Get an element from array by index. + /*! \pre IsArray() == true + \param index Zero-based index of element. + \see operator[](T*) + */ + GenericValue& operator[](SizeType index) { + RAPIDJSON_ASSERT(IsArray()); + RAPIDJSON_ASSERT(index < data_.a.size); + return GetElementsPointer()[index]; + } + const GenericValue& operator[](SizeType index) const { return const_cast(*this)[index]; } + + //! Element iterator + /*! 
\pre IsArray() == true */ + ValueIterator Begin() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer(); } + //! \em Past-the-end element iterator + /*! \pre IsArray() == true */ + ValueIterator End() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer() + data_.a.size; } + //! Constant element iterator + /*! \pre IsArray() == true */ + ConstValueIterator Begin() const { return const_cast(*this).Begin(); } + //! Constant \em past-the-end element iterator + /*! \pre IsArray() == true */ + ConstValueIterator End() const { return const_cast(*this).End(); } + + //! Request the array to have enough capacity to store elements. + /*! \param newCapacity The capacity that the array at least need to have. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note Linear time complexity. + */ + GenericValue& Reserve(SizeType newCapacity, Allocator &allocator) { + RAPIDJSON_ASSERT(IsArray()); + if (newCapacity > data_.a.capacity) { + SetElementsPointer(reinterpret_cast(allocator.Realloc(GetElementsPointer(), data_.a.capacity * sizeof(GenericValue), newCapacity * sizeof(GenericValue)))); + data_.a.capacity = newCapacity; + } + return *this; + } + + //! Append a GenericValue at the end of the array. + /*! \param value Value to be appended. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \post value.IsNull() == true + \return The value itself for fluent API. + \note The ownership of \c value will be transferred to this array on success. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + \note Amortized constant time complexity. 
+ */ + GenericValue& PushBack(GenericValue& value, Allocator& allocator) { + RAPIDJSON_ASSERT(IsArray()); + if (data_.a.size >= data_.a.capacity) + Reserve(data_.a.capacity == 0 ? kDefaultArrayCapacity : (data_.a.capacity + (data_.a.capacity + 1) / 2), allocator); + GetElementsPointer()[data_.a.size++].RawAssign(value); + return *this; + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericValue& PushBack(GenericValue&& value, Allocator& allocator) { + return PushBack(value, allocator); + } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + + //! Append a constant string reference at the end of the array. + /*! \param value Constant string reference to be appended. + \param allocator Allocator for reallocating memory. It must be the same one used previously. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \return The value itself for fluent API. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + \note Amortized constant time complexity. + \see GenericStringRef + */ + GenericValue& PushBack(StringRefType value, Allocator& allocator) { + return (*this).template PushBack(value, allocator); + } + + //! Append a primitive value at the end of the array. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param value Value of primitive type T to be appended. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \return The value itself for fluent API. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref PushBack(GenericValue&, Allocator&) or \ref + PushBack(StringRefType, Allocator&). 
+ All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized constant time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + PushBack(T value, Allocator& allocator) { + GenericValue v(value); + return PushBack(v, allocator); + } + + //! Remove the last element in the array. + /*! + \note Constant time complexity. + */ + GenericValue& PopBack() { + RAPIDJSON_ASSERT(IsArray()); + RAPIDJSON_ASSERT(!Empty()); + GetElementsPointer()[--data_.a.size].~GenericValue(); + return *this; + } + + //! Remove an element of array by iterator. + /*! + \param pos iterator to the element to remove + \pre IsArray() == true && \ref Begin() <= \c pos < \ref End() + \return Iterator following the removed element. If the iterator pos refers to the last element, the End() iterator is returned. + \note Linear time complexity. + */ + ValueIterator Erase(ConstValueIterator pos) { + return Erase(pos, pos + 1); + } + + //! Remove elements in the range [first, last) of the array. + /*! + \param first iterator to the first element to remove + \param last iterator following the last element to remove + \pre IsArray() == true && \ref Begin() <= \c first <= \c last <= \ref End() + \return Iterator following the last removed element. + \note Linear time complexity. 
+ */ + ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) { + RAPIDJSON_ASSERT(IsArray()); + RAPIDJSON_ASSERT(data_.a.size > 0); + RAPIDJSON_ASSERT(GetElementsPointer() != 0); + RAPIDJSON_ASSERT(first >= Begin()); + RAPIDJSON_ASSERT(first <= last); + RAPIDJSON_ASSERT(last <= End()); + ValueIterator pos = Begin() + (first - Begin()); + for (ValueIterator itr = pos; itr != last; ++itr) + itr->~GenericValue(); + std::memmove(pos, last, static_cast(End() - last) * sizeof(GenericValue)); + data_.a.size -= static_cast(last - first); + return pos; + } + + Array GetArray() { RAPIDJSON_ASSERT(IsArray()); return Array(*this); } + ConstArray GetArray() const { RAPIDJSON_ASSERT(IsArray()); return ConstArray(*this); } + + //@} + + //!@name Number + //@{ + + int GetInt() const { RAPIDJSON_ASSERT(data_.f.flags & kIntFlag); return data_.n.i.i; } + unsigned GetUint() const { RAPIDJSON_ASSERT(data_.f.flags & kUintFlag); return data_.n.u.u; } + int64_t GetInt64() const { RAPIDJSON_ASSERT(data_.f.flags & kInt64Flag); return data_.n.i64; } + uint64_t GetUint64() const { RAPIDJSON_ASSERT(data_.f.flags & kUint64Flag); return data_.n.u64; } + + //! Get the value as double type. + /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessDouble() to check whether the converison is lossless. + */ + double GetDouble() const { + RAPIDJSON_ASSERT(IsNumber()); + if ((data_.f.flags & kDoubleFlag) != 0) return data_.n.d; // exact type, no conversion. + if ((data_.f.flags & kIntFlag) != 0) return data_.n.i.i; // int -> double + if ((data_.f.flags & kUintFlag) != 0) return data_.n.u.u; // unsigned -> double + if ((data_.f.flags & kInt64Flag) != 0) return static_cast(data_.n.i64); // int64_t -> double (may lose precision) + RAPIDJSON_ASSERT((data_.f.flags & kUint64Flag) != 0); return static_cast(data_.n.u64); // uint64_t -> double (may lose precision) + } + + //! Get the value as float type. + /*! 
\note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessFloat() to check whether the converison is lossless. + */ + float GetFloat() const { + return static_cast(GetDouble()); + } + + GenericValue& SetInt(int i) { this->~GenericValue(); new (this) GenericValue(i); return *this; } + GenericValue& SetUint(unsigned u) { this->~GenericValue(); new (this) GenericValue(u); return *this; } + GenericValue& SetInt64(int64_t i64) { this->~GenericValue(); new (this) GenericValue(i64); return *this; } + GenericValue& SetUint64(uint64_t u64) { this->~GenericValue(); new (this) GenericValue(u64); return *this; } + GenericValue& SetDouble(double d) { this->~GenericValue(); new (this) GenericValue(d); return *this; } + GenericValue& SetFloat(float f) { this->~GenericValue(); new (this) GenericValue(f); return *this; } + + //@} + + //!@name String + //@{ + + const Ch* GetString() const { RAPIDJSON_ASSERT(IsString()); return (data_.f.flags & kInlineStrFlag) ? data_.ss.str : GetStringPointer(); } + + //! Get the length of string. + /*! Since rapidjson permits "\\u0000" in the json string, strlen(v.GetString()) may not equal to v.GetStringLength(). + */ + SizeType GetStringLength() const { RAPIDJSON_ASSERT(IsString()); return ((data_.f.flags & kInlineStrFlag) ? (data_.ss.GetLength()) : data_.s.length); } + + //! Set this value as a string without copying source string. + /*! This version has better performance with supplied length, and also support string containing null character. + \param s source string pointer. + \param length The length of source string, excluding the trailing null terminator. + \return The value itself for fluent API. + \post IsString() == true && GetString() == s && GetStringLength() == length + \see SetString(StringRefType) + */ + GenericValue& SetString(const Ch* s, SizeType length) { return SetString(StringRef(s, length)); } + + //! Set this value as a string without copying source string. + /*! 
\param s source string reference + \return The value itself for fluent API. + \post IsString() == true && GetString() == s && GetStringLength() == s.length + */ + GenericValue& SetString(StringRefType s) { this->~GenericValue(); SetStringRaw(s); return *this; } + + //! Set this value as a string by copying from source string. + /*! This version has better performance with supplied length, and also support string containing null character. + \param s source string. + \param length The length of source string, excluding the trailing null terminator. + \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length + */ + GenericValue& SetString(const Ch* s, SizeType length, Allocator& allocator) { this->~GenericValue(); SetStringRaw(StringRef(s, length), allocator); return *this; } + + //! Set this value as a string by copying from source string. + /*! \param s source string. + \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length + */ + GenericValue& SetString(const Ch* s, Allocator& allocator) { return SetString(s, internal::StrLen(s), allocator); } + +#if RAPIDJSON_HAS_STDSTRING + //! Set this value as a string by copying from source string. + /*! \param s source string. + \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \post IsString() == true && GetString() != s.data() && strcmp(GetString(),s.data() == 0 && GetStringLength() == s.size() + \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. 
+ */ + GenericValue& SetString(const std::basic_string& s, Allocator& allocator) { return SetString(s.data(), SizeType(s.size()), allocator); } +#endif + + //@} + + //!@name Array + //@{ + + //! Templated version for checking whether this value is type T. + /*! + \tparam T Either \c bool, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c float, \c const \c char*, \c std::basic_string + */ + template + bool Is() const { return internal::TypeHelper::Is(*this); } + + template + T Get() const { return internal::TypeHelper::Get(*this); } + + template + T Get() { return internal::TypeHelper::Get(*this); } + + template + ValueType& Set(const T& data) { return internal::TypeHelper::Set(*this, data); } + + template + ValueType& Set(const T& data, AllocatorType& allocator) { return internal::TypeHelper::Set(*this, data, allocator); } + + //@} + + //! Generate events of this value to a Handler. + /*! This function adopts the GoF visitor pattern. + Typical usage is to output this JSON value as JSON text via Writer, which is a Handler. + It can also be used to deep clone this value via GenericDocument, which is also a Handler. + \tparam Handler type of handler. + \param handler An object implementing concept Handler. + */ + template + bool Accept(Handler& handler) const { + switch(GetType()) { + case kNullType: return handler.Null(); + case kFalseType: return handler.Bool(false); + case kTrueType: return handler.Bool(true); + + case kObjectType: + if (RAPIDJSON_UNLIKELY(!handler.StartObject())) + return false; + for (ConstMemberIterator m = MemberBegin(); m != MemberEnd(); ++m) { + RAPIDJSON_ASSERT(m->name.IsString()); // User may change the type of name by MemberIterator. 
+ if (RAPIDJSON_UNLIKELY(!handler.Key(m->name.GetString(), m->name.GetStringLength(), (m->name.data_.f.flags & kCopyFlag) != 0))) + return false; + if (RAPIDJSON_UNLIKELY(!m->value.Accept(handler))) + return false; + } + return handler.EndObject(data_.o.size); + + case kArrayType: + if (RAPIDJSON_UNLIKELY(!handler.StartArray())) + return false; + for (const GenericValue* v = Begin(); v != End(); ++v) + if (RAPIDJSON_UNLIKELY(!v->Accept(handler))) + return false; + return handler.EndArray(data_.a.size); + + case kStringType: + return handler.String(GetString(), GetStringLength(), (data_.f.flags & kCopyFlag) != 0); + + default: + RAPIDJSON_ASSERT(GetType() == kNumberType); + if (IsDouble()) return handler.Double(data_.n.d); + else if (IsInt()) return handler.Int(data_.n.i.i); + else if (IsUint()) return handler.Uint(data_.n.u.u); + else if (IsInt64()) return handler.Int64(data_.n.i64); + else return handler.Uint64(data_.n.u64); + } + } + +private: + template friend class GenericValue; + template friend class GenericDocument; + + enum { + kBoolFlag = 0x0008, + kNumberFlag = 0x0010, + kIntFlag = 0x0020, + kUintFlag = 0x0040, + kInt64Flag = 0x0080, + kUint64Flag = 0x0100, + kDoubleFlag = 0x0200, + kStringFlag = 0x0400, + kCopyFlag = 0x0800, + kInlineStrFlag = 0x1000, + + // Initial flags of different types. 
+ kNullFlag = kNullType, + kTrueFlag = kTrueType | kBoolFlag, + kFalseFlag = kFalseType | kBoolFlag, + kNumberIntFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag, + kNumberUintFlag = kNumberType | kNumberFlag | kUintFlag | kUint64Flag | kInt64Flag, + kNumberInt64Flag = kNumberType | kNumberFlag | kInt64Flag, + kNumberUint64Flag = kNumberType | kNumberFlag | kUint64Flag, + kNumberDoubleFlag = kNumberType | kNumberFlag | kDoubleFlag, + kNumberAnyFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag | kUintFlag | kUint64Flag | kDoubleFlag, + kConstStringFlag = kStringType | kStringFlag, + kCopyStringFlag = kStringType | kStringFlag | kCopyFlag, + kShortStringFlag = kStringType | kStringFlag | kCopyFlag | kInlineStrFlag, + kObjectFlag = kObjectType, + kArrayFlag = kArrayType, + + kTypeMask = 0x07 + }; + + static const SizeType kDefaultArrayCapacity = 16; + static const SizeType kDefaultObjectCapacity = 16; + + struct Flag { +#if RAPIDJSON_48BITPOINTER_OPTIMIZATION + char payload[sizeof(SizeType) * 2 + 6]; // 2 x SizeType + lower 48-bit pointer +#elif RAPIDJSON_64BIT + char payload[sizeof(SizeType) * 2 + sizeof(void*) + 6]; // 6 padding bytes +#else + char payload[sizeof(SizeType) * 2 + sizeof(void*) + 2]; // 2 padding bytes +#endif + uint16_t flags; + }; + + struct String { + SizeType length; + SizeType hashcode; //!< reserved + const Ch* str; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + // implementation detail: ShortString can represent zero-terminated strings up to MaxSize chars + // (excluding the terminating zero) and store a value to determine the length of the contained + // string in the last character str[LenPos] by storing "MaxSize - length" there. If the string + // to store has the maximal length of MaxSize then str[LenPos] will be 0 and therefore act as + // the string terminator as well. For getting the string length back from that value just use + // "MaxSize - str[LenPos]". 
+ // This allows to store 13-chars strings in 32-bit mode, 21-chars strings in 64-bit mode, + // 13-chars strings for RAPIDJSON_48BITPOINTER_OPTIMIZATION=1 inline (for `UTF8`-encoded strings). + struct ShortString { + enum { MaxChars = sizeof(static_cast(0)->payload) / sizeof(Ch), MaxSize = MaxChars - 1, LenPos = MaxSize }; + Ch str[MaxChars]; + + inline static bool Usable(SizeType len) { return (MaxSize >= len); } + inline void SetLength(SizeType len) { str[LenPos] = static_cast(MaxSize - len); } + inline SizeType GetLength() const { return static_cast(MaxSize - str[LenPos]); } + }; // at most as many bytes as "String" above => 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + // By using proper binary layout, retrieval of different integer types do not need conversions. + union Number { +#if RAPIDJSON_ENDIAN == RAPIDJSON_LITTLEENDIAN + struct I { + int i; + char padding[4]; + }i; + struct U { + unsigned u; + char padding2[4]; + }u; +#else + struct I { + char padding[4]; + int i; + }i; + struct U { + char padding2[4]; + unsigned u; + }u; +#endif + int64_t i64; + uint64_t u64; + double d; + }; // 8 bytes + + struct ObjectData { + SizeType size; + SizeType capacity; + Member* members; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + struct ArrayData { + SizeType size; + SizeType capacity; + GenericValue* elements; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + union Data { + String s; + ShortString ss; + Number n; + ObjectData o; + ArrayData a; + Flag f; + }; // 16 bytes in 32-bit mode, 24 bytes in 64-bit mode, 16 bytes in 64-bit with RAPIDJSON_48BITPOINTER_OPTIMIZATION + + RAPIDJSON_FORCEINLINE const Ch* GetStringPointer() const { return RAPIDJSON_GETPOINTER(Ch, data_.s.str); } + RAPIDJSON_FORCEINLINE const Ch* SetStringPointer(const Ch* str) { return RAPIDJSON_SETPOINTER(Ch, data_.s.str, str); } + RAPIDJSON_FORCEINLINE GenericValue* GetElementsPointer() const { return RAPIDJSON_GETPOINTER(GenericValue, data_.a.elements); } + 
RAPIDJSON_FORCEINLINE GenericValue* SetElementsPointer(GenericValue* elements) { return RAPIDJSON_SETPOINTER(GenericValue, data_.a.elements, elements); } + RAPIDJSON_FORCEINLINE Member* GetMembersPointer() const { return RAPIDJSON_GETPOINTER(Member, data_.o.members); } + RAPIDJSON_FORCEINLINE Member* SetMembersPointer(Member* members) { return RAPIDJSON_SETPOINTER(Member, data_.o.members, members); } + + // Initialize this value as array with initial data, without calling destructor. + void SetArrayRaw(GenericValue* values, SizeType count, Allocator& allocator) { + data_.f.flags = kArrayFlag; + if (count) { + GenericValue* e = static_cast(allocator.Malloc(count * sizeof(GenericValue))); + SetElementsPointer(e); + std::memcpy(e, values, count * sizeof(GenericValue)); + } + else + SetElementsPointer(0); + data_.a.size = data_.a.capacity = count; + } + + //! Initialize this value as object with initial data, without calling destructor. + void SetObjectRaw(Member* members, SizeType count, Allocator& allocator) { + data_.f.flags = kObjectFlag; + if (count) { + Member* m = static_cast(allocator.Malloc(count * sizeof(Member))); + SetMembersPointer(m); + std::memcpy(m, members, count * sizeof(Member)); + } + else + SetMembersPointer(0); + data_.o.size = data_.o.capacity = count; + } + + //! Initialize this value as constant string, without calling destructor. + void SetStringRaw(StringRefType s) RAPIDJSON_NOEXCEPT { + data_.f.flags = kConstStringFlag; + SetStringPointer(s); + data_.s.length = s.length; + } + + //! Initialize this value as copy string with initial data, without calling destructor. 
+ void SetStringRaw(StringRefType s, Allocator& allocator) { + Ch* str = 0; + if (ShortString::Usable(s.length)) { + data_.f.flags = kShortStringFlag; + data_.ss.SetLength(s.length); + str = data_.ss.str; + } else { + data_.f.flags = kCopyStringFlag; + data_.s.length = s.length; + str = static_cast(allocator.Malloc((s.length + 1) * sizeof(Ch))); + SetStringPointer(str); + } + std::memcpy(str, s, s.length * sizeof(Ch)); + str[s.length] = '\0'; + } + + //! Assignment without calling destructor + void RawAssign(GenericValue& rhs) RAPIDJSON_NOEXCEPT { + data_ = rhs.data_; + // data_.f.flags = rhs.data_.f.flags; + rhs.data_.f.flags = kNullFlag; + } + + template + bool StringEqual(const GenericValue& rhs) const { + RAPIDJSON_ASSERT(IsString()); + RAPIDJSON_ASSERT(rhs.IsString()); + + const SizeType len1 = GetStringLength(); + const SizeType len2 = rhs.GetStringLength(); + if(len1 != len2) { return false; } + + const Ch* const str1 = GetString(); + const Ch* const str2 = rhs.GetString(); + if(str1 == str2) { return true; } // fast path for constant string + + return (std::memcmp(str1, str2, sizeof(Ch) * len1) == 0); + } + + Data data_; +}; + +//! GenericValue with UTF8 encoding +typedef GenericValue > Value; + +/////////////////////////////////////////////////////////////////////////////// +// GenericDocument + +//! A document for parsing JSON text as DOM. +/*! + \note implements Handler concept + \tparam Encoding Encoding for both parsing and string storage. + \tparam Allocator Allocator for allocating memory for the DOM + \tparam StackAllocator Allocator for allocating memory for stack during parsing. + \warning Although GenericDocument inherits from GenericValue, the API does \b not provide any virtual functions, especially no virtual destructor. To avoid memory leaks, do not \c delete a GenericDocument object via a pointer to a GenericValue. 
+*/ +template , typename StackAllocator = CrtAllocator> +class GenericDocument : public GenericValue { +public: + typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding. + typedef GenericValue ValueType; //!< Value type of the document. + typedef Allocator AllocatorType; //!< Allocator type from template parameter. + + //! Constructor + /*! Creates an empty document of specified type. + \param type Mandatory type of object to create. + \param allocator Optional allocator for allocating memory. + \param stackCapacity Optional initial capacity of stack in bytes. + \param stackAllocator Optional allocator for allocating memory for stack. + */ + explicit GenericDocument(Type type, Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) : + GenericValue(type), allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() + { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + } + + //! Constructor + /*! Creates an empty document which type is Null. + \param allocator Optional allocator for allocating memory. + \param stackCapacity Optional initial capacity of stack in bytes. + \param stackAllocator Optional allocator for allocating memory for stack. + */ + GenericDocument(Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) : + allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() + { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! 
Move constructor in C++11 + GenericDocument(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT + : ValueType(std::forward(rhs)), // explicit cast to avoid prohibited move from Document + allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + stack_(std::move(rhs.stack_)), + parseResult_(rhs.parseResult_) + { + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.parseResult_ = ParseResult(); + } +#endif + + ~GenericDocument() { + Destroy(); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move assignment in C++11 + GenericDocument& operator=(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT + { + // The cast to ValueType is necessary here, because otherwise it would + // attempt to call GenericValue's templated assignment operator. + ValueType::operator=(std::forward(rhs)); + + // Calling the destructor here would prematurely call stack_'s destructor + Destroy(); + + allocator_ = rhs.allocator_; + ownAllocator_ = rhs.ownAllocator_; + stack_ = std::move(rhs.stack_); + parseResult_ = rhs.parseResult_; + + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.parseResult_ = ParseResult(); + + return *this; + } +#endif + + //! Exchange the contents of this document with those of another. + /*! + \param rhs Another document. + \note Constant complexity. + \see GenericValue::Swap + */ + GenericDocument& Swap(GenericDocument& rhs) RAPIDJSON_NOEXCEPT { + ValueType::Swap(rhs); + stack_.Swap(rhs.stack_); + internal::Swap(allocator_, rhs.allocator_); + internal::Swap(ownAllocator_, rhs.ownAllocator_); + internal::Swap(parseResult_, rhs.parseResult_); + return *this; + } + + //! free-standing swap function helper + /*! + Helper function to enable support for common swap implementation pattern based on \c std::swap: + \code + void swap(MyClass& a, MyClass& b) { + using std::swap; + swap(a.doc, b.doc); + // ... + } + \endcode + \see Swap() + */ + friend inline void swap(GenericDocument& a, GenericDocument& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } + + //! 
Populate this document by a generator which produces SAX events. + /*! \tparam Generator A functor with bool f(Handler) prototype. + \param g Generator functor which sends SAX events to the parameter. + \return The document itself for fluent API. + */ + template + GenericDocument& Populate(Generator& g) { + ClearStackOnExit scope(*this); + if (g(*this)) { + RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object + ValueType::operator=(*stack_.template Pop(1));// Move value from stack to document + } + return *this; + } + + //!@name Parse from stream + //!@{ + + //! Parse JSON text from an input stream (with Encoding conversion) + /*! \tparam parseFlags Combination of \ref ParseFlag. + \tparam SourceEncoding Encoding of input stream + \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + GenericReader reader( + stack_.HasAllocator() ? &stack_.GetAllocator() : 0); + ClearStackOnExit scope(*this); + parseResult_ = reader.template Parse(is, *this); + if (parseResult_) { + RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object + ValueType::operator=(*stack_.template Pop(1));// Move value from stack to document + } + return *this; + } + + //! Parse JSON text from an input stream + /*! \tparam parseFlags Combination of \ref ParseFlag. + \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + return ParseStream(is); + } + + //! Parse JSON text from an input stream (with \ref kParseDefaultFlags) + /*! \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. 
+ */ + template + GenericDocument& ParseStream(InputStream& is) { + return ParseStream(is); + } + //!@} + + //!@name Parse in-place from mutable string + //!@{ + + //! Parse JSON text from a mutable string + /*! \tparam parseFlags Combination of \ref ParseFlag. + \param str Mutable zero-terminated string to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseInsitu(Ch* str) { + GenericInsituStringStream s(str); + return ParseStream(s); + } + + //! Parse JSON text from a mutable string (with \ref kParseDefaultFlags) + /*! \param str Mutable zero-terminated string to be parsed. + \return The document itself for fluent API. + */ + GenericDocument& ParseInsitu(Ch* str) { + return ParseInsitu(str); + } + //!@} + + //!@name Parse from read-only string + //!@{ + + //! Parse JSON text from a read-only string (with Encoding conversion) + /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). + \tparam SourceEncoding Transcoding from input Encoding + \param str Read-only zero-terminated string to be parsed. + */ + template + GenericDocument& Parse(const typename SourceEncoding::Ch* str) { + RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); + GenericStringStream s(str); + return ParseStream(s); + } + + //! Parse JSON text from a read-only string + /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). + \param str Read-only zero-terminated string to be parsed. + */ + template + GenericDocument& Parse(const Ch* str) { + return Parse(str); + } + + //! Parse JSON text from a read-only string (with \ref kParseDefaultFlags) + /*! \param str Read-only zero-terminated string to be parsed. 
+ */ + GenericDocument& Parse(const Ch* str) { + return Parse(str); + } + + template + GenericDocument& Parse(const typename SourceEncoding::Ch* str, size_t length) { + RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); + MemoryStream ms(static_cast(str), length * sizeof(typename SourceEncoding::Ch)); + EncodedInputStream is(ms); + ParseStream(is); + return *this; + } + + template + GenericDocument& Parse(const Ch* str, size_t length) { + return Parse(str, length); + } + + GenericDocument& Parse(const Ch* str, size_t length) { + return Parse(str, length); + } + +#if RAPIDJSON_HAS_STDSTRING + template + GenericDocument& Parse(const std::basic_string& str) { + // c_str() is constant complexity according to standard. Should be faster than Parse(const char*, size_t) + return Parse(str.c_str()); + } + + template + GenericDocument& Parse(const std::basic_string& str) { + return Parse(str.c_str()); + } + + GenericDocument& Parse(const std::basic_string& str) { + return Parse(str); + } +#endif // RAPIDJSON_HAS_STDSTRING + + //!@} + + //!@name Handling parse errors + //!@{ + + //! Whether a parse error has occured in the last parsing. + bool HasParseError() const { return parseResult_.IsError(); } + + //! Get the \ref ParseErrorCode of last parsing. + ParseErrorCode GetParseError() const { return parseResult_.Code(); } + + //! Get the position of last parsing error in input, 0 otherwise. + size_t GetErrorOffset() const { return parseResult_.Offset(); } + + //! Implicit conversion to get the last parse result +#ifndef __clang // -Wdocumentation + /*! \return \ref ParseResult of the last parse operation + + \code + Document doc; + ParseResult ok = doc.Parse(json); + if (!ok) + printf( "JSON parse error: %s (%u)\n", GetParseError_En(ok.Code()), ok.Offset()); + \endcode + */ +#endif + operator ParseResult() const { return parseResult_; } + //!@} + + //! Get the allocator of this document. 
+ Allocator& GetAllocator() { + RAPIDJSON_ASSERT(allocator_); + return *allocator_; + } + + //! Get the capacity of stack in bytes. + size_t GetStackCapacity() const { return stack_.GetCapacity(); } + +private: + // clear stack on any exit from ParseStream, e.g. due to exception + struct ClearStackOnExit { + explicit ClearStackOnExit(GenericDocument& d) : d_(d) {} + ~ClearStackOnExit() { d_.ClearStack(); } + private: + ClearStackOnExit(const ClearStackOnExit&); + ClearStackOnExit& operator=(const ClearStackOnExit&); + GenericDocument& d_; + }; + + // callers of the following private Handler functions + // template friend class GenericReader; // for parsing + template friend class GenericValue; // for deep copying + +public: + // Implementation of Handler + bool Null() { new (stack_.template Push()) ValueType(); return true; } + bool Bool(bool b) { new (stack_.template Push()) ValueType(b); return true; } + bool Int(int i) { new (stack_.template Push()) ValueType(i); return true; } + bool Uint(unsigned i) { new (stack_.template Push()) ValueType(i); return true; } + bool Int64(int64_t i) { new (stack_.template Push()) ValueType(i); return true; } + bool Uint64(uint64_t i) { new (stack_.template Push()) ValueType(i); return true; } + bool Double(double d) { new (stack_.template Push()) ValueType(d); return true; } + + bool RawNumber(const Ch* str, SizeType length, bool copy) { + if (copy) + new (stack_.template Push()) ValueType(str, length, GetAllocator()); + else + new (stack_.template Push()) ValueType(str, length); + return true; + } + + bool String(const Ch* str, SizeType length, bool copy) { + if (copy) + new (stack_.template Push()) ValueType(str, length, GetAllocator()); + else + new (stack_.template Push()) ValueType(str, length); + return true; + } + + bool StartObject() { new (stack_.template Push()) ValueType(kObjectType); return true; } + + bool Key(const Ch* str, SizeType length, bool copy) { return String(str, length, copy); } + + bool 
EndObject(SizeType memberCount) { + typename ValueType::Member* members = stack_.template Pop(memberCount); + stack_.template Top()->SetObjectRaw(members, memberCount, GetAllocator()); + return true; + } + + bool StartArray() { new (stack_.template Push()) ValueType(kArrayType); return true; } + + bool EndArray(SizeType elementCount) { + ValueType* elements = stack_.template Pop(elementCount); + stack_.template Top()->SetArrayRaw(elements, elementCount, GetAllocator()); + return true; + } + +private: + //! Prohibit copying + GenericDocument(const GenericDocument&); + //! Prohibit assignment + GenericDocument& operator=(const GenericDocument&); + + void ClearStack() { + if (Allocator::kNeedFree) + while (stack_.GetSize() > 0) // Here assumes all elements in stack array are GenericValue (Member is actually 2 GenericValue objects) + (stack_.template Pop(1))->~ValueType(); + else + stack_.Clear(); + stack_.ShrinkToFit(); + } + + void Destroy() { + RAPIDJSON_DELETE(ownAllocator_); + } + + static const size_t kDefaultStackCapacity = 1024; + Allocator* allocator_; + Allocator* ownAllocator_; + internal::Stack stack_; + ParseResult parseResult_; +}; + +//! GenericDocument with UTF8 encoding +typedef GenericDocument > Document; + +// defined here due to the dependency on GenericDocument +template +template +inline +GenericValue::GenericValue(const GenericValue& rhs, Allocator& allocator) +{ + switch (rhs.GetType()) { + case kObjectType: + case kArrayType: { // perform deep copy via SAX Handler + GenericDocument d(&allocator); + rhs.Accept(d); + RawAssign(*d.stack_.template Pop(1)); + } + break; + case kStringType: + if (rhs.data_.f.flags == kConstStringFlag) { + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + } else { + SetStringRaw(StringRef(rhs.GetString(), rhs.GetStringLength()), allocator); + } + break; + default: + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + break; + } +} + +//! 
Helper class for accessing Value of array type. +/*! + Instance of this helper class is obtained by \c GenericValue::GetArray(). + In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. +*/ +template +class GenericArray { +public: + typedef GenericArray ConstArray; + typedef GenericArray Array; + typedef ValueT PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef ValueType* ValueIterator; // This may be const or non-const iterator + typedef const ValueT* ConstValueIterator; + typedef typename ValueType::AllocatorType AllocatorType; + typedef typename ValueType::StringRefType StringRefType; + + template + friend class GenericValue; + + GenericArray(const GenericArray& rhs) : value_(rhs.value_) {} + GenericArray& operator=(const GenericArray& rhs) { value_ = rhs.value_; return *this; } + ~GenericArray() {} + + SizeType Size() const { return value_.Size(); } + SizeType Capacity() const { return value_.Capacity(); } + bool Empty() const { return value_.Empty(); } + void Clear() const { value_.Clear(); } + ValueType& operator[](SizeType index) const { return value_[index]; } + ValueIterator Begin() const { return value_.Begin(); } + ValueIterator End() const { return value_.End(); } + GenericArray Reserve(SizeType newCapacity, AllocatorType &allocator) const { value_.Reserve(newCapacity, allocator); return *this; } + GenericArray PushBack(ValueType& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericArray PushBack(ValueType&& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericArray PushBack(StringRefType value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (const GenericArray&)) 
PushBack(T value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } + GenericArray PopBack() const { value_.PopBack(); return *this; } + ValueIterator Erase(ConstValueIterator pos) const { return value_.Erase(pos); } + ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) const { return value_.Erase(first, last); } + +#if RAPIDJSON_HAS_CXX11_RANGE_FOR + ValueIterator begin() const { return value_.Begin(); } + ValueIterator end() const { return value_.End(); } +#endif + +private: + GenericArray(); + GenericArray(ValueType& value) : value_(value) {} + ValueType& value_; +}; + +//! Helper class for accessing Value of object type. +/*! + Instance of this helper class is obtained by \c GenericValue::GetObject(). + In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. +*/ +template +class GenericObject { +public: + typedef GenericObject ConstObject; + typedef GenericObject Object; + typedef ValueT PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef GenericMemberIterator MemberIterator; // This may be const or non-const iterator + typedef GenericMemberIterator ConstMemberIterator; + typedef typename ValueType::AllocatorType AllocatorType; + typedef typename ValueType::StringRefType StringRefType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename ValueType::Ch Ch; + + template + friend class GenericValue; + + GenericObject(const GenericObject& rhs) : value_(rhs.value_) {} + GenericObject& operator=(const GenericObject& rhs) { value_ = rhs.value_; return *this; } + ~GenericObject() {} + + SizeType MemberCount() const { return value_.MemberCount(); } + bool ObjectEmpty() const { return value_.ObjectEmpty(); } + template ValueType& operator[](T* name) const { return value_[name]; } + template ValueType& operator[](const GenericValue& name) const { return value_[name]; } +#if RAPIDJSON_HAS_STDSTRING + ValueType& 
operator[](const std::basic_string& name) const { return value_[name]; } +#endif + MemberIterator MemberBegin() const { return value_.MemberBegin(); } + MemberIterator MemberEnd() const { return value_.MemberEnd(); } + bool HasMember(const Ch* name) const { return value_.HasMember(name); } +#if RAPIDJSON_HAS_STDSTRING + bool HasMember(const std::basic_string& name) const { return value_.HasMember(name); } +#endif + template bool HasMember(const GenericValue& name) const { return value_.HasMember(name); } + MemberIterator FindMember(const Ch* name) const { return value_.FindMember(name); } + template MemberIterator FindMember(const GenericValue& name) const { return value_.FindMember(name); } +#if RAPIDJSON_HAS_STDSTRING + MemberIterator FindMember(const std::basic_string& name) const { return value_.FindMember(name); } +#endif + GenericObject AddMember(ValueType& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(ValueType& name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#if RAPIDJSON_HAS_STDSTRING + GenericObject AddMember(ValueType& name, std::basic_string& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#endif + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) AddMember(ValueType& name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericObject AddMember(ValueType&& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(ValueType&& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(ValueType& name, ValueType&& value, AllocatorType& 
allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(StringRefType name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericObject AddMember(StringRefType name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(StringRefType name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericObject)) AddMember(StringRefType name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + void RemoveAllMembers() { return value_.RemoveAllMembers(); } + bool RemoveMember(const Ch* name) const { return value_.RemoveMember(name); } +#if RAPIDJSON_HAS_STDSTRING + bool RemoveMember(const std::basic_string& name) const { return value_.RemoveMember(name); } +#endif + template bool RemoveMember(const GenericValue& name) const { return value_.RemoveMember(name); } + MemberIterator RemoveMember(MemberIterator m) const { return value_.RemoveMember(m); } + MemberIterator EraseMember(ConstMemberIterator pos) const { return value_.EraseMember(pos); } + MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) const { return value_.EraseMember(first, last); } + bool EraseMember(const Ch* name) const { return value_.EraseMember(name); } +#if RAPIDJSON_HAS_STDSTRING + bool EraseMember(const std::basic_string& name) const { return EraseMember(ValueType(StringRef(name))); } +#endif + template bool EraseMember(const GenericValue& name) const { return value_.EraseMember(name); } + +#if RAPIDJSON_HAS_CXX11_RANGE_FOR + MemberIterator begin() const { return value_.MemberBegin(); } + MemberIterator end() const { return value_.MemberEnd(); } 
+#endif + +private: + GenericObject(); + GenericObject(ValueType& value) : value_(value) {} + ValueType& value_; +}; + +RAPIDJSON_NAMESPACE_END +RAPIDJSON_DIAG_POP + +#endif // RAPIDJSON_DOCUMENT_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/encodedstream.h b/sql-odbc/libraries/rapidjson/include/rapidjson/encodedstream.h new file mode 100644 index 0000000000..145068386a --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/encodedstream.h @@ -0,0 +1,299 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ENCODEDSTREAM_H_ +#define RAPIDJSON_ENCODEDSTREAM_H_ + +#include "stream.h" +#include "memorystream.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Input byte stream wrapper with a statically bound encoding. +/*! + \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. + \tparam InputByteStream Type of input byte stream. For example, FileReadStream. 
+*/ +template +class EncodedInputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); +public: + typedef typename Encoding::Ch Ch; + + EncodedInputStream(InputByteStream& is) : is_(is) { + current_ = Encoding::TakeBOM(is_); + } + + Ch Peek() const { return current_; } + Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; } + size_t Tell() const { return is_.Tell(); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + EncodedInputStream(const EncodedInputStream&); + EncodedInputStream& operator=(const EncodedInputStream&); + + InputByteStream& is_; + Ch current_; +}; + +//! Specialized for UTF8 MemoryStream. +template <> +class EncodedInputStream, MemoryStream> { +public: + typedef UTF8<>::Ch Ch; + + EncodedInputStream(MemoryStream& is) : is_(is) { + if (static_cast(is_.Peek()) == 0xEFu) is_.Take(); + if (static_cast(is_.Peek()) == 0xBBu) is_.Take(); + if (static_cast(is_.Peek()) == 0xBFu) is_.Take(); + } + Ch Peek() const { return is_.Peek(); } + Ch Take() { return is_.Take(); } + size_t Tell() const { return is_.Tell(); } + + // Not implemented + void Put(Ch) {} + void Flush() {} + Ch* PutBegin() { return 0; } + size_t PutEnd(Ch*) { return 0; } + + MemoryStream& is_; + +private: + EncodedInputStream(const EncodedInputStream&); + EncodedInputStream& operator=(const EncodedInputStream&); +}; + +//! Output byte stream wrapper with statically bound encoding. +/*! + \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. + \tparam OutputByteStream Type of input byte stream. For example, FileWriteStream. 
+*/ +template +class EncodedOutputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); +public: + typedef typename Encoding::Ch Ch; + + EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) { + if (putBOM) + Encoding::PutBOM(os_); + } + + void Put(Ch c) { Encoding::Put(os_, c); } + void Flush() { os_.Flush(); } + + // Not implemented + Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} + Ch Take() { RAPIDJSON_ASSERT(false); return 0;} + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + EncodedOutputStream(const EncodedOutputStream&); + EncodedOutputStream& operator=(const EncodedOutputStream&); + + OutputByteStream& os_; +}; + +#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8::x, UTF16LE::x, UTF16BE::x, UTF32LE::x, UTF32BE::x + +//! Input stream wrapper with dynamically bound encoding and automatic encoding detection. +/*! + \tparam CharType Type of character for reading. + \tparam InputByteStream type of input byte stream to be wrapped. +*/ +template +class AutoUTFInputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); +public: + typedef CharType Ch; + + //! Constructor. + /*! + \param is input stream to be wrapped. + \param type UTF encoding type if it is not detected from the stream. 
+ */ + AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) { + RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); + DetectType(); + static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) }; + takeFunc_ = f[type_]; + current_ = takeFunc_(*is_); + } + + UTFType GetType() const { return type_; } + bool HasBOM() const { return hasBOM_; } + + Ch Peek() const { return current_; } + Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; } + size_t Tell() const { return is_->Tell(); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + AutoUTFInputStream(const AutoUTFInputStream&); + AutoUTFInputStream& operator=(const AutoUTFInputStream&); + + // Detect encoding type with BOM or RFC 4627 + void DetectType() { + // BOM (Byte Order Mark): + // 00 00 FE FF UTF-32BE + // FF FE 00 00 UTF-32LE + // FE FF UTF-16BE + // FF FE UTF-16LE + // EF BB BF UTF-8 + + const unsigned char* c = reinterpret_cast(is_->Peek4()); + if (!c) + return; + + unsigned bom = static_cast(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24)); + hasBOM_ = false; + if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } + else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } + else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); } + else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); } + else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); } + + // RFC 4627: Section 3 + // "Since the first two characters of a JSON text will always be ASCII + // characters [RFC0020], it is possible to determine 
whether an octet + // stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking + // at the pattern of nulls in the first four octets." + // 00 00 00 xx UTF-32BE + // 00 xx 00 xx UTF-16BE + // xx 00 00 00 UTF-32LE + // xx 00 xx 00 UTF-16LE + // xx xx xx xx UTF-8 + + if (!hasBOM_) { + unsigned pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0); + switch (pattern) { + case 0x08: type_ = kUTF32BE; break; + case 0x0A: type_ = kUTF16BE; break; + case 0x01: type_ = kUTF32LE; break; + case 0x05: type_ = kUTF16LE; break; + case 0x0F: type_ = kUTF8; break; + default: break; // Use type defined by user. + } + } + + // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. + if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); + if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); + } + + typedef Ch (*TakeFunc)(InputByteStream& is); + InputByteStream* is_; + UTFType type_; + Ch current_; + TakeFunc takeFunc_; + bool hasBOM_; +}; + +//! Output stream wrapper with dynamically bound encoding and automatic encoding detection. +/*! + \tparam CharType Type of character for writing. + \tparam OutputByteStream type of output byte stream to be wrapped. +*/ +template +class AutoUTFOutputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); +public: + typedef CharType Ch; + + //! Constructor. + /*! + \param os output stream to be wrapped. + \param type UTF encoding type. + \param putBOM Whether to write BOM at the beginning of the stream. + */ + AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) { + RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); + + // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. 
+ if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); + if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); + + static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) }; + putFunc_ = f[type_]; + + if (putBOM) + PutBOM(); + } + + UTFType GetType() const { return type_; } + + void Put(Ch c) { putFunc_(*os_, c); } + void Flush() { os_->Flush(); } + + // Not implemented + Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} + Ch Take() { RAPIDJSON_ASSERT(false); return 0;} + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + AutoUTFOutputStream(const AutoUTFOutputStream&); + AutoUTFOutputStream& operator=(const AutoUTFOutputStream&); + + void PutBOM() { + typedef void (*PutBOMFunc)(OutputByteStream&); + static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) }; + f[type_](*os_); + } + + typedef void (*PutFunc)(OutputByteStream&, Ch); + + OutputByteStream* os_; + UTFType type_; + PutFunc putFunc_; +}; + +#undef RAPIDJSON_ENCODINGS_FUNC + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/encodings.h b/sql-odbc/libraries/rapidjson/include/rapidjson/encodings.h new file mode 100644 index 0000000000..baa7c2b17f --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/encodings.h @@ -0,0 +1,716 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ENCODINGS_H_ +#define RAPIDJSON_ENCODINGS_H_ + +#include "rapidjson.h" + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data +RAPIDJSON_DIAG_OFF(4702) // unreachable code +#elif defined(__GNUC__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +RAPIDJSON_DIAG_OFF(overflow) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Encoding + +/*! \class rapidjson::Encoding + \brief Concept for encoding of Unicode characters. + +\code +concept Encoding { + typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition. + + enum { supportUnicode = 1 }; // or 0 if not supporting unicode + + //! \brief Encode a Unicode codepoint to an output stream. + //! \param os Output stream. + //! \param codepoint An unicode codepoint, ranging from 0x0 to 0x10FFFF inclusively. + template + static void Encode(OutputStream& os, unsigned codepoint); + + //! \brief Decode a Unicode codepoint from an input stream. + //! \param is Input stream. + //! \param codepoint Output of the unicode codepoint. + //! \return true if a valid codepoint can be decoded from the stream. + template + static bool Decode(InputStream& is, unsigned* codepoint); + + //! \brief Validate one Unicode codepoint from an encoded stream. + //! \param is Input stream to obtain codepoint. + //! \param os Output for copying one codepoint. + //! \return true if it is valid. + //! 
\note This function just validating and copying the codepoint without actually decode it. + template + static bool Validate(InputStream& is, OutputStream& os); + + // The following functions are deal with byte streams. + + //! Take a character from input byte stream, skip BOM if exist. + template + static CharType TakeBOM(InputByteStream& is); + + //! Take a character from input byte stream. + template + static Ch Take(InputByteStream& is); + + //! Put BOM to output byte stream. + template + static void PutBOM(OutputByteStream& os); + + //! Put a character to output byte stream. + template + static void Put(OutputByteStream& os, Ch c); +}; +\endcode +*/ + +/////////////////////////////////////////////////////////////////////////////// +// UTF8 + +//! UTF-8 encoding. +/*! http://en.wikipedia.org/wiki/UTF-8 + http://tools.ietf.org/html/rfc3629 + \tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char. + \note implements Encoding concept +*/ +template +struct UTF8 { + typedef CharType Ch; + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + if (codepoint <= 0x7F) + os.Put(static_cast(codepoint & 0xFF)); + else if (codepoint <= 0x7FF) { + os.Put(static_cast(0xC0 | ((codepoint >> 6) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint & 0x3F)))); + } + else if (codepoint <= 0xFFFF) { + os.Put(static_cast(0xE0 | ((codepoint >> 12) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + os.Put(static_cast(0x80 | (codepoint & 0x3F))); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + os.Put(static_cast(0xF0 | ((codepoint >> 18) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint >> 12) & 0x3F))); + os.Put(static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + os.Put(static_cast(0x80 | (codepoint & 0x3F))); + } + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + if (codepoint <= 0x7F) + PutUnsafe(os, static_cast(codepoint & 0xFF)); + else if (codepoint <= 
0x7FF) { + PutUnsafe(os, static_cast(0xC0 | ((codepoint >> 6) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint & 0x3F)))); + } + else if (codepoint <= 0xFFFF) { + PutUnsafe(os, static_cast(0xE0 | ((codepoint >> 12) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | (codepoint & 0x3F))); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + PutUnsafe(os, static_cast(0xF0 | ((codepoint >> 18) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 12) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | (codepoint & 0x3F))); + } + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { +#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast(c) & 0x3Fu) +#define TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define TAIL() COPY(); TRANS(0x70) + typename InputStream::Ch c = is.Take(); + if (!(c & 0x80)) { + *codepoint = static_cast(c); + return true; + } + + unsigned char type = GetRange(static_cast(c)); + if (type >= 32) { + *codepoint = 0; + } else { + *codepoint = (0xFF >> type) & static_cast(c); + } + bool result = true; + switch (type) { + case 2: TAIL(); return result; + case 3: TAIL(); TAIL(); return result; + case 4: COPY(); TRANS(0x50); TAIL(); return result; + case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; + case 6: TAIL(); TAIL(); TAIL(); return result; + case 10: COPY(); TRANS(0x20); TAIL(); return result; + case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result; + default: return false; + } +#undef COPY +#undef TRANS +#undef TAIL + } + + template + static bool Validate(InputStream& is, OutputStream& os) { +#define COPY() os.Put(c = is.Take()) +#define TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define TAIL() COPY(); TRANS(0x70) + Ch c; + COPY(); + if (!(c & 0x80)) + return true; + + bool result = true; + switch 
(GetRange(static_cast(c))) { + case 2: TAIL(); return result; + case 3: TAIL(); TAIL(); return result; + case 4: COPY(); TRANS(0x50); TAIL(); return result; + case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; + case 6: TAIL(); TAIL(); TAIL(); return result; + case 10: COPY(); TRANS(0x20); TAIL(); return result; + case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result; + default: return false; + } +#undef COPY +#undef TRANS +#undef TAIL + } + + static unsigned char GetRange(unsigned char c) { + // Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + // With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types. + static const unsigned char type[] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10, + 0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40, + 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, + 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, + 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, + 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, + }; + return type[c]; + } + + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + typename InputByteStream::Ch c = Take(is); + if (static_cast(c) != 0xEFu) return c; + c = is.Take(); + if (static_cast(c) != 0xBBu) return c; + c = is.Take(); + if (static_cast(c) != 0xBFu) return c; + c = is.Take(); + return c; + } + + template + static Ch Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + return 
static_cast(is.Take()); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xEFu)); + os.Put(static_cast(0xBBu)); + os.Put(static_cast(0xBFu)); + } + + template + static void Put(OutputByteStream& os, Ch c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(c)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// UTF16 + +//! UTF-16 encoding. +/*! http://en.wikipedia.org/wiki/UTF-16 + http://tools.ietf.org/html/rfc2781 + \tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead. + \note implements Encoding concept + + \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness. + For streaming, use UTF16LE and UTF16BE, which handle endianness. +*/ +template +struct UTF16 { + typedef CharType Ch; + RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2); + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + if (codepoint <= 0xFFFF) { + RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair + os.Put(static_cast(codepoint)); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + unsigned v = codepoint - 0x10000; + os.Put(static_cast((v >> 10) | 0xD800)); + os.Put((v & 0x3FF) | 0xDC00); + } + } + + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + if (codepoint <= 0xFFFF) { + RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair + PutUnsafe(os, static_cast(codepoint)); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + unsigned v = codepoint - 0x10000; + 
PutUnsafe(os, static_cast((v >> 10) | 0xD800)); + PutUnsafe(os, (v & 0x3FF) | 0xDC00); + } + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); + typename InputStream::Ch c = is.Take(); + if (c < 0xD800 || c > 0xDFFF) { + *codepoint = static_cast(c); + return true; + } + else if (c <= 0xDBFF) { + *codepoint = (static_cast(c) & 0x3FF) << 10; + c = is.Take(); + *codepoint |= (static_cast(c) & 0x3FF); + *codepoint += 0x10000; + return c >= 0xDC00 && c <= 0xDFFF; + } + return false; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + typename InputStream::Ch c; + os.Put(static_cast(c = is.Take())); + if (c < 0xD800 || c > 0xDFFF) + return true; + else if (c <= 0xDBFF) { + os.Put(c = is.Take()); + return c >= 0xDC00 && c <= 0xDFFF; + } + return false; + } +}; + +//! UTF-16 little endian encoding. +template +struct UTF16LE : UTF16 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0xFEFFu ? Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(is.Take()); + c |= static_cast(static_cast(is.Take())) << 8; + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFFu)); + os.Put(static_cast(0xFEu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(static_cast(c) & 0xFFu)); + os.Put(static_cast((static_cast(c) >> 8) & 0xFFu)); + } +}; + +//! 
UTF-16 big endian encoding. +template +struct UTF16BE : UTF16 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0xFEFFu ? Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(static_cast(is.Take())) << 8; + c |= static_cast(is.Take()); + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0xFFu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast((static_cast(c) >> 8) & 0xFFu)); + os.Put(static_cast(static_cast(c) & 0xFFu)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// UTF32 + +//! UTF-32 encoding. +/*! http://en.wikipedia.org/wiki/UTF-32 + \tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead. + \note implements Encoding concept + + \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness. + For streaming, use UTF32LE and UTF32BE, which handle endianness. 
+*/ +template +struct UTF32 { + typedef CharType Ch; + RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4); + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + os.Put(codepoint); + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + PutUnsafe(os, codepoint); + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); + Ch c = is.Take(); + *codepoint = c; + return c <= 0x10FFFF; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); + Ch c; + os.Put(c = is.Take()); + return c <= 0x10FFFF; + } +}; + +//! UTF-32 little endian enocoding. +template +struct UTF32LE : UTF32 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0x0000FEFFu ? 
Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(is.Take()); + c |= static_cast(static_cast(is.Take())) << 8; + c |= static_cast(static_cast(is.Take())) << 16; + c |= static_cast(static_cast(is.Take())) << 24; + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFFu)); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0x00u)); + os.Put(static_cast(0x00u)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(c & 0xFFu)); + os.Put(static_cast((c >> 8) & 0xFFu)); + os.Put(static_cast((c >> 16) & 0xFFu)); + os.Put(static_cast((c >> 24) & 0xFFu)); + } +}; + +//! UTF-32 big endian encoding. +template +struct UTF32BE : UTF32 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0x0000FEFFu ? 
Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(static_cast(is.Take())) << 24; + c |= static_cast(static_cast(is.Take())) << 16; + c |= static_cast(static_cast(is.Take())) << 8; + c |= static_cast(static_cast(is.Take())); + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0x00u)); + os.Put(static_cast(0x00u)); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0xFFu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast((c >> 24) & 0xFFu)); + os.Put(static_cast((c >> 16) & 0xFFu)); + os.Put(static_cast((c >> 8) & 0xFFu)); + os.Put(static_cast(c & 0xFFu)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// ASCII + +//! ASCII encoding. +/*! http://en.wikipedia.org/wiki/ASCII + \tparam CharType Code unit for storing 7-bit ASCII data. Default is char. 
+ \note implements Encoding concept +*/ +template +struct ASCII { + typedef CharType Ch; + + enum { supportUnicode = 0 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_ASSERT(codepoint <= 0x7F); + os.Put(static_cast(codepoint & 0xFF)); + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_ASSERT(codepoint <= 0x7F); + PutUnsafe(os, static_cast(codepoint & 0xFF)); + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + uint8_t c = static_cast(is.Take()); + *codepoint = c; + return c <= 0X7F; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + uint8_t c = static_cast(is.Take()); + os.Put(static_cast(c)); + return c <= 0x7F; + } + + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + uint8_t c = static_cast(Take(is)); + return static_cast(c); + } + + template + static Ch Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + return static_cast(is.Take()); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + (void)os; + } + + template + static void Put(OutputByteStream& os, Ch c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(c)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// AutoUTF + +//! Runtime-specified UTF encoding type of a stream. +enum UTFType { + kUTF8 = 0, //!< UTF-8. + kUTF16LE = 1, //!< UTF-16 little endian. + kUTF16BE = 2, //!< UTF-16 big endian. + kUTF32LE = 3, //!< UTF-32 little endian. + kUTF32BE = 4 //!< UTF-32 big endian. +}; + +//! Dynamically select encoding according to stream's runtime-specified UTF encoding type. +/*! \note This class can be used with AutoUTFInputtStream and AutoUTFOutputStream, which provides GetType(). 
+*/ +template +struct AutoUTF { + typedef CharType Ch; + + enum { supportUnicode = 1 }; + +#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8::x, UTF16LE::x, UTF16BE::x, UTF32LE::x, UTF32BE::x + + template + RAPIDJSON_FORCEINLINE static void Encode(OutputStream& os, unsigned codepoint) { + typedef void (*EncodeFunc)(OutputStream&, unsigned); + static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) }; + (*f[os.GetType()])(os, codepoint); + } + + template + RAPIDJSON_FORCEINLINE static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + typedef void (*EncodeFunc)(OutputStream&, unsigned); + static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) }; + (*f[os.GetType()])(os, codepoint); + } + + template + RAPIDJSON_FORCEINLINE static bool Decode(InputStream& is, unsigned* codepoint) { + typedef bool (*DecodeFunc)(InputStream&, unsigned*); + static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) }; + return (*f[is.GetType()])(is, codepoint); + } + + template + RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + typedef bool (*ValidateFunc)(InputStream&, OutputStream&); + static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) }; + return (*f[is.GetType()])(is, os); + } + +#undef RAPIDJSON_ENCODINGS_FUNC +}; + +/////////////////////////////////////////////////////////////////////////////// +// Transcoder + +//! Encoding conversion. +template +struct Transcoder { + //! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream. 
+ template + RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { + unsigned codepoint; + if (!SourceEncoding::Decode(is, &codepoint)) + return false; + TargetEncoding::Encode(os, codepoint); + return true; + } + + template + RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + unsigned codepoint; + if (!SourceEncoding::Decode(is, &codepoint)) + return false; + TargetEncoding::EncodeUnsafe(os, codepoint); + return true; + } + + //! Validate one Unicode codepoint from an encoded stream. + template + RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + return Transcode(is, os); // Since source/target encoding is different, must transcode. + } +}; + +// Forward declaration. +template +inline void PutUnsafe(Stream& stream, typename Stream::Ch c); + +//! Specialization of Transcoder with same source and target encoding. +template +struct Transcoder { + template + RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { + os.Put(is.Take()); // Just copy one code unit. This semantic is different from primary template class. + return true; + } + + template + RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + PutUnsafe(os, is.Take()); // Just copy one code unit. This semantic is different from primary template class. 
+ return true; + } + + template + RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + return Encoding::Validate(is, os); // source/target encoding are the same + } +}; + +RAPIDJSON_NAMESPACE_END + +#if defined(__GNUC__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/error/en.h b/sql-odbc/libraries/rapidjson/include/rapidjson/error/en.h new file mode 100644 index 0000000000..2db838bff2 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/error/en.h @@ -0,0 +1,74 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ERROR_EN_H_ +#define RAPIDJSON_ERROR_EN_H_ + +#include "error.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(covered-switch-default) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Maps error code of parsing into error message. +/*! + \ingroup RAPIDJSON_ERRORS + \param parseErrorCode Error code obtained in parsing. + \return the error message. + \note User can make a copy of this function for localization. + Using switch-case is safer for future modification of error codes. 
+*/ +inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) { + switch (parseErrorCode) { + case kParseErrorNone: return RAPIDJSON_ERROR_STRING("No error."); + + case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty."); + case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values."); + + case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value."); + + case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member."); + case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member."); + case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member."); + + case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element."); + + case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string."); + case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid."); + case kParseErrorStringEscapeInvalid: return RAPIDJSON_ERROR_STRING("Invalid escape character in string."); + case kParseErrorStringMissQuotationMark: return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string."); + case kParseErrorStringInvalidEncoding: return RAPIDJSON_ERROR_STRING("Invalid encoding in string."); + + case kParseErrorNumberTooBig: return RAPIDJSON_ERROR_STRING("Number too big to be stored in double."); + case kParseErrorNumberMissFraction: return RAPIDJSON_ERROR_STRING("Miss fraction part in number."); + case kParseErrorNumberMissExponent: return RAPIDJSON_ERROR_STRING("Miss exponent in number."); + + case kParseErrorTermination: return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error."); + case 
kParseErrorUnspecificSyntaxError: return RAPIDJSON_ERROR_STRING("Unspecific syntax error."); + + default: return RAPIDJSON_ERROR_STRING("Unknown error."); + } +} + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ERROR_EN_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/error/error.h b/sql-odbc/libraries/rapidjson/include/rapidjson/error/error.h new file mode 100644 index 0000000000..95cb31a72f --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/error/error.h @@ -0,0 +1,155 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ERROR_ERROR_H_ +#define RAPIDJSON_ERROR_ERROR_H_ + +#include "../rapidjson.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +/*! \file error.h */ + +/*! \defgroup RAPIDJSON_ERRORS RapidJSON error handling */ + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ERROR_CHARTYPE + +//! Character type of error messages. +/*! \ingroup RAPIDJSON_ERRORS + The default character type is \c char. + On Windows, user can define this macro as \c TCHAR for supporting both + unicode/non-unicode settings. 
+*/ +#ifndef RAPIDJSON_ERROR_CHARTYPE +#define RAPIDJSON_ERROR_CHARTYPE char +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ERROR_STRING + +//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[]. +/*! \ingroup RAPIDJSON_ERRORS + By default this conversion macro does nothing. + On Windows, user can define this macro as \c _T(x) for supporting both + unicode/non-unicode settings. +*/ +#ifndef RAPIDJSON_ERROR_STRING +#define RAPIDJSON_ERROR_STRING(x) x +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// ParseErrorCode + +//! Error code of parsing. +/*! \ingroup RAPIDJSON_ERRORS + \see GenericReader::Parse, GenericReader::GetParseErrorCode +*/ +enum ParseErrorCode { + kParseErrorNone = 0, //!< No error. + + kParseErrorDocumentEmpty, //!< The document is empty. + kParseErrorDocumentRootNotSingular, //!< The document root must not follow by other values. + + kParseErrorValueInvalid, //!< Invalid value. + + kParseErrorObjectMissName, //!< Missing a name for object member. + kParseErrorObjectMissColon, //!< Missing a colon after a name of object member. + kParseErrorObjectMissCommaOrCurlyBracket, //!< Missing a comma or '}' after an object member. + + kParseErrorArrayMissCommaOrSquareBracket, //!< Missing a comma or ']' after an array element. + + kParseErrorStringUnicodeEscapeInvalidHex, //!< Incorrect hex digit after \\u escape in string. + kParseErrorStringUnicodeSurrogateInvalid, //!< The surrogate pair in string is invalid. + kParseErrorStringEscapeInvalid, //!< Invalid escape character in string. + kParseErrorStringMissQuotationMark, //!< Missing a closing quotation mark in string. + kParseErrorStringInvalidEncoding, //!< Invalid encoding in string. + + kParseErrorNumberTooBig, //!< Number too big to be stored in double. + kParseErrorNumberMissFraction, //!< Miss fraction part in number. 
+ kParseErrorNumberMissExponent, //!< Miss exponent in number. + + kParseErrorTermination, //!< Parsing was terminated. + kParseErrorUnspecificSyntaxError //!< Unspecific syntax error. +}; + +//! Result of parsing (wraps ParseErrorCode) +/*! + \ingroup RAPIDJSON_ERRORS + \code + Document doc; + ParseResult ok = doc.Parse("[42]"); + if (!ok) { + fprintf(stderr, "JSON parse error: %s (%u)", + GetParseError_En(ok.Code()), ok.Offset()); + exit(EXIT_FAILURE); + } + \endcode + \see GenericReader::Parse, GenericDocument::Parse +*/ +struct ParseResult { +public: + //! Default constructor, no error. + ParseResult() : code_(kParseErrorNone), offset_(0) {} + //! Constructor to set an error. + ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {} + + //! Get the error code. + ParseErrorCode Code() const { return code_; } + //! Get the error offset, if \ref IsError(), 0 otherwise. + size_t Offset() const { return offset_; } + + //! Conversion to \c bool, returns \c true, iff !\ref IsError(). + operator bool() const { return !IsError(); } + //! Whether the result is an error. + bool IsError() const { return code_ != kParseErrorNone; } + + bool operator==(const ParseResult& that) const { return code_ == that.code_; } + bool operator==(ParseErrorCode code) const { return code_ == code; } + friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; } + + //! Reset error code. + void Clear() { Set(kParseErrorNone); } + //! Update error code and offset. + void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; } + +private: + ParseErrorCode code_; + size_t offset_; +}; + +//! Function pointer type of GetParseError(). +/*! \ingroup RAPIDJSON_ERRORS + + This is the prototype for \c GetParseError_X(), where \c X is a locale. 
+ User can dynamically change locale in runtime, e.g.: +\code + GetParseErrorFunc GetParseError = GetParseError_En; // or whatever + const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode()); +\endcode +*/ +typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode); + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ERROR_ERROR_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/filereadstream.h b/sql-odbc/libraries/rapidjson/include/rapidjson/filereadstream.h new file mode 100644 index 0000000000..b56ea13b34 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/filereadstream.h @@ -0,0 +1,99 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_FILEREADSTREAM_H_ +#define RAPIDJSON_FILEREADSTREAM_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(unreachable-code) +RAPIDJSON_DIAG_OFF(missing-noreturn) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! File byte stream for input using fread(). +/*! + \note implements Stream concept +*/ +class FileReadStream { +public: + typedef char Ch; //!< Character type (byte). + + //! Constructor. + /*! + \param fp File pointer opened for read. 
+ \param buffer user-supplied buffer. + \param bufferSize size of buffer in bytes. Must >=4 bytes. + */ + FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { + RAPIDJSON_ASSERT(fp_ != 0); + RAPIDJSON_ASSERT(bufferSize >= 4); + Read(); + } + + Ch Peek() const { return *current_; } + Ch Take() { Ch c = *current_; Read(); return c; } + size_t Tell() const { return count_ + static_cast(current_ - buffer_); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + // For encoding detection only. + const Ch* Peek4() const { + return (current_ + 4 <= bufferLast_) ? current_ : 0; + } + +private: + void Read() { + if (current_ < bufferLast_) + ++current_; + else if (!eof_) { + count_ += readCount_; + readCount_ = fread(buffer_, 1, bufferSize_, fp_); + bufferLast_ = buffer_ + readCount_ - 1; + current_ = buffer_; + + if (readCount_ < bufferSize_) { + buffer_[readCount_] = '\0'; + ++bufferLast_; + eof_ = true; + } + } + } + + std::FILE* fp_; + Ch *buffer_; + size_t bufferSize_; + Ch *bufferLast_; + Ch *current_; + size_t readCount_; + size_t count_; //!< Number of characters read + bool eof_; +}; + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/filewritestream.h b/sql-odbc/libraries/rapidjson/include/rapidjson/filewritestream.h new file mode 100644 index 0000000000..6378dd60ed --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/filewritestream.h @@ -0,0 +1,104 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. 
All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_FILEWRITESTREAM_H_ +#define RAPIDJSON_FILEWRITESTREAM_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(unreachable-code) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Wrapper of C file stream for input using fread(). +/*! + \note implements Stream concept +*/ +class FileWriteStream { +public: + typedef char Ch; //!< Character type. Only support char. + + FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) { + RAPIDJSON_ASSERT(fp_ != 0); + } + + void Put(char c) { + if (current_ >= bufferEnd_) + Flush(); + + *current_++ = c; + } + + void PutN(char c, size_t n) { + size_t avail = static_cast(bufferEnd_ - current_); + while (n > avail) { + std::memset(current_, c, avail); + current_ += avail; + Flush(); + n -= avail; + avail = static_cast(bufferEnd_ - current_); + } + + if (n > 0) { + std::memset(current_, c, n); + current_ += n; + } + } + + void Flush() { + if (current_ != buffer_) { + size_t result = fwrite(buffer_, 1, static_cast(current_ - buffer_), fp_); + if (result < static_cast(current_ - buffer_)) { + // failure deliberately ignored at this time + // added to avoid warn_unused_result build errors + } + current_ = buffer_; + } + } + + // Not implemented + char Peek() const { RAPIDJSON_ASSERT(false); return 0; } + char Take() { RAPIDJSON_ASSERT(false); return 0; 
} + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + // Prohibit copy constructor & assignment operator. + FileWriteStream(const FileWriteStream&); + FileWriteStream& operator=(const FileWriteStream&); + + std::FILE* fp_; + char *buffer_; + char *bufferEnd_; + char *current_; +}; + +//! Implement specialized version of PutN() with memset() for better performance. +template<> +inline void PutN(FileWriteStream& stream, char c, size_t n) { + stream.PutN(c, n); +} + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/fwd.h b/sql-odbc/libraries/rapidjson/include/rapidjson/fwd.h new file mode 100644 index 0000000000..e8104e841b --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/fwd.h @@ -0,0 +1,151 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_FWD_H_ +#define RAPIDJSON_FWD_H_ + +#include "rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN + +// encodings.h + +template struct UTF8; +template struct UTF16; +template struct UTF16BE; +template struct UTF16LE; +template struct UTF32; +template struct UTF32BE; +template struct UTF32LE; +template struct ASCII; +template struct AutoUTF; + +template +struct Transcoder; + +// allocators.h + +class CrtAllocator; + +template +class MemoryPoolAllocator; + +// stream.h + +template +struct GenericStringStream; + +typedef GenericStringStream > StringStream; + +template +struct GenericInsituStringStream; + +typedef GenericInsituStringStream > InsituStringStream; + +// stringbuffer.h + +template +class GenericStringBuffer; + +typedef GenericStringBuffer, CrtAllocator> StringBuffer; + +// filereadstream.h + +class FileReadStream; + +// filewritestream.h + +class FileWriteStream; + +// memorybuffer.h + +template +struct GenericMemoryBuffer; + +typedef GenericMemoryBuffer MemoryBuffer; + +// memorystream.h + +struct MemoryStream; + +// reader.h + +template +struct BaseReaderHandler; + +template +class GenericReader; + +typedef GenericReader, UTF8, CrtAllocator> Reader; + +// writer.h + +template +class Writer; + +// prettywriter.h + +template +class PrettyWriter; + +// document.h + +template +struct GenericMember; + +template +class GenericMemberIterator; + +template +struct GenericStringRef; + +template +class GenericValue; + +typedef GenericValue, MemoryPoolAllocator > Value; + +template +class GenericDocument; + +typedef GenericDocument, MemoryPoolAllocator, CrtAllocator> Document; + +// pointer.h + +template +class GenericPointer; + +typedef GenericPointer Pointer; + +// schema.h + +template +class IGenericRemoteSchemaDocumentProvider; + +template +class GenericSchemaDocument; + +typedef GenericSchemaDocument SchemaDocument; +typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProvider; + +template < + typename SchemaDocumentType, + typename 
OutputHandler, + typename StateAllocator> +class GenericSchemaValidator; + +typedef GenericSchemaValidator, void>, CrtAllocator> SchemaValidator; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_RAPIDJSONFWD_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/biginteger.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/biginteger.h new file mode 100644 index 0000000000..9d3e88c998 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/biginteger.h @@ -0,0 +1,290 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_BIGINTEGER_H_ +#define RAPIDJSON_BIGINTEGER_H_ + +#include "../rapidjson.h" + +#if defined(_MSC_VER) && defined(_M_AMD64) +#include // for _umul128 +#pragma intrinsic(_umul128) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +class BigInteger { +public: + typedef uint64_t Type; + + BigInteger(const BigInteger& rhs) : count_(rhs.count_) { + std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); + } + + explicit BigInteger(uint64_t u) : count_(1) { + digits_[0] = u; + } + + BigInteger(const char* decimals, size_t length) : count_(1) { + RAPIDJSON_ASSERT(length > 0); + digits_[0] = 0; + size_t i = 0; + const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19 + while (length >= kMaxDigitPerIteration) { + AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration); + length -= kMaxDigitPerIteration; + i += kMaxDigitPerIteration; + } + + if (length > 0) + AppendDecimal64(decimals + i, decimals + i + length); + } + + BigInteger& operator=(const BigInteger &rhs) + { + if (this != &rhs) { + count_ = rhs.count_; + std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); + } + return *this; + } + + BigInteger& operator=(uint64_t u) { + digits_[0] = u; + count_ = 1; + return *this; + } + + BigInteger& operator+=(uint64_t u) { + Type backup = digits_[0]; + digits_[0] += u; + for (size_t i = 0; i < count_ - 1; i++) { + if (digits_[i] >= backup) + return *this; // no carry + backup = digits_[i + 1]; + digits_[i + 1] += 1; + } + + // Last carry + if (digits_[count_ - 1] < backup) + PushBack(1); + + return *this; + } + + BigInteger& operator*=(uint64_t u) { + if (u == 0) return *this = 0; + if (u == 1) return *this; + if (*this == 1) return *this = u; + + uint64_t k = 0; + for (size_t i = 0; i < count_; i++) { + uint64_t hi; + digits_[i] = MulAdd64(digits_[i], u, k, &hi); + k = hi; + } + + if (k > 0) + PushBack(k); + + return *this; + } + + BigInteger& operator*=(uint32_t u) { + if (u == 0) return *this = 0; + if 
(u == 1) return *this; + if (*this == 1) return *this = u; + + uint64_t k = 0; + for (size_t i = 0; i < count_; i++) { + const uint64_t c = digits_[i] >> 32; + const uint64_t d = digits_[i] & 0xFFFFFFFF; + const uint64_t uc = u * c; + const uint64_t ud = u * d; + const uint64_t p0 = ud + k; + const uint64_t p1 = uc + (p0 >> 32); + digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32); + k = p1 >> 32; + } + + if (k > 0) + PushBack(k); + + return *this; + } + + BigInteger& operator<<=(size_t shift) { + if (IsZero() || shift == 0) return *this; + + size_t offset = shift / kTypeBit; + size_t interShift = shift % kTypeBit; + RAPIDJSON_ASSERT(count_ + offset <= kCapacity); + + if (interShift == 0) { + std::memmove(&digits_[count_ - 1 + offset], &digits_[count_ - 1], count_ * sizeof(Type)); + count_ += offset; + } + else { + digits_[count_] = 0; + for (size_t i = count_; i > 0; i--) + digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift)); + digits_[offset] = digits_[0] << interShift; + count_ += offset; + if (digits_[count_]) + count_++; + } + + std::memset(digits_, 0, offset * sizeof(Type)); + + return *this; + } + + bool operator==(const BigInteger& rhs) const { + return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0; + } + + bool operator==(const Type rhs) const { + return count_ == 1 && digits_[0] == rhs; + } + + BigInteger& MultiplyPow5(unsigned exp) { + static const uint32_t kPow5[12] = { + 5, + 5 * 5, + 5 * 5 * 5, + 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 + }; + if (exp == 0) return *this; + for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27 + for (; exp >= 13; exp -= 13) *this *= static_cast(1220703125u); 
// 5^13 + if (exp > 0) *this *= kPow5[exp - 1]; + return *this; + } + + // Compute absolute difference of this and rhs. + // Assume this != rhs + bool Difference(const BigInteger& rhs, BigInteger* out) const { + int cmp = Compare(rhs); + RAPIDJSON_ASSERT(cmp != 0); + const BigInteger *a, *b; // Makes a > b + bool ret; + if (cmp < 0) { a = &rhs; b = this; ret = true; } + else { a = this; b = &rhs; ret = false; } + + Type borrow = 0; + for (size_t i = 0; i < a->count_; i++) { + Type d = a->digits_[i] - borrow; + if (i < b->count_) + d -= b->digits_[i]; + borrow = (d > a->digits_[i]) ? 1 : 0; + out->digits_[i] = d; + if (d != 0) + out->count_ = i + 1; + } + + return ret; + } + + int Compare(const BigInteger& rhs) const { + if (count_ != rhs.count_) + return count_ < rhs.count_ ? -1 : 1; + + for (size_t i = count_; i-- > 0;) + if (digits_[i] != rhs.digits_[i]) + return digits_[i] < rhs.digits_[i] ? -1 : 1; + + return 0; + } + + size_t GetCount() const { return count_; } + Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; } + bool IsZero() const { return count_ == 1 && digits_[0] == 0; } + +private: + void AppendDecimal64(const char* begin, const char* end) { + uint64_t u = ParseUint64(begin, end); + if (IsZero()) + *this = u; + else { + unsigned exp = static_cast(end - begin); + (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u + } + } + + void PushBack(Type digit) { + RAPIDJSON_ASSERT(count_ < kCapacity); + digits_[count_++] = digit; + } + + static uint64_t ParseUint64(const char* begin, const char* end) { + uint64_t r = 0; + for (const char* p = begin; p != end; ++p) { + RAPIDJSON_ASSERT(*p >= '0' && *p <= '9'); + r = r * 10u + static_cast(*p - '0'); + } + return r; + } + + // Assume a * b + k < 2^128 + static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) { +#if defined(_MSC_VER) && defined(_M_AMD64) + uint64_t low = _umul128(a, b, outHigh) + k; + if (low < k) + (*outHigh)++; + return 
low; +#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) + __extension__ typedef unsigned __int128 uint128; + uint128 p = static_cast(a) * static_cast(b); + p += k; + *outHigh = static_cast(p >> 64); + return static_cast(p); +#else + const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32; + uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1; + x1 += (x0 >> 32); // can't give carry + x1 += x2; + if (x1 < x2) + x3 += (static_cast(1) << 32); + uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF); + uint64_t hi = x3 + (x1 >> 32); + + lo += k; + if (lo < k) + hi++; + *outHigh = hi; + return lo; +#endif + } + + static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000 + static const size_t kCapacity = kBitCount / sizeof(Type); + static const size_t kTypeBit = sizeof(Type) * 8; + + Type digits_[kCapacity]; + size_t count_; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_BIGINTEGER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/diyfp.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/diyfp.h new file mode 100644 index 0000000000..c9fefdc613 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/diyfp.h @@ -0,0 +1,258 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations under the License. + +// This is a C++ header-only implementation of Grisu2 algorithm from the publication: +// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with +// integers." ACM Sigplan Notices 45.6 (2010): 233-243. + +#ifndef RAPIDJSON_DIYFP_H_ +#define RAPIDJSON_DIYFP_H_ + +#include "../rapidjson.h" + +#if defined(_MSC_VER) && defined(_M_AMD64) +#include +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_umul128) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +struct DiyFp { + DiyFp() : f(), e() {} + + DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {} + + explicit DiyFp(double d) { + union { + double d; + uint64_t u64; + } u = { d }; + + int biased_e = static_cast((u.u64 & kDpExponentMask) >> kDpSignificandSize); + uint64_t significand = (u.u64 & kDpSignificandMask); + if (biased_e != 0) { + f = significand + kDpHiddenBit; + e = biased_e - kDpExponentBias; + } + else { + f = significand; + e = kDpMinExponent + 1; + } + } + + DiyFp operator-(const DiyFp& rhs) const { + return DiyFp(f - rhs.f, e); + } + + DiyFp operator*(const DiyFp& rhs) const { +#if defined(_MSC_VER) && defined(_M_AMD64) + uint64_t h; + uint64_t l = _umul128(f, rhs.f, &h); + if (l & (uint64_t(1) << 63)) // rounding + h++; + return DiyFp(h, e + rhs.e + 64); +#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) + __extension__ typedef unsigned __int128 uint128; + uint128 p = static_cast(f) * static_cast(rhs.f); + uint64_t h = static_cast(p >> 64); + uint64_t l = static_cast(p); + if (l & (uint64_t(1) << 63)) // rounding + h++; + return DiyFp(h, e + rhs.e + 64); +#else + const uint64_t M32 = 0xFFFFFFFF; + const uint64_t a = f >> 32; + const uint64_t b = f & M32; + const uint64_t c = 
rhs.f >> 32; + const uint64_t d = rhs.f & M32; + const uint64_t ac = a * c; + const uint64_t bc = b * c; + const uint64_t ad = a * d; + const uint64_t bd = b * d; + uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32); + tmp += 1U << 31; /// mult_round + return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64); +#endif + } + + DiyFp Normalize() const { +#if defined(_MSC_VER) && defined(_M_AMD64) + unsigned long index; + _BitScanReverse64(&index, f); + return DiyFp(f << (63 - index), e - (63 - index)); +#elif defined(__GNUC__) && __GNUC__ >= 4 + int s = __builtin_clzll(f); + return DiyFp(f << s, e - s); +#else + DiyFp res = *this; + while (!(res.f & (static_cast(1) << 63))) { + res.f <<= 1; + res.e--; + } + return res; +#endif + } + + DiyFp NormalizeBoundary() const { + DiyFp res = *this; + while (!(res.f & (kDpHiddenBit << 1))) { + res.f <<= 1; + res.e--; + } + res.f <<= (kDiySignificandSize - kDpSignificandSize - 2); + res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2); + return res; + } + + void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const { + DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary(); + DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1); + mi.f <<= mi.e - pl.e; + mi.e = pl.e; + *plus = pl; + *minus = mi; + } + + double ToDouble() const { + union { + double d; + uint64_t u64; + }u; + const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 
0 : + static_cast(e + kDpExponentBias); + u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize); + return u.d; + } + + static const int kDiySignificandSize = 64; + static const int kDpSignificandSize = 52; + static const int kDpExponentBias = 0x3FF + kDpSignificandSize; + static const int kDpMaxExponent = 0x7FF - kDpExponentBias; + static const int kDpMinExponent = -kDpExponentBias; + static const int kDpDenormalExponent = -kDpExponentBias + 1; + static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); + static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); + static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); + + uint64_t f; + int e; +}; + +inline DiyFp GetCachedPowerByIndex(size_t index) { + // 10^-348, 10^-340, ..., 10^340 + static const uint64_t kCachedPowers_F[] = { + RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76), + RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea), + RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df), + RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f), + RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c), + RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5), + RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d), + RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637), + RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7), + RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5), + RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b), + RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996), + 
RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6), + RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8), + RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053), + RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd), + RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94), + RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b), + RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac), + RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3), + RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb), + RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c), + RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000), + RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984), + RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70), + RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245), + RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8), + RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a), + RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea), + RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85), + RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2), + RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3), + RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25), + RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), 
RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece), + RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5), + RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a), + RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c), + RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a), + RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129), + RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429), + RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d), + RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841), + RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9), + RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b) + }; + static const int16_t kCachedPowers_E[] = { + -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, + -954, -927, -901, -874, -847, -821, -794, -768, -741, -715, + -688, -661, -635, -608, -582, -555, -529, -502, -475, -449, + -422, -396, -369, -343, -316, -289, -263, -236, -210, -183, + -157, -130, -103, -77, -50, -24, 3, 30, 56, 83, + 109, 136, 162, 189, 216, 242, 269, 295, 322, 348, + 375, 402, 428, 455, 481, 508, 534, 561, 588, 614, + 641, 667, 694, 720, 747, 774, 800, 827, 853, 880, + 907, 933, 960, 986, 1013, 1039, 1066 + }; + return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]); +} + +inline DiyFp GetCachedPower(int e, int* K) { + + //int k = static_cast(ceil((-61 - e) * 0.30102999566398114)) + 374; + double dk = (-61 - e) * 0.30102999566398114 + 347; // dk must be positive, so can do ceiling in positive + int k = static_cast(dk); + if (dk - k > 0.0) + k++; + + unsigned index = static_cast((k >> 3) + 1); + *K = -(-348 + static_cast(index << 3)); // decimal exponent no need lookup table + + return GetCachedPowerByIndex(index); +} 
+ +inline DiyFp GetCachedPower10(int exp, int *outExp) { + unsigned index = (static_cast(exp) + 348u) / 8u; + *outExp = -348 + static_cast(index) * 8; + return GetCachedPowerByIndex(index); + } + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +RAPIDJSON_DIAG_OFF(padded) +#endif + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_DIYFP_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/dtoa.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/dtoa.h new file mode 100644 index 0000000000..8d6350e626 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/dtoa.h @@ -0,0 +1,245 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +// This is a C++ header-only implementation of Grisu2 algorithm from the publication: +// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with +// integers." ACM Sigplan Notices 45.6 (2010): 233-243. 
+ +#ifndef RAPIDJSON_DTOA_ +#define RAPIDJSON_DTOA_ + +#include "itoa.h" // GetDigitsLut() +#include "diyfp.h" +#include "ieee754.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124 +#endif + +inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) { + while (rest < wp_w && delta - rest >= ten_kappa && + (rest + ten_kappa < wp_w || /// closer + wp_w - rest > rest + ten_kappa - wp_w)) { + buffer[len - 1]--; + rest += ten_kappa; + } +} + +inline unsigned CountDecimalDigit32(uint32_t n) { + // Simple pure C++ implementation was faster than __builtin_clz version in this situation. + if (n < 10) return 1; + if (n < 100) return 2; + if (n < 1000) return 3; + if (n < 10000) return 4; + if (n < 100000) return 5; + if (n < 1000000) return 6; + if (n < 10000000) return 7; + if (n < 100000000) return 8; + // Will not reach 10 digits in DigitGen() + //if (n < 1000000000) return 9; + //return 10; + return 9; +} + +inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) { + static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; + const DiyFp one(uint64_t(1) << -Mp.e, Mp.e); + const DiyFp wp_w = Mp - W; + uint32_t p1 = static_cast(Mp.f >> -one.e); + uint64_t p2 = Mp.f & (one.f - 1); + unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9] + *len = 0; + + while (kappa > 0) { + uint32_t d = 0; + switch (kappa) { + case 9: d = p1 / 100000000; p1 %= 100000000; break; + case 8: d = p1 / 10000000; p1 %= 10000000; break; + case 7: d = p1 / 1000000; p1 %= 1000000; break; + case 6: d = p1 / 100000; p1 %= 100000; break; + case 5: d = p1 / 10000; p1 %= 10000; break; + case 4: d = p1 / 1000; p1 %= 1000; break; + case 3: d = p1 / 100; 
p1 %= 100; break; + case 2: d = p1 / 10; p1 %= 10; break; + case 1: d = p1; p1 = 0; break; + default:; + } + if (d || *len) + buffer[(*len)++] = static_cast('0' + static_cast(d)); + kappa--; + uint64_t tmp = (static_cast(p1) << -one.e) + p2; + if (tmp <= delta) { + *K += kappa; + GrisuRound(buffer, *len, delta, tmp, static_cast(kPow10[kappa]) << -one.e, wp_w.f); + return; + } + } + + // kappa = 0 + for (;;) { + p2 *= 10; + delta *= 10; + char d = static_cast(p2 >> -one.e); + if (d || *len) + buffer[(*len)++] = static_cast('0' + d); + p2 &= one.f - 1; + kappa--; + if (p2 < delta) { + *K += kappa; + int index = -static_cast(kappa); + GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[-static_cast(kappa)] : 0)); + return; + } + } +} + +inline void Grisu2(double value, char* buffer, int* length, int* K) { + const DiyFp v(value); + DiyFp w_m, w_p; + v.NormalizedBoundaries(&w_m, &w_p); + + const DiyFp c_mk = GetCachedPower(w_p.e, K); + const DiyFp W = v.Normalize() * c_mk; + DiyFp Wp = w_p * c_mk; + DiyFp Wm = w_m * c_mk; + Wm.f++; + Wp.f--; + DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K); +} + +inline char* WriteExponent(int K, char* buffer) { + if (K < 0) { + *buffer++ = '-'; + K = -K; + } + + if (K >= 100) { + *buffer++ = static_cast('0' + static_cast(K / 100)); + K %= 100; + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else if (K >= 10) { + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else + *buffer++ = static_cast('0' + static_cast(K)); + + return buffer; +} + +inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) { + const int kk = length + k; // 10^(kk-1) <= v < 10^kk + + if (0 <= k && kk <= 21) { + // 1234e7 -> 12340000000 + for (int i = length; i < kk; i++) + buffer[i] = '0'; + buffer[kk] = '.'; + buffer[kk + 1] = '0'; + return &buffer[kk + 2]; + } + else if (0 < kk && kk <= 21) { + // 1234e-2 -> 12.34 + std::memmove(&buffer[kk + 1], 
&buffer[kk], static_cast(length - kk)); + buffer[kk] = '.'; + if (0 > k + maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1 + // Remove extra trailing zeros (at least one) after truncation. + for (int i = kk + maxDecimalPlaces; i > kk + 1; i--) + if (buffer[i] != '0') + return &buffer[i + 1]; + return &buffer[kk + 2]; // Reserve one zero + } + else + return &buffer[length + 1]; + } + else if (-6 < kk && kk <= 0) { + // 1234e-6 -> 0.001234 + const int offset = 2 - kk; + std::memmove(&buffer[offset], &buffer[0], static_cast(length)); + buffer[0] = '0'; + buffer[1] = '.'; + for (int i = 2; i < offset; i++) + buffer[i] = '0'; + if (length - kk > maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1 + // Remove extra trailing zeros (at least one) after truncation. + for (int i = maxDecimalPlaces + 1; i > 2; i--) + if (buffer[i] != '0') + return &buffer[i + 1]; + return &buffer[3]; // Reserve one zero + } + else + return &buffer[length + offset]; + } + else if (kk < -maxDecimalPlaces) { + // Truncate to zero + buffer[0] = '0'; + buffer[1] = '.'; + buffer[2] = '0'; + return &buffer[3]; + } + else if (length == 1) { + // 1e30 + buffer[1] = 'e'; + return WriteExponent(kk - 1, &buffer[2]); + } + else { + // 1234e30 -> 1.234e33 + std::memmove(&buffer[2], &buffer[1], static_cast(length - 1)); + buffer[1] = '.'; + buffer[length + 1] = 'e'; + return WriteExponent(kk - 1, &buffer[0 + length + 2]); + } +} + +inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) { + RAPIDJSON_ASSERT(maxDecimalPlaces >= 1); + Double d(value); + if (d.IsZero()) { + if (d.Sign()) + *buffer++ = '-'; // -0.0, Issue #289 + buffer[0] = '0'; + buffer[1] = '.'; + buffer[2] = '0'; + return &buffer[3]; + } + else { + if (value < 0) { + *buffer++ = '-'; + value = -value; + } + int length, K; + Grisu2(value, buffer, &length, &K); + return Prettify(buffer, length, K, maxDecimalPlaces); + } +} + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP 
+#endif + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_DTOA_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/ieee754.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/ieee754.h new file mode 100644 index 0000000000..82bb0b99e5 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/ieee754.h @@ -0,0 +1,78 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_IEEE754_ +#define RAPIDJSON_IEEE754_ + +#include "../rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +class Double { +public: + Double() {} + Double(double d) : d_(d) {} + Double(uint64_t u) : u_(u) {} + + double Value() const { return d_; } + uint64_t Uint64Value() const { return u_; } + + double NextPositiveDouble() const { + RAPIDJSON_ASSERT(!Sign()); + return Double(u_ + 1).Value(); + } + + bool Sign() const { return (u_ & kSignMask) != 0; } + uint64_t Significand() const { return u_ & kSignificandMask; } + int Exponent() const { return static_cast(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); } + + bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; } + bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; } + bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; } + bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; } + bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; } + + uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); } + int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; } + uint64_t ToBias() const { return (u_ & kSignMask) ? 
~u_ + 1 : u_ | kSignMask; } + + static unsigned EffectiveSignificandSize(int order) { + if (order >= -1021) + return 53; + else if (order <= -1074) + return 0; + else + return static_cast(order) + 1074; + } + +private: + static const int kSignificandSize = 52; + static const int kExponentBias = 0x3FF; + static const int kDenormalExponent = 1 - kExponentBias; + static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000); + static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); + static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); + static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); + + union { + double d_; + uint64_t u_; + }; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_IEEE754_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/itoa.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/itoa.h new file mode 100644 index 0000000000..01a4e7e72d --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/itoa.h @@ -0,0 +1,304 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_ITOA_ +#define RAPIDJSON_ITOA_ + +#include "../rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +inline const char* GetDigitsLut() { + static const char cDigitsLut[200] = { + '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', + '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', + '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', + '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', + '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', + '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', + '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', + '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', + '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', + '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' + }; + return cDigitsLut; +} + +inline char* u32toa(uint32_t value, char* buffer) { + const char* cDigitsLut = GetDigitsLut(); + + if (value < 10000) { + const uint32_t d1 = (value / 100) << 1; + const uint32_t d2 = (value % 100) << 1; + + if (value >= 1000) + *buffer++ = cDigitsLut[d1]; + if (value >= 100) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 10) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + } + else if (value < 100000000) { + // value = bbbbcccc + const uint32_t b = value / 10000; + const uint32_t c = value % 10000; + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + if (value >= 10000000) + *buffer++ = cDigitsLut[d1]; + if (value >= 1000000) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 100000) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + + *buffer++ = cDigitsLut[d3]; + *buffer++ = 
cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + else { + // value = aabbbbcccc in decimal + + const uint32_t a = value / 100000000; // 1 to 42 + value %= 100000000; + + if (a >= 10) { + const unsigned i = a << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else + *buffer++ = static_cast('0' + static_cast(a)); + + const uint32_t b = value / 10000; // 0 to 9999 + const uint32_t c = value % 10000; // 0 to 9999 + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + *buffer++ = cDigitsLut[d1]; + *buffer++ = cDigitsLut[d1 + 1]; + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + return buffer; +} + +inline char* i32toa(int32_t value, char* buffer) { + uint32_t u = static_cast(value); + if (value < 0) { + *buffer++ = '-'; + u = ~u + 1; + } + + return u32toa(u, buffer); +} + +inline char* u64toa(uint64_t value, char* buffer) { + const char* cDigitsLut = GetDigitsLut(); + const uint64_t kTen8 = 100000000; + const uint64_t kTen9 = kTen8 * 10; + const uint64_t kTen10 = kTen8 * 100; + const uint64_t kTen11 = kTen8 * 1000; + const uint64_t kTen12 = kTen8 * 10000; + const uint64_t kTen13 = kTen8 * 100000; + const uint64_t kTen14 = kTen8 * 1000000; + const uint64_t kTen15 = kTen8 * 10000000; + const uint64_t kTen16 = kTen8 * kTen8; + + if (value < kTen8) { + uint32_t v = static_cast(value); + if (v < 10000) { + const uint32_t d1 = (v / 100) << 1; + const uint32_t d2 = (v % 100) << 1; + + if (v >= 1000) + *buffer++ = cDigitsLut[d1]; + if (v >= 100) + *buffer++ = cDigitsLut[d1 + 1]; + if (v >= 10) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + } + else { + // value = bbbbcccc + const uint32_t b = v / 10000; + const uint32_t c = v % 10000; + + const 
uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + if (value >= 10000000) + *buffer++ = cDigitsLut[d1]; + if (value >= 1000000) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 100000) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + } + else if (value < kTen16) { + const uint32_t v0 = static_cast(value / kTen8); + const uint32_t v1 = static_cast(value % kTen8); + + const uint32_t b0 = v0 / 10000; + const uint32_t c0 = v0 % 10000; + + const uint32_t d1 = (b0 / 100) << 1; + const uint32_t d2 = (b0 % 100) << 1; + + const uint32_t d3 = (c0 / 100) << 1; + const uint32_t d4 = (c0 % 100) << 1; + + const uint32_t b1 = v1 / 10000; + const uint32_t c1 = v1 % 10000; + + const uint32_t d5 = (b1 / 100) << 1; + const uint32_t d6 = (b1 % 100) << 1; + + const uint32_t d7 = (c1 / 100) << 1; + const uint32_t d8 = (c1 % 100) << 1; + + if (value >= kTen15) + *buffer++ = cDigitsLut[d1]; + if (value >= kTen14) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= kTen13) + *buffer++ = cDigitsLut[d2]; + if (value >= kTen12) + *buffer++ = cDigitsLut[d2 + 1]; + if (value >= kTen11) + *buffer++ = cDigitsLut[d3]; + if (value >= kTen10) + *buffer++ = cDigitsLut[d3 + 1]; + if (value >= kTen9) + *buffer++ = cDigitsLut[d4]; + if (value >= kTen8) + *buffer++ = cDigitsLut[d4 + 1]; + + *buffer++ = cDigitsLut[d5]; + *buffer++ = cDigitsLut[d5 + 1]; + *buffer++ = cDigitsLut[d6]; + *buffer++ = cDigitsLut[d6 + 1]; + *buffer++ = cDigitsLut[d7]; + *buffer++ = cDigitsLut[d7 + 1]; + *buffer++ = cDigitsLut[d8]; + *buffer++ = cDigitsLut[d8 + 1]; + } + else { + const uint32_t a = static_cast(value / kTen16); // 1 to 1844 + value %= kTen16; + + if (a < 10) + *buffer++ = static_cast('0' + static_cast(a)); + else if (a < 100) { + const uint32_t i = a << 1; + *buffer++ = 
cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else if (a < 1000) { + *buffer++ = static_cast('0' + static_cast(a / 100)); + + const uint32_t i = (a % 100) << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else { + const uint32_t i = (a / 100) << 1; + const uint32_t j = (a % 100) << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + *buffer++ = cDigitsLut[j]; + *buffer++ = cDigitsLut[j + 1]; + } + + const uint32_t v0 = static_cast(value / kTen8); + const uint32_t v1 = static_cast(value % kTen8); + + const uint32_t b0 = v0 / 10000; + const uint32_t c0 = v0 % 10000; + + const uint32_t d1 = (b0 / 100) << 1; + const uint32_t d2 = (b0 % 100) << 1; + + const uint32_t d3 = (c0 / 100) << 1; + const uint32_t d4 = (c0 % 100) << 1; + + const uint32_t b1 = v1 / 10000; + const uint32_t c1 = v1 % 10000; + + const uint32_t d5 = (b1 / 100) << 1; + const uint32_t d6 = (b1 % 100) << 1; + + const uint32_t d7 = (c1 / 100) << 1; + const uint32_t d8 = (c1 % 100) << 1; + + *buffer++ = cDigitsLut[d1]; + *buffer++ = cDigitsLut[d1 + 1]; + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + *buffer++ = cDigitsLut[d5]; + *buffer++ = cDigitsLut[d5 + 1]; + *buffer++ = cDigitsLut[d6]; + *buffer++ = cDigitsLut[d6 + 1]; + *buffer++ = cDigitsLut[d7]; + *buffer++ = cDigitsLut[d7 + 1]; + *buffer++ = cDigitsLut[d8]; + *buffer++ = cDigitsLut[d8 + 1]; + } + + return buffer; +} + +inline char* i64toa(int64_t value, char* buffer) { + uint64_t u = static_cast(value); + if (value < 0) { + *buffer++ = '-'; + u = ~u + 1; + } + + return u64toa(u, buffer); +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ITOA_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/meta.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/meta.h new file mode 100644 index 0000000000..5a9aaa4286 
--- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/meta.h @@ -0,0 +1,181 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_META_H_ +#define RAPIDJSON_INTERNAL_META_H_ + +#include "../rapidjson.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif +#if defined(_MSC_VER) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(6334) +#endif + +#if RAPIDJSON_HAS_CXX11_TYPETRAITS +#include +#endif + +//@cond RAPIDJSON_INTERNAL +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching +template struct Void { typedef void Type; }; + +/////////////////////////////////////////////////////////////////////////////// +// BoolType, TrueType, FalseType +// +template struct BoolType { + static const bool Value = Cond; + typedef BoolType Type; +}; +typedef BoolType TrueType; +typedef BoolType FalseType; + + +/////////////////////////////////////////////////////////////////////////////// +// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr +// + +template struct SelectIfImpl { template struct Apply { typedef T1 Type; }; }; +template <> struct SelectIfImpl { template struct Apply { typedef T2 Type; }; }; +template struct SelectIfCond : SelectIfImpl::template Apply {}; +template 
struct SelectIf : SelectIfCond {}; + +template struct AndExprCond : FalseType {}; +template <> struct AndExprCond : TrueType {}; +template struct OrExprCond : TrueType {}; +template <> struct OrExprCond : FalseType {}; + +template struct BoolExpr : SelectIf::Type {}; +template struct NotExpr : SelectIf::Type {}; +template struct AndExpr : AndExprCond::Type {}; +template struct OrExpr : OrExprCond::Type {}; + + +/////////////////////////////////////////////////////////////////////////////// +// AddConst, MaybeAddConst, RemoveConst +template struct AddConst { typedef const T Type; }; +template struct MaybeAddConst : SelectIfCond {}; +template struct RemoveConst { typedef T Type; }; +template struct RemoveConst { typedef T Type; }; + + +/////////////////////////////////////////////////////////////////////////////// +// IsSame, IsConst, IsMoreConst, IsPointer +// +template struct IsSame : FalseType {}; +template struct IsSame : TrueType {}; + +template struct IsConst : FalseType {}; +template struct IsConst : TrueType {}; + +template +struct IsMoreConst + : AndExpr::Type, typename RemoveConst::Type>, + BoolType::Value >= IsConst::Value> >::Type {}; + +template struct IsPointer : FalseType {}; +template struct IsPointer : TrueType {}; + +/////////////////////////////////////////////////////////////////////////////// +// IsBaseOf +// +#if RAPIDJSON_HAS_CXX11_TYPETRAITS + +template struct IsBaseOf + : BoolType< ::std::is_base_of::value> {}; + +#else // simplified version adopted from Boost + +template struct IsBaseOfImpl { + RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0); + RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0); + + typedef char (&Yes)[1]; + typedef char (&No) [2]; + + template + static Yes Check(const D*, T); + static No Check(const B*, int); + + struct Host { + operator const B*() const; + operator const D*(); + }; + + enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) }; +}; + +template struct IsBaseOf + : OrExpr, BoolExpr > >::Type {}; + +#endif // 
RAPIDJSON_HAS_CXX11_TYPETRAITS + + +////////////////////////////////////////////////////////////////////////// +// EnableIf / DisableIf +// +template struct EnableIfCond { typedef T Type; }; +template struct EnableIfCond { /* empty */ }; + +template struct DisableIfCond { typedef T Type; }; +template struct DisableIfCond { /* empty */ }; + +template +struct EnableIf : EnableIfCond {}; + +template +struct DisableIf : DisableIfCond {}; + +// SFINAE helpers +struct SfinaeTag {}; +template struct RemoveSfinaeTag; +template struct RemoveSfinaeTag { typedef T Type; }; + +#define RAPIDJSON_REMOVEFPTR_(type) \ + typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \ + < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type + +#define RAPIDJSON_ENABLEIF(cond) \ + typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \ + ::Type * = NULL + +#define RAPIDJSON_DISABLEIF(cond) \ + typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \ + ::Type * = NULL + +#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \ + typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \ + ::Type + +#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \ + typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \ + ::Type + +} // namespace internal +RAPIDJSON_NAMESPACE_END +//@endcond + +#if defined(__GNUC__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_INTERNAL_META_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/pow10.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/pow10.h new file mode 100644 index 0000000000..02f475d705 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/pow10.h @@ -0,0 +1,55 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_POW10_ +#define RAPIDJSON_POW10_ + +#include "../rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +//! Computes integer powers of 10 in double (10.0^n). +/*! This function uses lookup table for fast and accurate results. + \param n non-negative exponent. Must <= 308. + \return 10.0^n +*/ +inline double Pow10(int n) { + static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes + 1e+0, + 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20, + 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, + 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, + 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, + 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, + 1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120, + 1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140, + 1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160, + 
1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180, + 1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200, + 1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220, + 1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240, + 1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260, + 1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280, + 1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300, + 1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308 + }; + RAPIDJSON_ASSERT(n >= 0 && n <= 308); + return e[n]; +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_POW10_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/regex.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/regex.h new file mode 100644 index 0000000000..422a5240bf --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/regex.h @@ -0,0 +1,701 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_REGEX_H_ +#define RAPIDJSON_INTERNAL_REGEX_H_ + +#include "../allocators.h" +#include "../stream.h" +#include "stack.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(implicit-fallthrough) +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +#ifndef RAPIDJSON_REGEX_VERBOSE +#define RAPIDJSON_REGEX_VERBOSE 0 +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +/////////////////////////////////////////////////////////////////////////////// +// GenericRegex + +static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1 +static const SizeType kRegexInvalidRange = ~SizeType(0); + +//! Regular expression engine with subset of ECMAscript grammar. +/*! + Supported regular expression syntax: + - \c ab Concatenation + - \c a|b Alternation + - \c a? Zero or one + - \c a* Zero or more + - \c a+ One or more + - \c a{3} Exactly 3 times + - \c a{3,} At least 3 times + - \c a{3,5} 3 to 5 times + - \c (ab) Grouping + - \c ^a At the beginning + - \c a$ At the end + - \c . Any character + - \c [abc] Character classes + - \c [a-c] Character class range + - \c [a-z0-9_] Character class combination + - \c [^abc] Negated character classes + - \c [^a-c] Negated character class range + - \c [\b] Backspace (U+0008) + - \c \\| \\\\ ... 
Escape characters + - \c \\f Form feed (U+000C) + - \c \\n Line feed (U+000A) + - \c \\r Carriage return (U+000D) + - \c \\t Tab (U+0009) + - \c \\v Vertical tab (U+000B) + + \note This is a Thompson NFA engine, implemented with reference to + Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).", + https://swtch.com/~rsc/regexp/regexp1.html +*/ +template +class GenericRegex { +public: + typedef typename Encoding::Ch Ch; + + GenericRegex(const Ch* source, Allocator* allocator = 0) : + states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), + stateSet_(), state0_(allocator, 0), state1_(allocator, 0), anchorBegin_(), anchorEnd_() + { + GenericStringStream ss(source); + DecodedStream > ds(ss); + Parse(ds); + } + + ~GenericRegex() { + Allocator::Free(stateSet_); + } + + bool IsValid() const { + return root_ != kRegexInvalidState; + } + + template + bool Match(InputStream& is) const { + return SearchWithAnchoring(is, true, true); + } + + bool Match(const Ch* s) const { + GenericStringStream is(s); + return Match(is); + } + + template + bool Search(InputStream& is) const { + return SearchWithAnchoring(is, anchorBegin_, anchorEnd_); + } + + bool Search(const Ch* s) const { + GenericStringStream is(s); + return Search(is); + } + +private: + enum Operator { + kZeroOrOne, + kZeroOrMore, + kOneOrMore, + kConcatenation, + kAlternation, + kLeftParenthesis + }; + + static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.' 
+ static const unsigned kRangeCharacterClass = 0xFFFFFFFE; + static const unsigned kRangeNegationFlag = 0x80000000; + + struct Range { + unsigned start; // + unsigned end; + SizeType next; + }; + + struct State { + SizeType out; //!< Equals to kInvalid for matching state + SizeType out1; //!< Equals to non-kInvalid for split + SizeType rangeStart; + unsigned codepoint; + }; + + struct Frag { + Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {} + SizeType start; + SizeType out; //!< link-list of all output states + SizeType minIndex; + }; + + template + class DecodedStream { + public: + DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); } + unsigned Peek() { return codepoint_; } + unsigned Take() { + unsigned c = codepoint_; + if (c) // No further decoding when '\0' + Decode(); + return c; + } + + private: + void Decode() { + if (!Encoding::Decode(ss_, &codepoint_)) + codepoint_ = 0; + } + + SourceStream& ss_; + unsigned codepoint_; + }; + + State& GetState(SizeType index) { + RAPIDJSON_ASSERT(index < stateCount_); + return states_.template Bottom()[index]; + } + + const State& GetState(SizeType index) const { + RAPIDJSON_ASSERT(index < stateCount_); + return states_.template Bottom()[index]; + } + + Range& GetRange(SizeType index) { + RAPIDJSON_ASSERT(index < rangeCount_); + return ranges_.template Bottom()[index]; + } + + const Range& GetRange(SizeType index) const { + RAPIDJSON_ASSERT(index < rangeCount_); + return ranges_.template Bottom()[index]; + } + + template + void Parse(DecodedStream& ds) { + Allocator allocator; + Stack operandStack(&allocator, 256); // Frag + Stack operatorStack(&allocator, 256); // Operator + Stack atomCountStack(&allocator, 256); // unsigned (Atom per parenthesis) + + *atomCountStack.template Push() = 0; + + unsigned codepoint; + while (ds.Peek() != 0) { + switch (codepoint = ds.Take()) { + case '^': + anchorBegin_ = true; + break; + + case '$': + anchorEnd_ = true; + break; + + case '|': + 
while (!operatorStack.Empty() && *operatorStack.template Top() < kAlternation) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + *operatorStack.template Push() = kAlternation; + *atomCountStack.template Top() = 0; + break; + + case '(': + *operatorStack.template Push() = kLeftParenthesis; + *atomCountStack.template Push() = 0; + break; + + case ')': + while (!operatorStack.Empty() && *operatorStack.template Top() != kLeftParenthesis) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + if (operatorStack.Empty()) + return; + operatorStack.template Pop(1); + atomCountStack.template Pop(1); + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '?': + if (!Eval(operandStack, kZeroOrOne)) + return; + break; + + case '*': + if (!Eval(operandStack, kZeroOrMore)) + return; + break; + + case '+': + if (!Eval(operandStack, kOneOrMore)) + return; + break; + + case '{': + { + unsigned n, m; + if (!ParseUnsigned(ds, &n)) + return; + + if (ds.Peek() == ',') { + ds.Take(); + if (ds.Peek() == '}') + m = kInfinityQuantifier; + else if (!ParseUnsigned(ds, &m) || m < n) + return; + } + else + m = n; + + if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}') + return; + ds.Take(); + } + break; + + case '.': + PushOperand(operandStack, kAnyCharacterClass); + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '[': + { + SizeType range; + if (!ParseRange(ds, &range)) + return; + SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass); + GetState(s).rangeStart = range; + *operandStack.template Push() = Frag(s, s, s); + } + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '\\': // Escape character + if (!CharacterEscape(ds, &codepoint)) + return; // Unsupported escape character + // fall through to default + + default: // Pattern character + PushOperand(operandStack, codepoint); + ImplicitConcatenation(atomCountStack, operatorStack); + } + } + + while 
(!operatorStack.Empty()) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + + // Link the operand to matching state. + if (operandStack.GetSize() == sizeof(Frag)) { + Frag* e = operandStack.template Pop(1); + Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0)); + root_ = e->start; + +#if RAPIDJSON_REGEX_VERBOSE + printf("root: %d\n", root_); + for (SizeType i = 0; i < stateCount_ ; i++) { + State& s = GetState(i); + printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint); + } + printf("\n"); +#endif + } + + // Preallocate buffer for SearchWithAnchoring() + RAPIDJSON_ASSERT(stateSet_ == 0); + if (stateCount_ > 0) { + stateSet_ = static_cast(states_.GetAllocator().Malloc(GetStateSetSize())); + state0_.template Reserve(stateCount_); + state1_.template Reserve(stateCount_); + } + } + + SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) { + State* s = states_.template Push(); + s->out = out; + s->out1 = out1; + s->codepoint = codepoint; + s->rangeStart = kRegexInvalidRange; + return stateCount_++; + } + + void PushOperand(Stack& operandStack, unsigned codepoint) { + SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint); + *operandStack.template Push() = Frag(s, s, s); + } + + void ImplicitConcatenation(Stack& atomCountStack, Stack& operatorStack) { + if (*atomCountStack.template Top()) + *operatorStack.template Push() = kConcatenation; + (*atomCountStack.template Top())++; + } + + SizeType Append(SizeType l1, SizeType l2) { + SizeType old = l1; + while (GetState(l1).out != kRegexInvalidState) + l1 = GetState(l1).out; + GetState(l1).out = l2; + return old; + } + + void Patch(SizeType l, SizeType s) { + for (SizeType next; l != kRegexInvalidState; l = next) { + next = GetState(l).out; + GetState(l).out = s; + } + } + + bool Eval(Stack& operandStack, Operator op) { + switch (op) { + case kConcatenation: + RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2); + { + Frag 
e2 = *operandStack.template Pop(1); + Frag e1 = *operandStack.template Pop(1); + Patch(e1.out, e2.start); + *operandStack.template Push() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex)); + } + return true; + + case kAlternation: + if (operandStack.GetSize() >= sizeof(Frag) * 2) { + Frag e2 = *operandStack.template Pop(1); + Frag e1 = *operandStack.template Pop(1); + SizeType s = NewState(e1.start, e2.start, 0); + *operandStack.template Push() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex)); + return true; + } + return false; + + case kZeroOrOne: + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + *operandStack.template Push() = Frag(s, Append(e.out, s), e.minIndex); + return true; + } + return false; + + case kZeroOrMore: + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + Patch(e.out, s); + *operandStack.template Push() = Frag(s, s, e.minIndex); + return true; + } + return false; + + default: + RAPIDJSON_ASSERT(op == kOneOrMore); + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + Patch(e.out, s); + *operandStack.template Push() = Frag(e.start, s, e.minIndex); + return true; + } + return false; + } + } + + bool EvalQuantifier(Stack& operandStack, unsigned n, unsigned m) { + RAPIDJSON_ASSERT(n <= m); + RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag)); + + if (n == 0) { + if (m == 0) // a{0} not support + return false; + else if (m == kInfinityQuantifier) + Eval(operandStack, kZeroOrMore); // a{0,} -> a* + else { + Eval(operandStack, kZeroOrOne); // a{0,5} -> a? + for (unsigned i = 0; i < m - 1; i++) + CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a? 
+ for (unsigned i = 0; i < m - 1; i++) + Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a? + } + return true; + } + + for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a + CloneTopOperand(operandStack); + + if (m == kInfinityQuantifier) + Eval(operandStack, kOneOrMore); // a{3,} -> a a a+ + else if (m > n) { + CloneTopOperand(operandStack); // a{3,5} -> a a a a + Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a? + for (unsigned i = n; i < m - 1; i++) + CloneTopOperand(operandStack); // a{3,5} -> a a a a? a? + for (unsigned i = n; i < m; i++) + Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a? + } + + for (unsigned i = 0; i < n - 1; i++) + Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3.5} -> aaaa?a? + + return true; + } + + static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; } + + void CloneTopOperand(Stack& operandStack) { + const Frag src = *operandStack.template Top(); // Copy constructor to prevent invalidation + SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src->minIndex, stateCount_) + State* s = states_.template Push(count); + memcpy(s, &GetState(src.minIndex), count * sizeof(State)); + for (SizeType j = 0; j < count; j++) { + if (s[j].out != kRegexInvalidState) + s[j].out += count; + if (s[j].out1 != kRegexInvalidState) + s[j].out1 += count; + } + *operandStack.template Push() = Frag(src.start + count, src.out + count, src.minIndex + count); + stateCount_ += count; + } + + template + bool ParseUnsigned(DecodedStream& ds, unsigned* u) { + unsigned r = 0; + if (ds.Peek() < '0' || ds.Peek() > '9') + return false; + while (ds.Peek() >= '0' && ds.Peek() <= '9') { + if (r >= 429496729 && ds.Peek() > '5') // 2^32 - 1 = 4294967295 + return false; // overflow + r = r * 10 + (ds.Take() - '0'); + } + *u = r; + return true; + } + + template + bool ParseRange(DecodedStream& ds, SizeType* range) { + bool isBegin = true; + bool negate = false; + int step = 0; + 
SizeType start = kRegexInvalidRange; + SizeType current = kRegexInvalidRange; + unsigned codepoint; + while ((codepoint = ds.Take()) != 0) { + if (isBegin) { + isBegin = false; + if (codepoint == '^') { + negate = true; + continue; + } + } + + switch (codepoint) { + case ']': + if (start == kRegexInvalidRange) + return false; // Error: nothing inside [] + if (step == 2) { // Add trailing '-' + SizeType r = NewRange('-'); + RAPIDJSON_ASSERT(current != kRegexInvalidRange); + GetRange(current).next = r; + } + if (negate) + GetRange(start).start |= kRangeNegationFlag; + *range = start; + return true; + + case '\\': + if (ds.Peek() == 'b') { + ds.Take(); + codepoint = 0x0008; // Escape backspace character + } + else if (!CharacterEscape(ds, &codepoint)) + return false; + // fall through to default + + default: + switch (step) { + case 1: + if (codepoint == '-') { + step++; + break; + } + // fall through to step 0 for other characters + + case 0: + { + SizeType r = NewRange(codepoint); + if (current != kRegexInvalidRange) + GetRange(current).next = r; + if (start == kRegexInvalidRange) + start = r; + current = r; + } + step = 1; + break; + + default: + RAPIDJSON_ASSERT(step == 2); + GetRange(current).end = codepoint; + step = 0; + } + } + } + return false; + } + + SizeType NewRange(unsigned codepoint) { + Range* r = ranges_.template Push(); + r->start = r->end = codepoint; + r->next = kRegexInvalidRange; + return rangeCount_++; + } + + template + bool CharacterEscape(DecodedStream& ds, unsigned* escapedCodepoint) { + unsigned codepoint; + switch (codepoint = ds.Take()) { + case '^': + case '$': + case '|': + case '(': + case ')': + case '?': + case '*': + case '+': + case '.': + case '[': + case ']': + case '{': + case '}': + case '\\': + *escapedCodepoint = codepoint; return true; + case 'f': *escapedCodepoint = 0x000C; return true; + case 'n': *escapedCodepoint = 0x000A; return true; + case 'r': *escapedCodepoint = 0x000D; return true; + case 't': *escapedCodepoint = 
0x0009; return true; + case 'v': *escapedCodepoint = 0x000B; return true; + default: + return false; // Unsupported escape character + } + } + + template + bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) const { + RAPIDJSON_ASSERT(IsValid()); + DecodedStream ds(is); + + state0_.Clear(); + Stack *current = &state0_, *next = &state1_; + const size_t stateSetSize = GetStateSetSize(); + std::memset(stateSet_, 0, stateSetSize); + + bool matched = AddState(*current, root_); + unsigned codepoint; + while (!current->Empty() && (codepoint = ds.Take()) != 0) { + std::memset(stateSet_, 0, stateSetSize); + next->Clear(); + matched = false; + for (const SizeType* s = current->template Bottom(); s != current->template End(); ++s) { + const State& sr = GetState(*s); + if (sr.codepoint == codepoint || + sr.codepoint == kAnyCharacterClass || + (sr.codepoint == kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint))) + { + matched = AddState(*next, sr.out) || matched; + if (!anchorEnd && matched) + return true; + } + if (!anchorBegin) + AddState(*next, root_); + } + internal::Swap(current, next); + } + + return matched; + } + + size_t GetStateSetSize() const { + return (stateCount_ + 31) / 32 * 4; + } + + // Return whether the added states is a match state + bool AddState(Stack& l, SizeType index) const { + RAPIDJSON_ASSERT(index != kRegexInvalidState); + + const State& s = GetState(index); + if (s.out1 != kRegexInvalidState) { // Split + bool matched = AddState(l, s.out); + return AddState(l, s.out1) || matched; + } + else if (!(stateSet_[index >> 5] & (1 << (index & 31)))) { + stateSet_[index >> 5] |= (1 << (index & 31)); + *l.template PushUnsafe() = index; + } + return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation. 
+ } + + bool MatchRange(SizeType rangeIndex, unsigned codepoint) const { + bool yes = (GetRange(rangeIndex).start & kRangeNegationFlag) == 0; + while (rangeIndex != kRegexInvalidRange) { + const Range& r = GetRange(rangeIndex); + if (codepoint >= (r.start & ~kRangeNegationFlag) && codepoint <= r.end) + return yes; + rangeIndex = r.next; + } + return !yes; + } + + Stack states_; + Stack ranges_; + SizeType root_; + SizeType stateCount_; + SizeType rangeCount_; + + static const unsigned kInfinityQuantifier = ~0u; + + // For SearchWithAnchoring() + uint32_t* stateSet_; // allocated by states_.GetAllocator() + mutable Stack state0_; + mutable Stack state1_; + bool anchorBegin_; + bool anchorEnd_; +}; + +typedef GenericRegex > Regex; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_INTERNAL_REGEX_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/stack.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/stack.h new file mode 100644 index 0000000000..022c9aab41 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/stack.h @@ -0,0 +1,230 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_INTERNAL_STACK_H_ +#define RAPIDJSON_INTERNAL_STACK_H_ + +#include "../allocators.h" +#include "swap.h" + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +/////////////////////////////////////////////////////////////////////////////// +// Stack + +//! A type-unsafe stack for storing different types of data. +/*! \tparam Allocator Allocator for allocating stack memory. +*/ +template +class Stack { +public: + // Optimization note: Do not allocate memory for stack_ in constructor. + // Do it lazily when first Push() -> Expand() -> Resize(). + Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) { + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + Stack(Stack&& rhs) + : allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + stack_(rhs.stack_), + stackTop_(rhs.stackTop_), + stackEnd_(rhs.stackEnd_), + initialCapacity_(rhs.initialCapacity_) + { + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.stack_ = 0; + rhs.stackTop_ = 0; + rhs.stackEnd_ = 0; + rhs.initialCapacity_ = 0; + } +#endif + + ~Stack() { + Destroy(); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + Stack& operator=(Stack&& rhs) { + if (&rhs != this) + { + Destroy(); + + allocator_ = rhs.allocator_; + ownAllocator_ = rhs.ownAllocator_; + stack_ = rhs.stack_; + stackTop_ = rhs.stackTop_; + stackEnd_ = rhs.stackEnd_; + initialCapacity_ = rhs.initialCapacity_; + + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.stack_ = 0; + rhs.stackTop_ = 0; + rhs.stackEnd_ = 0; + rhs.initialCapacity_ = 0; + } + return *this; + } +#endif + + void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT { + internal::Swap(allocator_, rhs.allocator_); + internal::Swap(ownAllocator_, rhs.ownAllocator_); + internal::Swap(stack_, rhs.stack_); + internal::Swap(stackTop_, rhs.stackTop_); + internal::Swap(stackEnd_, 
rhs.stackEnd_); + internal::Swap(initialCapacity_, rhs.initialCapacity_); + } + + void Clear() { stackTop_ = stack_; } + + void ShrinkToFit() { + if (Empty()) { + // If the stack is empty, completely deallocate the memory. + Allocator::Free(stack_); + stack_ = 0; + stackTop_ = 0; + stackEnd_ = 0; + } + else + Resize(GetSize()); + } + + // Optimization note: try to minimize the size of this function for force inline. + // Expansion is run very infrequently, so it is moved to another (probably non-inline) function. + template + RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) { + // Expand the stack if needed + if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_)) + Expand(count); + } + + template + RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) { + Reserve(count); + return PushUnsafe(count); + } + + template + RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) { + RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_); + T* ret = reinterpret_cast(stackTop_); + stackTop_ += sizeof(T) * count; + return ret; + } + + template + T* Pop(size_t count) { + RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T)); + stackTop_ -= count * sizeof(T); + return reinterpret_cast(stackTop_); + } + + template + T* Top() { + RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); + return reinterpret_cast(stackTop_ - sizeof(T)); + } + + template + const T* Top() const { + RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); + return reinterpret_cast(stackTop_ - sizeof(T)); + } + + template + T* End() { return reinterpret_cast(stackTop_); } + + template + const T* End() const { return reinterpret_cast(stackTop_); } + + template + T* Bottom() { return reinterpret_cast(stack_); } + + template + const T* Bottom() const { return reinterpret_cast(stack_); } + + bool HasAllocator() const { + return allocator_ != 0; + } + + Allocator& GetAllocator() { + RAPIDJSON_ASSERT(allocator_); + return *allocator_; + } + + bool Empty() const { return stackTop_ == stack_; } + size_t GetSize() const { return 
static_cast(stackTop_ - stack_); } + size_t GetCapacity() const { return static_cast(stackEnd_ - stack_); } + +private: + template + void Expand(size_t count) { + // Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity. + size_t newCapacity; + if (stack_ == 0) { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + newCapacity = initialCapacity_; + } else { + newCapacity = GetCapacity(); + newCapacity += (newCapacity + 1) / 2; + } + size_t newSize = GetSize() + sizeof(T) * count; + if (newCapacity < newSize) + newCapacity = newSize; + + Resize(newCapacity); + } + + void Resize(size_t newCapacity) { + const size_t size = GetSize(); // Backup the current size + stack_ = static_cast(allocator_->Realloc(stack_, GetCapacity(), newCapacity)); + stackTop_ = stack_ + size; + stackEnd_ = stack_ + newCapacity; + } + + void Destroy() { + Allocator::Free(stack_); + RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack + } + + // Prohibit copy constructor & assignment operator. + Stack(const Stack&); + Stack& operator=(const Stack&); + + Allocator* allocator_; + Allocator* ownAllocator_; + char *stack_; + char *stackTop_; + char *stackEnd_; + size_t initialCapacity_; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_STACK_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/strfunc.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/strfunc.h new file mode 100644 index 0000000000..2edfae5267 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/strfunc.h @@ -0,0 +1,55 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 
+// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_ +#define RAPIDJSON_INTERNAL_STRFUNC_H_ + +#include "../stream.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +//! Custom strlen() which works on different character types. +/*! \tparam Ch Character type (e.g. char, wchar_t, short) + \param s Null-terminated input string. + \return Number of characters in the string. + \note This has the same semantics as strlen(), the return value is not number of Unicode codepoints. +*/ +template +inline SizeType StrLen(const Ch* s) { + const Ch* p = s; + while (*p) ++p; + return SizeType(p - s); +} + +//! Returns number of code points in a encoded string. 
+template +bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) { + GenericStringStream is(s); + const typename Encoding::Ch* end = s + length; + SizeType count = 0; + while (is.src_ < end) { + unsigned codepoint; + if (!Encoding::Decode(is, &codepoint)) + return false; + count++; + } + *outCount = count; + return true; +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_INTERNAL_STRFUNC_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/strtod.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/strtod.h new file mode 100644 index 0000000000..289c413b07 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/strtod.h @@ -0,0 +1,269 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_STRTOD_ +#define RAPIDJSON_STRTOD_ + +#include "ieee754.h" +#include "biginteger.h" +#include "diyfp.h" +#include "pow10.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +inline double FastPath(double significand, int exp) { + if (exp < -308) + return 0.0; + else if (exp >= 0) + return significand * internal::Pow10(exp); + else + return significand / internal::Pow10(-exp); +} + +inline double StrtodNormalPrecision(double d, int p) { + if (p < -308) { + // Prevent expSum < -308, making Pow10(p) = 0 + d = FastPath(d, -308); + d = FastPath(d, p + 308); + } + else + d = FastPath(d, p); + return d; +} + +template +inline T Min3(T a, T b, T c) { + T m = a; + if (m > b) m = b; + if (m > c) m = c; + return m; +} + +inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) { + const Double db(b); + const uint64_t bInt = db.IntegerSignificand(); + const int bExp = db.IntegerExponent(); + const int hExp = bExp - 1; + + int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0; + + // Adjust for decimal exponent + if (dExp >= 0) { + dS_Exp2 += dExp; + dS_Exp5 += dExp; + } + else { + bS_Exp2 -= dExp; + bS_Exp5 -= dExp; + hS_Exp2 -= dExp; + hS_Exp5 -= dExp; + } + + // Adjust for binary exponent + if (bExp >= 0) + bS_Exp2 += bExp; + else { + dS_Exp2 -= bExp; + hS_Exp2 -= bExp; + } + + // Adjust for half ulp exponent + if (hExp >= 0) + hS_Exp2 += hExp; + else { + dS_Exp2 -= hExp; + bS_Exp2 -= hExp; + } + + // Remove common power of two factor from all three scaled values + int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2); + dS_Exp2 -= common_Exp2; + bS_Exp2 -= common_Exp2; + hS_Exp2 -= common_Exp2; + + BigInteger dS = d; + dS.MultiplyPow5(static_cast(dS_Exp5)) <<= static_cast(dS_Exp2); + + BigInteger bS(bInt); + bS.MultiplyPow5(static_cast(bS_Exp5)) <<= static_cast(bS_Exp2); + + BigInteger hS(1); + hS.MultiplyPow5(static_cast(hS_Exp5)) <<= static_cast(hS_Exp2); + + BigInteger delta(0); + dS.Difference(bS, &delta); + 
+ return delta.Compare(hS); +} + +inline bool StrtodFast(double d, int p, double* result) { + // Use fast path for string-to-double conversion if possible + // see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/ + if (p > 22 && p < 22 + 16) { + // Fast Path Cases In Disguise + d *= internal::Pow10(p - 22); + p = 22; + } + + if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1 + *result = FastPath(d, p); + return true; + } + else + return false; +} + +// Compute an approximation and see if it is within 1/2 ULP +inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) { + uint64_t significand = 0; + size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999 + for (; i < length; i++) { + if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || + (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5')) + break; + significand = significand * 10u + static_cast(decimals[i] - '0'); + } + + if (i < length && decimals[i] >= '5') // Rounding + significand++; + + size_t remaining = length - i; + const unsigned kUlpShift = 3; + const unsigned kUlp = 1 << kUlpShift; + int64_t error = (remaining == 0) ? 
0 : kUlp / 2; + + DiyFp v(significand, 0); + v = v.Normalize(); + error <<= -v.e; + + const int dExp = static_cast(decimalPosition) - static_cast(i) + exp; + + int actualExp; + DiyFp cachedPower = GetCachedPower10(dExp, &actualExp); + if (actualExp != dExp) { + static const DiyFp kPow10[] = { + DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60), // 10^1 + DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57), // 10^2 + DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54), // 10^3 + DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50), // 10^4 + DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47), // 10^5 + DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44), // 10^6 + DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40) // 10^7 + }; + int adjustment = dExp - actualExp - 1; + RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7); + v = v * kPow10[adjustment]; + if (length + static_cast(adjustment)> 19u) // has more digits than decimal digits in 64-bit + error += kUlp / 2; + } + + v = v * cachedPower; + + error += kUlp + (error == 0 ? 
0 : 1); + + const int oldExp = v.e; + v = v.Normalize(); + error <<= oldExp - v.e; + + const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e); + unsigned precisionSize = 64 - effectiveSignificandSize; + if (precisionSize + kUlpShift >= 64) { + unsigned scaleExp = (precisionSize + kUlpShift) - 63; + v.f >>= scaleExp; + v.e += scaleExp; + error = (error >> scaleExp) + 1 + static_cast(kUlp); + precisionSize -= scaleExp; + } + + DiyFp rounded(v.f >> precisionSize, v.e + static_cast(precisionSize)); + const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp; + const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp; + if (precisionBits >= halfWay + static_cast(error)) { + rounded.f++; + if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340) + rounded.f >>= 1; + rounded.e++; + } + } + + *result = rounded.ToDouble(); + + return halfWay - static_cast(error) >= precisionBits || precisionBits >= halfWay + static_cast(error); +} + +inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) { + const BigInteger dInt(decimals, length); + const int dExp = static_cast(decimalPosition) - static_cast(length) + exp; + Double a(approx); + int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp); + if (cmp < 0) + return a.Value(); // within half ULP + else if (cmp == 0) { + // Round towards even + if (a.Significand() & 1) + return a.NextPositiveDouble(); + else + return a.Value(); + } + else // adjustment + return a.NextPositiveDouble(); +} + +inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) { + RAPIDJSON_ASSERT(d >= 0.0); + RAPIDJSON_ASSERT(length >= 1); + + double result; + if (StrtodFast(d, p, &result)) + return result; + + // Trim leading zeros + while (*decimals == '0' && length > 1) { + length--; + decimals++; + decimalPosition--; + } + + // Trim trailing 
zeros + while (decimals[length - 1] == '0' && length > 1) { + length--; + decimalPosition--; + exp++; + } + + // Trim right-most digits + const int kMaxDecimalDigit = 780; + if (static_cast(length) > kMaxDecimalDigit) { + int delta = (static_cast(length) - kMaxDecimalDigit); + exp += delta; + decimalPosition -= static_cast(delta); + length = kMaxDecimalDigit; + } + + // If too small, underflow to zero + if (int(length) + exp < -324) + return 0.0; + + if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result)) + return result; + + // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison + return StrtodBigInteger(result, decimals, length, decimalPosition, exp); +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_STRTOD_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/internal/swap.h b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/swap.h new file mode 100644 index 0000000000..666e49f97b --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/internal/swap.h @@ -0,0 +1,46 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_INTERNAL_SWAP_H_ +#define RAPIDJSON_INTERNAL_SWAP_H_ + +#include "../rapidjson.h" + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +//! Custom swap() to avoid dependency on C++ header +/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only. + \note This has the same semantics as std::swap(). +*/ +template +inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT { + T tmp = a; + a = b; + b = tmp; +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_INTERNAL_SWAP_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/istreamwrapper.h b/sql-odbc/libraries/rapidjson/include/rapidjson/istreamwrapper.h new file mode 100644 index 0000000000..f5fe28977e --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/istreamwrapper.h @@ -0,0 +1,115 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_ISTREAMWRAPPER_H_ +#define RAPIDJSON_ISTREAMWRAPPER_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Wrapper of \c std::basic_istream into RapidJSON's Stream concept. +/*! + The classes can be wrapped including but not limited to: + + - \c std::istringstream + - \c std::stringstream + - \c std::wistringstream + - \c std::wstringstream + - \c std::ifstream + - \c std::fstream + - \c std::wifstream + - \c std::wfstream + + \tparam StreamType Class derived from \c std::basic_istream. +*/ + +template +class BasicIStreamWrapper { +public: + typedef typename StreamType::char_type Ch; + BasicIStreamWrapper(StreamType& stream) : stream_(stream), count_(), peekBuffer_() {} + + Ch Peek() const { + typename StreamType::int_type c = stream_.peek(); + return RAPIDJSON_LIKELY(c != StreamType::traits_type::eof()) ? static_cast(c) : '\0'; + } + + Ch Take() { + typename StreamType::int_type c = stream_.get(); + if (RAPIDJSON_LIKELY(c != StreamType::traits_type::eof())) { + count_++; + return static_cast(c); + } + else + return '\0'; + } + + // tellg() may return -1 when failed. So we count by ourself. + size_t Tell() const { return count_; } + + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + // For encoding detection only. + const Ch* Peek4() const { + RAPIDJSON_ASSERT(sizeof(Ch) == 1); // Only usable for byte stream. 
+ int i; + bool hasError = false; + for (i = 0; i < 4; ++i) { + typename StreamType::int_type c = stream_.get(); + if (c == StreamType::traits_type::eof()) { + hasError = true; + stream_.clear(); + break; + } + peekBuffer_[i] = static_cast(c); + } + for (--i; i >= 0; --i) + stream_.putback(peekBuffer_[i]); + return !hasError ? peekBuffer_ : 0; + } + +private: + BasicIStreamWrapper(const BasicIStreamWrapper&); + BasicIStreamWrapper& operator=(const BasicIStreamWrapper&); + + StreamType& stream_; + size_t count_; //!< Number of characters read. Note: + mutable Ch peekBuffer_[4]; +}; + +typedef BasicIStreamWrapper IStreamWrapper; +typedef BasicIStreamWrapper WIStreamWrapper; + +#if defined(__clang__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ISTREAMWRAPPER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/memorybuffer.h b/sql-odbc/libraries/rapidjson/include/rapidjson/memorybuffer.h new file mode 100644 index 0000000000..39bee1dec1 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/memorybuffer.h @@ -0,0 +1,70 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_MEMORYBUFFER_H_ +#define RAPIDJSON_MEMORYBUFFER_H_ + +#include "stream.h" +#include "internal/stack.h" + +RAPIDJSON_NAMESPACE_BEGIN + +//! 
Represents an in-memory output byte stream. +/*! + This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream. + + It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file. + + Differences between MemoryBuffer and StringBuffer: + 1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer. + 2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator. + + \tparam Allocator type for allocating memory buffer. + \note implements Stream concept +*/ +template +struct GenericMemoryBuffer { + typedef char Ch; // byte + + GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} + + void Put(Ch c) { *stack_.template Push() = c; } + void Flush() {} + + void Clear() { stack_.Clear(); } + void ShrinkToFit() { stack_.ShrinkToFit(); } + Ch* Push(size_t count) { return stack_.template Push(count); } + void Pop(size_t count) { stack_.template Pop(count); } + + const Ch* GetBuffer() const { + return stack_.template Bottom(); + } + + size_t GetSize() const { return stack_.GetSize(); } + + static const size_t kDefaultCapacity = 256; + mutable internal::Stack stack_; +}; + +typedef GenericMemoryBuffer<> MemoryBuffer; + +//! Implement specialized version of PutN() with memset() for better performance. +template<> +inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) { + std::memset(memoryBuffer.stack_.Push(n), c, n * sizeof(c)); +} + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_MEMORYBUFFER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/memorystream.h b/sql-odbc/libraries/rapidjson/include/rapidjson/memorystream.h new file mode 100644 index 0000000000..1d71d8a4f0 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/memorystream.h @@ -0,0 +1,71 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. 
+// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_MEMORYSTREAM_H_ +#define RAPIDJSON_MEMORYSTREAM_H_ + +#include "stream.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(unreachable-code) +RAPIDJSON_DIAG_OFF(missing-noreturn) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Represents an in-memory input byte stream. +/*! + This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream. + + It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file. + + Differences between MemoryStream and StringStream: + 1. StringStream has encoding but MemoryStream is a byte stream. + 2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source. + 3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4(). + \note implements Stream concept +*/ +struct MemoryStream { + typedef char Ch; // byte + + MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {} + + Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; } + Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? 
'\0' : *src_++; } + size_t Tell() const { return static_cast(src_ - begin_); } + + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + // For encoding detection only. + const Ch* Peek4() const { + return Tell() + 4 <= size_ ? src_ : 0; + } + + const Ch* src_; //!< Current read position. + const Ch* begin_; //!< Original head of the string. + const Ch* end_; //!< End of stream. + size_t size_; //!< Size of the stream. +}; + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_MEMORYBUFFER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/msinttypes/inttypes.h b/sql-odbc/libraries/rapidjson/include/rapidjson/msinttypes/inttypes.h new file mode 100644 index 0000000000..18111286bf --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/msinttypes/inttypes.h @@ -0,0 +1,316 @@ +// ISO C9x compliant inttypes.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2013 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the product nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +// The above software in this distribution may have been modified by +// THL A29 Limited ("Tencent Modifications"). +// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" 
+#endif // _MSC_VER ] + +#ifndef _MSC_INTTYPES_H_ // [ +#define _MSC_INTTYPES_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include "stdint.h" + +// miloyip: VC supports inttypes.h since VC2013 +#if _MSC_VER >= 1800 +#include +#else + +// 7.8 Format conversion of integer types + +typedef struct { + intmax_t quot; + intmax_t rem; +} imaxdiv_t; + +// 7.8.1 Macros for format specifiers + +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 + +// The fprintf macros for signed integers are: +#define PRId8 "d" +#define PRIi8 "i" +#define PRIdLEAST8 "d" +#define PRIiLEAST8 "i" +#define PRIdFAST8 "d" +#define PRIiFAST8 "i" + +#define PRId16 "hd" +#define PRIi16 "hi" +#define PRIdLEAST16 "hd" +#define PRIiLEAST16 "hi" +#define PRIdFAST16 "hd" +#define PRIiFAST16 "hi" + +#define PRId32 "I32d" +#define PRIi32 "I32i" +#define PRIdLEAST32 "I32d" +#define PRIiLEAST32 "I32i" +#define PRIdFAST32 "I32d" +#define PRIiFAST32 "I32i" + +#define PRId64 "I64d" +#define PRIi64 "I64i" +#define PRIdLEAST64 "I64d" +#define PRIiLEAST64 "I64i" +#define PRIdFAST64 "I64d" +#define PRIiFAST64 "I64i" + +#define PRIdMAX "I64d" +#define PRIiMAX "I64i" + +#define PRIdPTR "Id" +#define PRIiPTR "Ii" + +// The fprintf macros for unsigned integers are: +#define PRIo8 "o" +#define PRIu8 "u" +#define PRIx8 "x" +#define PRIX8 "X" +#define PRIoLEAST8 "o" +#define PRIuLEAST8 "u" +#define PRIxLEAST8 "x" +#define PRIXLEAST8 "X" +#define PRIoFAST8 "o" +#define PRIuFAST8 "u" +#define PRIxFAST8 "x" +#define PRIXFAST8 "X" + +#define PRIo16 "ho" +#define PRIu16 "hu" +#define PRIx16 "hx" +#define PRIX16 "hX" +#define PRIoLEAST16 "ho" +#define PRIuLEAST16 "hu" +#define PRIxLEAST16 "hx" +#define PRIXLEAST16 "hX" +#define PRIoFAST16 "ho" +#define PRIuFAST16 "hu" +#define PRIxFAST16 "hx" +#define PRIXFAST16 "hX" + +#define PRIo32 "I32o" +#define PRIu32 "I32u" +#define PRIx32 "I32x" +#define PRIX32 "I32X" +#define PRIoLEAST32 "I32o" +#define PRIuLEAST32 "I32u" +#define 
PRIxLEAST32 "I32x" +#define PRIXLEAST32 "I32X" +#define PRIoFAST32 "I32o" +#define PRIuFAST32 "I32u" +#define PRIxFAST32 "I32x" +#define PRIXFAST32 "I32X" + +#define PRIo64 "I64o" +#define PRIu64 "I64u" +#define PRIx64 "I64x" +#define PRIX64 "I64X" +#define PRIoLEAST64 "I64o" +#define PRIuLEAST64 "I64u" +#define PRIxLEAST64 "I64x" +#define PRIXLEAST64 "I64X" +#define PRIoFAST64 "I64o" +#define PRIuFAST64 "I64u" +#define PRIxFAST64 "I64x" +#define PRIXFAST64 "I64X" + +#define PRIoMAX "I64o" +#define PRIuMAX "I64u" +#define PRIxMAX "I64x" +#define PRIXMAX "I64X" + +#define PRIoPTR "Io" +#define PRIuPTR "Iu" +#define PRIxPTR "Ix" +#define PRIXPTR "IX" + +// The fscanf macros for signed integers are: +#define SCNd8 "d" +#define SCNi8 "i" +#define SCNdLEAST8 "d" +#define SCNiLEAST8 "i" +#define SCNdFAST8 "d" +#define SCNiFAST8 "i" + +#define SCNd16 "hd" +#define SCNi16 "hi" +#define SCNdLEAST16 "hd" +#define SCNiLEAST16 "hi" +#define SCNdFAST16 "hd" +#define SCNiFAST16 "hi" + +#define SCNd32 "ld" +#define SCNi32 "li" +#define SCNdLEAST32 "ld" +#define SCNiLEAST32 "li" +#define SCNdFAST32 "ld" +#define SCNiFAST32 "li" + +#define SCNd64 "I64d" +#define SCNi64 "I64i" +#define SCNdLEAST64 "I64d" +#define SCNiLEAST64 "I64i" +#define SCNdFAST64 "I64d" +#define SCNiFAST64 "I64i" + +#define SCNdMAX "I64d" +#define SCNiMAX "I64i" + +#ifdef _WIN64 // [ +# define SCNdPTR "I64d" +# define SCNiPTR "I64i" +#else // _WIN64 ][ +# define SCNdPTR "ld" +# define SCNiPTR "li" +#endif // _WIN64 ] + +// The fscanf macros for unsigned integers are: +#define SCNo8 "o" +#define SCNu8 "u" +#define SCNx8 "x" +#define SCNX8 "X" +#define SCNoLEAST8 "o" +#define SCNuLEAST8 "u" +#define SCNxLEAST8 "x" +#define SCNXLEAST8 "X" +#define SCNoFAST8 "o" +#define SCNuFAST8 "u" +#define SCNxFAST8 "x" +#define SCNXFAST8 "X" + +#define SCNo16 "ho" +#define SCNu16 "hu" +#define SCNx16 "hx" +#define SCNX16 "hX" +#define SCNoLEAST16 "ho" +#define SCNuLEAST16 "hu" +#define SCNxLEAST16 "hx" +#define SCNXLEAST16 
"hX" +#define SCNoFAST16 "ho" +#define SCNuFAST16 "hu" +#define SCNxFAST16 "hx" +#define SCNXFAST16 "hX" + +#define SCNo32 "lo" +#define SCNu32 "lu" +#define SCNx32 "lx" +#define SCNX32 "lX" +#define SCNoLEAST32 "lo" +#define SCNuLEAST32 "lu" +#define SCNxLEAST32 "lx" +#define SCNXLEAST32 "lX" +#define SCNoFAST32 "lo" +#define SCNuFAST32 "lu" +#define SCNxFAST32 "lx" +#define SCNXFAST32 "lX" + +#define SCNo64 "I64o" +#define SCNu64 "I64u" +#define SCNx64 "I64x" +#define SCNX64 "I64X" +#define SCNoLEAST64 "I64o" +#define SCNuLEAST64 "I64u" +#define SCNxLEAST64 "I64x" +#define SCNXLEAST64 "I64X" +#define SCNoFAST64 "I64o" +#define SCNuFAST64 "I64u" +#define SCNxFAST64 "I64x" +#define SCNXFAST64 "I64X" + +#define SCNoMAX "I64o" +#define SCNuMAX "I64u" +#define SCNxMAX "I64x" +#define SCNXMAX "I64X" + +#ifdef _WIN64 // [ +# define SCNoPTR "I64o" +# define SCNuPTR "I64u" +# define SCNxPTR "I64x" +# define SCNXPTR "I64X" +#else // _WIN64 ][ +# define SCNoPTR "lo" +# define SCNuPTR "lu" +# define SCNxPTR "lx" +# define SCNXPTR "lX" +#endif // _WIN64 ] + +#endif // __STDC_FORMAT_MACROS ] + +// 7.8.2 Functions for greatest-width integer types + +// 7.8.2.1 The imaxabs function +#define imaxabs _abs64 + +// 7.8.2.2 The imaxdiv function + +// This is modified version of div() function from Microsoft's div.c found +// in %MSVC.NET%\crt\src\div.c +#ifdef STATIC_IMAXDIV // [ +static +#else // STATIC_IMAXDIV ][ +_inline +#endif // STATIC_IMAXDIV ] +imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) +{ + imaxdiv_t result; + + result.quot = numer / denom; + result.rem = numer % denom; + + if (numer < 0 && result.rem > 0) { + // did division wrong; must fix up + ++result.quot; + result.rem -= denom; + } + + return result; +} + +// 7.8.2.3 The strtoimax and strtoumax functions +#define strtoimax _strtoi64 +#define strtoumax _strtoui64 + +// 7.8.2.4 The wcstoimax and wcstoumax functions +#define wcstoimax _wcstoi64 +#define wcstoumax _wcstoui64 + +#endif // _MSC_VER >= 1800 + 
+#endif // _MSC_INTTYPES_H_ ] diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/msinttypes/stdint.h b/sql-odbc/libraries/rapidjson/include/rapidjson/msinttypes/stdint.h new file mode 100644 index 0000000000..3d4477b9a0 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/msinttypes/stdint.h @@ -0,0 +1,300 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2013 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the product nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +/////////////////////////////////////////////////////////////////////////////// + +// The above software in this distribution may have been modified by +// THL A29 Limited ("Tencent Modifications"). +// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010. +#if _MSC_VER >= 1600 // [ +#include + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +#undef INT8_C +#undef INT16_C +#undef INT32_C +#undef INT64_C +#undef UINT8_C +#undef UINT16_C +#undef UINT32_C +#undef UINT64_C + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with . +// Check out Issue 9 for the details. 
+#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#else // ] _MSC_VER >= 1700 [ + +#include + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we have to wrap include with 'extern "C++" {}' +// or compiler would give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#if defined(__cplusplus) && !defined(_M_ARM) +extern "C" { +#endif +# include +#if defined(__cplusplus) && !defined(_M_ARM) +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. 
+#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define 
UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// 
WCHAR_MIN and WCHAR_MAX are also defined in +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with . +// Check out Issue 9 for the details. +#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#endif // _MSC_VER >= 1600 ] + +#endif // _MSC_STDINT_H_ ] diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/ostreamwrapper.h b/sql-odbc/libraries/rapidjson/include/rapidjson/ostreamwrapper.h new file mode 100644 index 0000000000..6f4667c08a --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/ostreamwrapper.h @@ -0,0 +1,81 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_OSTREAMWRAPPER_H_ +#define RAPIDJSON_OSTREAMWRAPPER_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept. +/*! + The classes can be wrapped including but not limited to: + + - \c std::ostringstream + - \c std::stringstream + - \c std::wpstringstream + - \c std::wstringstream + - \c std::ifstream + - \c std::fstream + - \c std::wofstream + - \c std::wfstream + + \tparam StreamType Class derived from \c std::basic_ostream. 
+*/ + +template +class BasicOStreamWrapper { +public: + typedef typename StreamType::char_type Ch; + BasicOStreamWrapper(StreamType& stream) : stream_(stream) {} + + void Put(Ch c) { + stream_.put(c); + } + + void Flush() { + stream_.flush(); + } + + // Not implemented + char Peek() const { RAPIDJSON_ASSERT(false); return 0; } + char Take() { RAPIDJSON_ASSERT(false); return 0; } + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + BasicOStreamWrapper(const BasicOStreamWrapper&); + BasicOStreamWrapper& operator=(const BasicOStreamWrapper&); + + StreamType& stream_; +}; + +typedef BasicOStreamWrapper OStreamWrapper; +typedef BasicOStreamWrapper WOStreamWrapper; + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_OSTREAMWRAPPER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/pointer.h b/sql-odbc/libraries/rapidjson/include/rapidjson/pointer.h new file mode 100644 index 0000000000..c680c70506 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/pointer.h @@ -0,0 +1,1367 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +// clang-format off + +#ifndef RAPIDJSON_POINTER_H_ +#define RAPIDJSON_POINTER_H_ + +#include "document.h" +#include "internal/itoa.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(switch-enum) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +static const SizeType kPointerInvalidIndex = ~SizeType(0); //!< Represents an invalid index in GenericPointer::Token + +//! Error code of parsing. +/*! \ingroup RAPIDJSON_ERRORS + \see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode +*/ +enum PointerParseErrorCode { + kPointerParseErrorNone = 0, //!< The parse is successful + + kPointerParseErrorTokenMustBeginWithSolidus, //!< A token must begin with a '/' + kPointerParseErrorInvalidEscape, //!< Invalid escape + kPointerParseErrorInvalidPercentEncoding, //!< Invalid percent encoding in URI fragment + kPointerParseErrorCharacterMustPercentEncode //!< A character must percent encoded in URI fragment +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericPointer + +//! Represents a JSON Pointer. Use Pointer for UTF8 encoding and default allocator. +/*! + This class implements RFC 6901 "JavaScript Object Notation (JSON) Pointer" + (https://tools.ietf.org/html/rfc6901). + + A JSON pointer is for identifying a specific value in a JSON document + (GenericDocument). It can simplify coding of DOM tree manipulation, because it + can access multiple-level depth of DOM tree with single API call. + + After it parses a string representation (e.g. "/foo/0" or URI fragment + representation (e.g. "#/foo/0") into its internal representation (tokens), + it can be used to resolve a specific value in multiple documents, or sub-tree + of documents. + + Contrary to GenericValue, Pointer can be copy constructed and copy assigned. + Apart from assignment, a Pointer cannot be modified after construction. 
+ + Although Pointer is very convenient, please aware that constructing Pointer + involves parsing and dynamic memory allocation. A special constructor with user- + supplied tokens eliminates these. + + GenericPointer depends on GenericDocument and GenericValue. + + \tparam ValueType The value type of the DOM tree. E.g. GenericValue > + \tparam Allocator The allocator type for allocating memory for internal representation. + + \note GenericPointer uses same encoding of ValueType. + However, Allocator of GenericPointer is independent of Allocator of Value. +*/ +template +class GenericPointer { +public: + typedef typename ValueType::EncodingType EncodingType; //!< Encoding type from Value + typedef typename ValueType::Ch Ch; //!< Character type from Value + + //! A token is the basic units of internal representation. + /*! + A JSON pointer string representation "/foo/123" is parsed to two tokens: + "foo" and 123. 123 will be represented in both numeric form and string form. + They are resolved according to the actual value type (object or array). + + For token that are not numbers, or the numeric value is out of bound + (greater than limits of SizeType), they are only treated as string form + (i.e. the token's index will be equal to kPointerInvalidIndex). + + This struct is public so that user can create a Pointer without parsing and + allocation, using a special constructor. + */ + struct Token { + const Ch* name; //!< Name of the token. It has null character at the end but it can contain null character. + SizeType length; //!< Length of the name. + SizeType index; //!< A valid array index, if it is not equal to kPointerInvalidIndex. + }; + + //!@name Constructors and destructor. + //@{ + + //! Default constructor. + GenericPointer(Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} + + //! 
Constructor that parses a string or URI fragment representation. + /*! + \param source A null-terminated, string or URI fragment representation of JSON pointer. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + */ + explicit GenericPointer(const Ch* source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source, internal::StrLen(source)); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Constructor that parses a string or URI fragment representation. + /*! + \param source A string or URI fragment representation of JSON pointer. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + explicit GenericPointer(const std::basic_string& source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source.c_str(), source.size()); + } +#endif + + //! Constructor that parses a string or URI fragment representation, with length of the source string. + /*! + \param source A string or URI fragment representation of JSON pointer. + \param length Length of source. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + \note Slightly faster than the overload without length. + */ + GenericPointer(const Ch* source, size_t length, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source, length); + } + + //! Constructor with user-supplied tokens. + /*! + This constructor let user supplies const array of tokens. 
+ This prevents the parsing process and eliminates allocation. + This is preferred for memory constrained environments. + + \param tokens An constant array of tokens representing the JSON pointer. + \param tokenCount Number of tokens. + + \b Example + \code + #define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex } + #define INDEX(i) { #i, sizeof(#i) - 1, i } + + static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) }; + static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0])); + // Equivalent to static const Pointer p("/foo/123"); + + #undef NAME + #undef INDEX + \endcode + */ + GenericPointer(const Token* tokens, size_t tokenCount) : allocator_(), ownAllocator_(), nameBuffer_(), tokens_(const_cast(tokens)), tokenCount_(tokenCount), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} + + //! Copy constructor. + GenericPointer(const GenericPointer& rhs, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + *this = rhs; + } + + //! Destructor. + ~GenericPointer() { + if (nameBuffer_) // If user-supplied tokens constructor is used, nameBuffer_ is nullptr and tokens_ are not deallocated. + Allocator::Free(tokens_); + RAPIDJSON_DELETE(ownAllocator_); + } + + //! Assignment operator. + GenericPointer& operator=(const GenericPointer& rhs) { + if (this != &rhs) { + // Do not delete ownAllcator + if (nameBuffer_) + Allocator::Free(tokens_); + + tokenCount_ = rhs.tokenCount_; + parseErrorOffset_ = rhs.parseErrorOffset_; + parseErrorCode_ = rhs.parseErrorCode_; + + if (rhs.nameBuffer_) + CopyFromRaw(rhs); // Normally parsed tokens. + else { + tokens_ = rhs.tokens_; // User supplied const tokens. + nameBuffer_ = 0; + } + } + return *this; + } + + //@} + + //!@name Append token + //@{ + + //! Append a token and return a new Pointer + /*! + \param token Token to be appended. 
+ \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const Token& token, Allocator* allocator = 0) const { + GenericPointer r; + r.allocator_ = allocator; + Ch *p = r.CopyFromRaw(*this, 1, token.length + 1); + std::memcpy(p, token.name, (token.length + 1) * sizeof(Ch)); + r.tokens_[tokenCount_].name = p; + r.tokens_[tokenCount_].length = token.length; + r.tokens_[tokenCount_].index = token.index; + return r; + } + + //! Append a name token with length, and return a new Pointer + /*! + \param name Name to be appended. + \param length Length of name. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const Ch* name, SizeType length, Allocator* allocator = 0) const { + Token token = { name, length, kPointerInvalidIndex }; + return Append(token, allocator); + } + + //! Append a name token without length, and return a new Pointer + /*! + \param name Name (const Ch*) to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >), (GenericPointer)) + Append(T* name, Allocator* allocator = 0) const { + return Append(name, StrLen(name), allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Append a name token, and return a new Pointer + /*! + \param name Name to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const std::basic_string& name, Allocator* allocator = 0) const { + return Append(name.c_str(), static_cast(name.size()), allocator); + } +#endif + + //! Append a index token, and return a new Pointer + /*! + \param index Index to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. 
+ */ + GenericPointer Append(SizeType index, Allocator* allocator = 0) const { + char buffer[21]; + char* end = sizeof(SizeType) == 4 ? internal::u32toa(index, buffer) : internal::u64toa(index, buffer); + SizeType length = static_cast(end - buffer); + buffer[length] = '\0'; + + #ifdef WIN32 + #pragma warning(push) + #pragma warning(disable: 4127) // Conditional Expression is Constant + #endif // WIN32 + if (sizeof(Ch) == 1) { + #ifdef WIN32 + #pragma warning(pop) + #endif // WIN32 + Token token = { reinterpret_cast(buffer), length, index }; + return Append(token, allocator); + } + else { + Ch name[21]; + for (size_t i = 0; i <= length; i++) + name[i] = buffer[i]; + Token token = { name, length, index }; + return Append(token, allocator); + } + } + + //! Append a token by value, and return a new Pointer + /*! + \param token token to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const ValueType& token, Allocator* allocator = 0) const { + if (token.IsString()) + return Append(token.GetString(), token.GetStringLength(), allocator); + else { + RAPIDJSON_ASSERT(token.IsUint64()); + RAPIDJSON_ASSERT(token.GetUint64() <= SizeType(~0)); + return Append(static_cast(token.GetUint64()), allocator); + } + } + + //!@name Handling Parse Error + //@{ + + //! Check whether this is a valid pointer. + bool IsValid() const { return parseErrorCode_ == kPointerParseErrorNone; } + + //! Get the parsing error offset in code unit. + size_t GetParseErrorOffset() const { return parseErrorOffset_; } + + //! Get the parsing error code. + PointerParseErrorCode GetParseErrorCode() const { return parseErrorCode_; } + + //@} + + //! Get the allocator of this pointer. + Allocator& GetAllocator() { return *allocator_; } + + //!@name Tokens + //@{ + + //! Get the token array (const version only). + const Token* GetTokens() const { return tokens_; } + + //! Get the number of tokens. 
+ size_t GetTokenCount() const { return tokenCount_; } + + //@} + + //!@name Equality/inequality operators + //@{ + + //! Equality operator. + /*! + \note When any pointers are invalid, always returns false. + */ + bool operator==(const GenericPointer& rhs) const { + if (!IsValid() || !rhs.IsValid() || tokenCount_ != rhs.tokenCount_) + return false; + + for (size_t i = 0; i < tokenCount_; i++) { + if (tokens_[i].index != rhs.tokens_[i].index || + tokens_[i].length != rhs.tokens_[i].length || + (tokens_[i].length != 0 && std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch)* tokens_[i].length) != 0)) + { + return false; + } + } + + return true; + } + + //! Inequality operator. + /*! + \note When any pointers are invalid, always returns true. + */ + bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); } + + //@} + + //!@name Stringify + //@{ + + //! Stringify the pointer into string representation. + /*! + \tparam OutputStream Type of output stream. + \param os The output stream. + */ + template + bool Stringify(OutputStream& os) const { + return Stringify(os); + } + + //! Stringify the pointer into URI fragment representation. + /*! + \tparam OutputStream Type of output stream. + \param os The output stream. + */ + template + bool StringifyUriFragment(OutputStream& os) const { + return Stringify(os); + } + + //@} + + //!@name Create value + //@{ + + //! Create a value in a subtree. + /*! + If the value is not exist, it creates all parent values and a JSON Null value. + So it always succeed and return the newly created or existing value. + + Remind that it may change types of parents according to tokens, so it + potentially removes previously stored values. For example, if a document + was an array, and "/foo" is used to create a value, then the document + will be changed to an object, and all existing array elements are lost. + + \param root Root value of a DOM subtree to be resolved. It can be any value other than document root. 
+ \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \param alreadyExist If non-null, it stores whether the resolved value is already exist. + \return The resolved newly created (a JSON Null value), or already exists value. + */ + ValueType& Create(ValueType& root, typename ValueType::AllocatorType& allocator, bool* alreadyExist = 0) const { + RAPIDJSON_ASSERT(IsValid()); + ValueType* v = &root; + bool exist = true; + for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + if (v->IsArray() && t->name[0] == '-' && t->length == 1) { + v->PushBack(ValueType().Move(), allocator); + v = &((*v)[v->Size() - 1]); + exist = false; + } + else { + if (t->index == kPointerInvalidIndex) { // must be object name + if (!v->IsObject()) + v->SetObject(); // Change to Object + } + else { // object name or array index + if (!v->IsArray() && !v->IsObject()) + v->SetArray(); // Change to Array + } + + if (v->IsArray()) { + if (t->index >= v->Size()) { + v->Reserve(t->index + 1, allocator); + while (t->index >= v->Size()) + v->PushBack(ValueType().Move(), allocator); + exist = false; + } + v = &((*v)[t->index]); + } + else { + typename ValueType::MemberIterator m = v->FindMember(GenericStringRef(t->name, t->length)); + if (m == v->MemberEnd()) { + v->AddMember(ValueType(t->name, t->length, allocator).Move(), ValueType().Move(), allocator); + v = &(--v->MemberEnd())->value; // Assumes AddMember() appends at the end + exist = false; + } + else + v = &m->value; + } + } + } + + if (alreadyExist) + *alreadyExist = exist; + + return *v; + } + + //! Creates a value in a document. + /*! + \param document A document to be resolved. + \param alreadyExist If non-null, it stores whether the resolved value is already exist. + \return The resolved newly created, or already exists value. 
+ */ + template + ValueType& Create(GenericDocument& document, bool* alreadyExist = 0) const { + return Create(document, document.GetAllocator(), alreadyExist); + } + + //@} + + //!@name Query value + //@{ + + //! Query a value in a subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param unresolvedTokenIndex If the pointer cannot resolve a token in the pointer, this parameter can obtain the index of unresolved token. + \return Pointer to the value if it can be resolved. Otherwise null. + + \note + There are only 3 situations when a value cannot be resolved: + 1. A value in the path is not an array nor object. + 2. An object value does not contain the token. + 3. A token is out of range of an array value. + + Use unresolvedTokenIndex to retrieve the token index. + */ + ValueType* Get(ValueType& root, size_t* unresolvedTokenIndex = 0) const { + RAPIDJSON_ASSERT(IsValid()); + ValueType* v = &root; + for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + switch (v->GetType()) { + case kObjectType: + { + typename ValueType::MemberIterator m = v->FindMember(GenericStringRef(t->name, t->length)); + if (m == v->MemberEnd()) + break; + v = &m->value; + } + continue; + case kArrayType: + if (t->index == kPointerInvalidIndex || t->index >= v->Size()) + break; + v = &((*v)[t->index]); + continue; + default: + break; + } + + // Error: unresolved token + if (unresolvedTokenIndex) + *unresolvedTokenIndex = static_cast(t - tokens_); + return 0; + } + return v; + } + + //! Query a const value in a const subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \return Pointer to the value if it can be resolved. Otherwise null. 
+ */ + const ValueType* Get(const ValueType& root, size_t* unresolvedTokenIndex = 0) const { + return Get(const_cast(root), unresolvedTokenIndex); + } + + //@} + + //!@name Query a value with default + //@{ + + //! Query a value in a subtree with default value. + /*! + Similar to Get(), but if the specified value do not exists, it creates all parents and clone the default value. + So that this function always succeed. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param defaultValue Default value to be cloned if the value was not exists. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + Value& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.CopyFrom(defaultValue, allocator); + } + + //! Query a value in a subtree with default null-terminated string. + ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + Value& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.SetString(defaultValue, allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Query a value in a subtree with default std::basic_string. + ValueType& GetWithDefault(ValueType& root, const std::basic_string& defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + Value& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.SetString(defaultValue, allocator); + } +#endif + + //! Query a value in a subtree with default primitive value. + /*! 
+ \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + GetWithDefault(ValueType& root, T defaultValue, typename ValueType::AllocatorType& allocator) const { + return GetWithDefault(root, ValueType(defaultValue).Move(), allocator); + } + + //! Query a value in a document with default value. + template + ValueType& GetWithDefault(GenericDocument& document, const ValueType& defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } + + //! Query a value in a document with default null-terminated string. + template + ValueType& GetWithDefault(GenericDocument& document, const Ch* defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Query a value in a document with default std::basic_string. + template + ValueType& GetWithDefault(GenericDocument& document, const std::basic_string& defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } +#endif + + //! Query a value in a document with default primitive value. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + GetWithDefault(GenericDocument& document, T defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } + + //@} + + //!@name Set a value + //@{ + + //! Set a value in a subtree, with move semantics. + /*! + It creates all parents if they are not exist or types are different to the tokens. + So this function always succeeds but potentially remove existing values. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param value Value to be set. 
+ \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& Set(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = value; + } + + //! Set a value in a subtree, with copy semantics. + ValueType& Set(ValueType& root, const ValueType& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator).CopyFrom(value, allocator); + } + + //! Set a null-terminated string in a subtree. + ValueType& Set(ValueType& root, const Ch* value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = ValueType(value, allocator).Move(); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Set a std::basic_string in a subtree. + ValueType& Set(ValueType& root, const std::basic_string& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = ValueType(value, allocator).Move(); + } +#endif + + //! Set a primitive value in a subtree. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + Set(ValueType& root, T value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = ValueType(value).Move(); + } + + //! Set a value in a document, with move semantics. + template + ValueType& Set(GenericDocument& document, ValueType& value) const { + return Create(document) = value; + } + + //! Set a value in a document, with copy semantics. + template + ValueType& Set(GenericDocument& document, const ValueType& value) const { + return Create(document).CopyFrom(value, document.GetAllocator()); + } + + //! Set a null-terminated string in a document. 
+ template + ValueType& Set(GenericDocument& document, const Ch* value) const { + return Create(document) = ValueType(value, document.GetAllocator()).Move(); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Sets a std::basic_string in a document. + template + ValueType& Set(GenericDocument& document, const std::basic_string& value) const { + return Create(document) = ValueType(value, document.GetAllocator()).Move(); + } +#endif + + //! Set a primitive value in a document. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + Set(GenericDocument& document, T value) const { + return Create(document) = value; + } + + //@} + + //!@name Swap a value + //@{ + + //! Swap a value with a value in a subtree. + /*! + It creates all parents if they are not exist or types are different to the tokens. + So this function always succeeds but potentially remove existing values. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param value Value to be swapped. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& Swap(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator).Swap(value); + } + + //! Swap a value with a value in a document. + template + ValueType& Swap(GenericDocument& document, ValueType& value) const { + return Create(document).Swap(value); + } + + //@} + + //! Erase a value in a subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \return Whether the resolved value is found and erased. + + \note Erasing with an empty pointer \c Pointer(""), i.e. the root, always fail and return false. 
+ */ + bool Erase(ValueType& root) const { + RAPIDJSON_ASSERT(IsValid()); + if (tokenCount_ == 0) // Cannot erase the root + return false; + + ValueType* v = &root; + const Token* last = tokens_ + (tokenCount_ - 1); + for (const Token *t = tokens_; t != last; ++t) { + switch (v->GetType()) { + case kObjectType: + { + typename ValueType::MemberIterator m = v->FindMember(GenericStringRef(t->name, t->length)); + if (m == v->MemberEnd()) + return false; + v = &m->value; + } + break; + case kArrayType: + if (t->index == kPointerInvalidIndex || t->index >= v->Size()) + return false; + v = &((*v)[t->index]); + break; + default: + return false; + } + } + + switch (v->GetType()) { + case kObjectType: + return v->EraseMember(GenericStringRef(last->name, last->length)); + case kArrayType: + if (last->index == kPointerInvalidIndex || last->index >= v->Size()) + return false; + v->Erase(v->Begin() + last->index); + return true; + default: + return false; + } + } + +private: + //! Clone the content from rhs to this. + /*! + \param rhs Source pointer. + \param extraToken Extra tokens to be allocated. + \param extraNameBufferSize Extra name buffer size (in number of Ch) to be allocated. + \return Start of non-occupied name buffer, for storing extra names. + */ + Ch* CopyFromRaw(const GenericPointer& rhs, size_t extraToken = 0, size_t extraNameBufferSize = 0) { + if (!allocator_) // allocator is independently owned. 
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + + size_t nameBufferSize = rhs.tokenCount_; // null terminators for tokens + for (Token *t = rhs.tokens_; t != rhs.tokens_ + rhs.tokenCount_; ++t) + nameBufferSize += t->length; + + tokenCount_ = rhs.tokenCount_ + extraToken; + tokens_ = static_cast(allocator_->Malloc(tokenCount_ * sizeof(Token) + (nameBufferSize + extraNameBufferSize) * sizeof(Ch))); + nameBuffer_ = reinterpret_cast(tokens_ + tokenCount_); + if (rhs.tokenCount_ > 0) { + std::memcpy(tokens_, rhs.tokens_, rhs.tokenCount_ * sizeof(Token)); + } + if (nameBufferSize > 0) { + std::memcpy(nameBuffer_, rhs.nameBuffer_, nameBufferSize * sizeof(Ch)); + } + + // Adjust pointers to name buffer + std::ptrdiff_t diff = nameBuffer_ - rhs.nameBuffer_; + for (Token *t = tokens_; t != tokens_ + rhs.tokenCount_; ++t) + t->name += diff; + + return nameBuffer_ + nameBufferSize; + } + + //! Check whether a character should be percent-encoded. + /*! + According to RFC 3986 2.3 Unreserved Characters. + \param c The character (code unit) to be tested. + */ + bool NeedPercentEncode(Ch c) const { + return !((c >= '0' && c <= '9') || (c >= 'A' && c <='Z') || (c >= 'a' && c <= 'z') || c == '-' || c == '.' || c == '_' || c =='~'); + } + + //! Parse a JSON String or its URI fragment representation into tokens. +#ifndef __clang__ // -Wdocumentation + /*! + \param source Either a JSON Pointer string, or its URI fragment representation. Not need to be null terminated. + \param length Length of the source string. + \note Source cannot be JSON String Representation of JSON Pointer, e.g. In "/\u0000", \u0000 will not be unescaped. + */ +#endif + void Parse(const Ch* source, size_t length) { + RAPIDJSON_ASSERT(source != NULL); + RAPIDJSON_ASSERT(nameBuffer_ == 0); + RAPIDJSON_ASSERT(tokens_ == 0); + + // Create own allocator if user did not supply. 
+ if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + + // Count number of '/' as tokenCount + tokenCount_ = 0; + for (const Ch* s = source; s != source + length; s++) + if (*s == '/') + tokenCount_++; + + Token* token = tokens_ = static_cast(allocator_->Malloc(tokenCount_ * sizeof(Token) + length * sizeof(Ch))); + Ch* name = nameBuffer_ = reinterpret_cast(tokens_ + tokenCount_); + size_t i = 0; + + // Detect if it is a URI fragment + bool uriFragment = false; + if (source[i] == '#') { + uriFragment = true; + i++; + } + + if (i != length && source[i] != '/') { + parseErrorCode_ = kPointerParseErrorTokenMustBeginWithSolidus; + goto error; + } + + while (i < length) { + RAPIDJSON_ASSERT(source[i] == '/'); + i++; // consumes '/' + + token->name = name; + bool isNumber = true; + + while (i < length && source[i] != '/') { + Ch c = source[i]; + if (uriFragment) { + // Decoding percent-encoding for URI fragment + if (c == '%') { + PercentDecodeStream is(&source[i], source + length); + GenericInsituStringStream os(name); + Ch* begin = os.PutBegin(); + if (!Transcoder, EncodingType>().Validate(is, os) || !is.IsValid()) { + parseErrorCode_ = kPointerParseErrorInvalidPercentEncoding; + goto error; + } + size_t len = os.PutEnd(begin); + i += is.Tell() - 1; + if (len == 1) + c = *name; + else { + name += len; + isNumber = false; + i++; + continue; + } + } + else if (NeedPercentEncode(c)) { + parseErrorCode_ = kPointerParseErrorCharacterMustPercentEncode; + goto error; + } + } + + i++; + + // Escaping "~0" -> '~', "~1" -> '/' + if (c == '~') { + if (i < length) { + c = source[i]; + if (c == '0') c = '~'; + else if (c == '1') c = '/'; + else { + parseErrorCode_ = kPointerParseErrorInvalidEscape; + goto error; + } + i++; + } + else { + parseErrorCode_ = kPointerParseErrorInvalidEscape; + goto error; + } + } + + // First check for index: all of characters are digit + if (c < '0' || c > '9') + isNumber = false; + + *name++ = c; + } + token->length = 
static_cast(name - token->name); + if (token->length == 0) + isNumber = false; + *name++ = '\0'; // Null terminator + + // Second check for index: more than one digit cannot have leading zero + if (isNumber && token->length > 1 && token->name[0] == '0') + isNumber = false; + + // String to SizeType conversion + SizeType n = 0; + if (isNumber) { + for (size_t j = 0; j < token->length; j++) { + SizeType m = n * 10 + static_cast(token->name[j] - '0'); + if (m < n) { // overflow detection + isNumber = false; + break; + } + n = m; + } + } + + token->index = isNumber ? n : kPointerInvalidIndex; + token++; + } + + RAPIDJSON_ASSERT(name <= nameBuffer_ + length); // Should not overflow buffer + parseErrorCode_ = kPointerParseErrorNone; + return; + + error: + Allocator::Free(tokens_); + nameBuffer_ = 0; + tokens_ = 0; + tokenCount_ = 0; + parseErrorOffset_ = i; + return; + } + + //! Stringify to string or URI fragment representation. + /*! + \tparam uriFragment True for stringifying to URI fragment representation. False for string representation. + \tparam OutputStream type of output stream. + \param os The output stream. + */ + template + bool Stringify(OutputStream& os) const { + RAPIDJSON_ASSERT(IsValid()); + + if (uriFragment) + os.Put('#'); + + for (Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + os.Put('/'); + for (size_t j = 0; j < t->length; j++) { + Ch c = t->name[j]; + if (c == '~') { + os.Put('~'); + os.Put('0'); + } + else if (c == '/') { + os.Put('~'); + os.Put('1'); + } + else if (uriFragment && NeedPercentEncode(c)) { + // Transcode to UTF8 sequence + GenericStringStream source(&t->name[j]); + PercentEncodeStream target(os); + if (!Transcoder >().Validate(source, target)) + return false; + j += source.Tell() - 1; + } + else + os.Put(c); + } + } + return true; + } + + //! A helper stream for decoding a percent-encoded sequence into code unit. + /*! + This stream decodes %XY triplet into code unit (0-255). 
+ If it encounters invalid characters, it sets output code unit as 0 and + mark invalid, and to be checked by IsValid(). + */ + class PercentDecodeStream { + public: + typedef typename ValueType::Ch Ch; + + //! Constructor + /*! + \param source Start of the stream + \param end Past-the-end of the stream. + */ + PercentDecodeStream(const Ch* source, const Ch* end) : src_(source), head_(source), end_(end), valid_(true) {} + + Ch Take() { + if (*src_ != '%' || src_ + 3 > end_) { // %XY triplet + valid_ = false; + return 0; + } + src_++; + Ch c = 0; + for (int j = 0; j < 2; j++) { + c = static_cast(c << 4); + Ch h = *src_; + if (h >= '0' && h <= '9') c = static_cast(c + h - '0'); + else if (h >= 'A' && h <= 'F') c = static_cast(c + h - 'A' + 10); + else if (h >= 'a' && h <= 'f') c = static_cast(c + h - 'a' + 10); + else { + valid_ = false; + return 0; + } + src_++; + } + return c; + } + + size_t Tell() const { return static_cast(src_ - head_); } + bool IsValid() const { return valid_; } + + private: + const Ch* src_; //!< Current read position. + const Ch* head_; //!< Original head of the string. + const Ch* end_; //!< Past-the-end position. + bool valid_; //!< Whether the parsing is valid. + }; + + //! A helper stream to encode character (UTF-8 code unit) into percent-encoded sequence. + template + class PercentEncodeStream { + public: + PercentEncodeStream(OutputStream& os) : os_(os) {} + void Put(char c) { // UTF-8 must be byte + unsigned char u = static_cast(c); + static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + os_.Put('%'); + os_.Put(hexDigits[u >> 4]); + os_.Put(hexDigits[u & 15]); + } + private: + OutputStream& os_; + }; + + Allocator* allocator_; //!< The current allocator. It is either user-supplied or equal to ownAllocator_. + Allocator* ownAllocator_; //!< Allocator owned by this Pointer. + Ch* nameBuffer_; //!< A buffer containing all names in tokens. 
+ Token* tokens_; //!< A list of tokens. + size_t tokenCount_; //!< Number of tokens in tokens_. + size_t parseErrorOffset_; //!< Offset in code unit when parsing fail. + PointerParseErrorCode parseErrorCode_; //!< Parsing error code. +}; + +//! GenericPointer for Value (UTF-8, default allocator). +typedef GenericPointer Pointer; + +//!@name Helper functions for GenericPointer +//@{ + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& CreateValueByPointer(T& root, const GenericPointer& pointer, typename T::AllocatorType& a) { + return pointer.Create(root, a); +} + +template +typename T::ValueType& CreateValueByPointer(T& root, const CharType(&source)[N], typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Create(root, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const GenericPointer& pointer) { + return pointer.Create(document); +} + +template +typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const CharType(&source)[N]) { + return GenericPointer(source, N - 1).Create(document); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType* GetValueByPointer(T& root, const GenericPointer& pointer, size_t* unresolvedTokenIndex = 0) { + return pointer.Get(root, unresolvedTokenIndex); +} + +template +const typename T::ValueType* GetValueByPointer(const T& root, const GenericPointer& pointer, size_t* unresolvedTokenIndex = 0) { + return pointer.Get(root, unresolvedTokenIndex); +} + +template +typename T::ValueType* GetValueByPointer(T& root, const CharType (&source)[N], size_t* unresolvedTokenIndex = 0) { + return GenericPointer(source, N - 1).Get(root, unresolvedTokenIndex); +} + +template +const typename T::ValueType* GetValueByPointer(const T& root, const CharType(&source)[N], size_t* unresolvedTokenIndex = 0) { + 
return GenericPointer(source, N - 1).Get(root, unresolvedTokenIndex); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const typename T::Ch* defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const std::basic_string& defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, T2 defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::Ch* defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const std::basic_string& defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} +#endif + 
+template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +GetValueByPointerWithDefault(T& root, const CharType(&source)[N], T2 defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::ValueType& defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::Ch* defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const std::basic_string& defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, T2 defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& 
GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const std::basic_string& defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], T2 defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const typename T::Ch* value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const std::basic_string& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +SetValueByPointer(T& root, const GenericPointer& pointer, T2 value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], 
const typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::Ch* value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const std::basic_string& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +SetValueByPointer(T& root, const CharType(&source)[N], T2 value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, typename DocumentType::ValueType& value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::ValueType& value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::Ch* value) { + return pointer.Set(document, value); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const std::basic_string& value) { + return pointer.Set(document, value); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +SetValueByPointer(DocumentType& document, const GenericPointer& pointer, T2 value) { + return pointer.Set(document, 
value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const std::basic_string& value) { + return GenericPointer(source, N - 1).Set(document, value); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +SetValueByPointer(DocumentType& document, const CharType(&source)[N], T2 value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& SwapValueByPointer(T& root, const GenericPointer& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Swap(root, value, a); +} + +template +typename T::ValueType& SwapValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Swap(root, value, a); +} + +template +typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const GenericPointer& pointer, typename DocumentType::ValueType& value) { + return pointer.Swap(document, value); +} + +template +typename DocumentType::ValueType& SwapValueByPointer(DocumentType& 
document, const CharType(&source)[N], typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Swap(document, value); +} + +////////////////////////////////////////////////////////////////////////////// + +template +bool EraseValueByPointer(T& root, const GenericPointer& pointer) { + return pointer.Erase(root); +} + +template +bool EraseValueByPointer(T& root, const CharType(&source)[N]) { + return GenericPointer(source, N - 1).Erase(root); +} + +//@} + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_POINTER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/prettywriter.h b/sql-odbc/libraries/rapidjson/include/rapidjson/prettywriter.h new file mode 100644 index 0000000000..0dcb0fee92 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/prettywriter.h @@ -0,0 +1,255 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_PRETTYWRITER_H_ +#define RAPIDJSON_PRETTYWRITER_H_ + +#include "writer.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Combination of PrettyWriter format flags. +/*! 
\see PrettyWriter::SetFormatOptions + */ +enum PrettyFormatOptions { + kFormatDefault = 0, //!< Default pretty formatting. + kFormatSingleLineArray = 1 //!< Format arrays on a single line. +}; + +//! Writer with indentation and spacing. +/*! + \tparam OutputStream Type of ouptut os. + \tparam SourceEncoding Encoding of source string. + \tparam TargetEncoding Encoding of output stream. + \tparam StackAllocator Type of allocator for allocating memory of stack. +*/ +template, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> +class PrettyWriter : public Writer { +public: + typedef Writer Base; + typedef typename Base::Ch Ch; + + //! Constructor + /*! \param os Output stream. + \param allocator User supplied allocator. If it is null, it will create a private one. + \param levelDepth Initial capacity of stack. + */ + explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : + Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {} + + + explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : + Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {} + + //! Set custom indentation. + /*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r'). + \param indentCharCount Number of indent characters for each indentation level. + \note The default indentation is 4 spaces. + */ + PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) { + RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r'); + indentChar_ = indentChar; + indentCharCount_ = indentCharCount; + return *this; + } + + //! Set pretty writer formatting options. + /*! \param options Formatting options. 
+ */ + PrettyWriter& SetFormatOptions(PrettyFormatOptions options) { + formatOptions_ = options; + return *this; + } + + /*! @name Implementation of Handler + \see Handler + */ + //@{ + + bool Null() { PrettyPrefix(kNullType); return Base::WriteNull(); } + bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::WriteBool(b); } + bool Int(int i) { PrettyPrefix(kNumberType); return Base::WriteInt(i); } + bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::WriteUint(u); } + bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); } + bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::WriteUint64(u64); } + bool Double(double d) { PrettyPrefix(kNumberType); return Base::WriteDouble(d); } + + bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + PrettyPrefix(kNumberType); + return Base::WriteString(str, length); + } + + bool String(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + PrettyPrefix(kStringType); + return Base::WriteString(str, length); + } + +#if RAPIDJSON_HAS_STDSTRING + bool String(const std::basic_string& str) { + return String(str.data(), SizeType(str.size())); + } +#endif + + bool StartObject() { + PrettyPrefix(kObjectType); + new (Base::level_stack_.template Push()) typename Base::Level(false); + return Base::WriteStartObject(); + } + + bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } + +#if RAPIDJSON_HAS_STDSTRING + bool Key(const std::basic_string& str) { + return Key(str.data(), SizeType(str.size())); + } +#endif + + bool EndObject(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); + RAPIDJSON_ASSERT(!Base::level_stack_.template Top()->inArray); + bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; + + if (!empty) { + Base::os_->Put('\n'); + WriteIndent(); + } + bool ret = 
Base::WriteEndObject(); + (void)ret; + RAPIDJSON_ASSERT(ret == true); + if (Base::level_stack_.Empty()) // end of json text + Base::os_->Flush(); + return true; + } + + bool StartArray() { + PrettyPrefix(kArrayType); + new (Base::level_stack_.template Push()) typename Base::Level(true); + return Base::WriteStartArray(); + } + + bool EndArray(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); + RAPIDJSON_ASSERT(Base::level_stack_.template Top()->inArray); + bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; + + if (!empty && !(formatOptions_ & kFormatSingleLineArray)) { + Base::os_->Put('\n'); + WriteIndent(); + } + bool ret = Base::WriteEndArray(); + (void)ret; + RAPIDJSON_ASSERT(ret == true); + if (Base::level_stack_.Empty()) // end of json text + Base::os_->Flush(); + return true; + } + + //@} + + /*! @name Convenience extensions */ + //@{ + + //! Simpler but slower overload. + bool String(const Ch* str) { return String(str, internal::StrLen(str)); } + bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } + + //@} + + //! Write a raw JSON value. + /*! + For user to write a stringified JSON as a value. + + \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. + \param length Length of the json. + \param type Type of the root of json. + \note When using PrettyWriter::RawValue(), the result json may not be indented correctly. 
+ */ + bool RawValue(const Ch* json, size_t length, Type type) { PrettyPrefix(type); return Base::WriteRawValue(json, length); } + +protected: + void PrettyPrefix(Type type) { + (void)type; + if (Base::level_stack_.GetSize() != 0) { // this value is not at root + typename Base::Level* level = Base::level_stack_.template Top(); + + if (level->inArray) { + if (level->valueCount > 0) { + Base::os_->Put(','); // add comma if it is not the first element in array + if (formatOptions_ & kFormatSingleLineArray) + Base::os_->Put(' '); + } + + if (!(formatOptions_ & kFormatSingleLineArray)) { + Base::os_->Put('\n'); + WriteIndent(); + } + } + else { // in object + if (level->valueCount > 0) { + if (level->valueCount % 2 == 0) { + Base::os_->Put(','); + Base::os_->Put('\n'); + } + else { + Base::os_->Put(':'); + Base::os_->Put(' '); + } + } + else + Base::os_->Put('\n'); + + if (level->valueCount % 2 == 0) + WriteIndent(); + } + if (!level->inArray && level->valueCount % 2 == 0) + RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name + level->valueCount++; + } + else { + RAPIDJSON_ASSERT(!Base::hasRoot_); // Should only has one and only one root. + Base::hasRoot_ = true; + } + } + + void WriteIndent() { + size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_; + PutN(*Base::os_, static_cast(indentChar_), count); + } + + Ch indentChar_; + unsigned indentCharCount_; + PrettyFormatOptions formatOptions_; + +private: + // Prohibit copy constructor & assignment operator. 
+ PrettyWriter(const PrettyWriter&); + PrettyWriter& operator=(const PrettyWriter&); +}; + +RAPIDJSON_NAMESPACE_END + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/rapidjson.h b/sql-odbc/libraries/rapidjson/include/rapidjson/rapidjson.h new file mode 100644 index 0000000000..053b2ce43f --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/rapidjson.h @@ -0,0 +1,615 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_RAPIDJSON_H_ +#define RAPIDJSON_RAPIDJSON_H_ + +/*!\file rapidjson.h + \brief common definitions and configuration + + \see RAPIDJSON_CONFIG + */ + +/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration + \brief Configuration macros for library features + + Some RapidJSON features are configurable to adapt the library to a wide + variety of platforms, environments and usage scenarios. Most of the + features can be configured in terms of overriden or predefined + preprocessor macros at compile-time. + + Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs. + + \note These macros should be given on the compiler command-line + (where applicable) to avoid inconsistent values when compiling + different translation units of a single application. 
+ */ + +#include // malloc(), realloc(), free(), size_t +#include // memset(), memcpy(), memmove(), memcmp() + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_VERSION_STRING +// +// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt. +// + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +// token stringification +#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x) +#define RAPIDJSON_DO_STRINGIFY(x) #x +//!@endcond + +/*! \def RAPIDJSON_MAJOR_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Major version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_MINOR_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Minor version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_PATCH_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Patch version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_VERSION_STRING + \ingroup RAPIDJSON_CONFIG + \brief Version of RapidJSON in ".." string format. +*/ +#define RAPIDJSON_MAJOR_VERSION 1 +#define RAPIDJSON_MINOR_VERSION 1 +#define RAPIDJSON_PATCH_VERSION 0 +#define RAPIDJSON_VERSION_STRING \ + RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION) + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NAMESPACE_(BEGIN|END) +/*! \def RAPIDJSON_NAMESPACE + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace + + In order to avoid symbol clashes and/or "One Definition Rule" errors + between multiple inclusions of (different versions of) RapidJSON in + a single binary, users can customize the name of the main RapidJSON + namespace. + + In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE + to a custom name (e.g. \c MyRapidJSON) is sufficient. 
If multiple + levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref + RAPIDJSON_NAMESPACE_END need to be defined as well: + + \code + // in some .cpp file + #define RAPIDJSON_NAMESPACE my::rapidjson + #define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson { + #define RAPIDJSON_NAMESPACE_END } } + #include "rapidjson/..." + \endcode + + \see rapidjson + */ +/*! \def RAPIDJSON_NAMESPACE_BEGIN + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace (opening expression) + \see RAPIDJSON_NAMESPACE +*/ +/*! \def RAPIDJSON_NAMESPACE_END + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace (closing expression) + \see RAPIDJSON_NAMESPACE +*/ +#ifndef RAPIDJSON_NAMESPACE +#define RAPIDJSON_NAMESPACE rapidjson +#endif +#ifndef RAPIDJSON_NAMESPACE_BEGIN +#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE { +#endif +#ifndef RAPIDJSON_NAMESPACE_END +#define RAPIDJSON_NAMESPACE_END } +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_HAS_STDSTRING + +#ifndef RAPIDJSON_HAS_STDSTRING +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation +#else +#define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default +#endif +/*! \def RAPIDJSON_HAS_STDSTRING + \ingroup RAPIDJSON_CONFIG + \brief Enable RapidJSON support for \c std::string + + By defining this preprocessor symbol to \c 1, several convenience functions for using + \ref rapidjson::GenericValue with \c std::string are enabled, especially + for construction and comparison. + + \hideinitializer +*/ +#endif // !defined(RAPIDJSON_HAS_STDSTRING) + +#if RAPIDJSON_HAS_STDSTRING +#include +#endif // RAPIDJSON_HAS_STDSTRING + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NO_INT64DEFINE + +/*! \def RAPIDJSON_NO_INT64DEFINE + \ingroup RAPIDJSON_CONFIG + \brief Use external 64-bit integer types. 
+ + RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types + to be available at global scope. + + If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to + prevent RapidJSON from defining its own types. +*/ +#ifndef RAPIDJSON_NO_INT64DEFINE +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013 +#include "msinttypes/stdint.h" +#include "msinttypes/inttypes.h" +#else +// Other compilers should have this. +#include +#include +#endif +//!@endcond +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_NO_INT64DEFINE +#endif +#endif // RAPIDJSON_NO_INT64TYPEDEF + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_FORCEINLINE + +#ifndef RAPIDJSON_FORCEINLINE +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#if defined(_MSC_VER) && defined(NDEBUG) +#define RAPIDJSON_FORCEINLINE __forceinline +#elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG) +#define RAPIDJSON_FORCEINLINE __attribute__((always_inline)) +#else +#define RAPIDJSON_FORCEINLINE +#endif +//!@endcond +#endif // RAPIDJSON_FORCEINLINE + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ENDIAN +#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine +#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine + +//! Endianness of the machine. +/*! + \def RAPIDJSON_ENDIAN + \ingroup RAPIDJSON_CONFIG + + GCC 4.6 provided macro for detecting endianness of the target machine. But other + compilers may not have this. User can define RAPIDJSON_ENDIAN to either + \ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN. 
+ + Default detection implemented with reference to + \li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html + \li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp +*/ +#ifndef RAPIDJSON_ENDIAN +// Detect with GCC 4.6's macro +# ifdef __BYTE_ORDER__ +# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# else +# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# endif // __BYTE_ORDER__ +// Detect with GLIBC's endian.h +# elif defined(__GLIBC__) +# include +# if (__BYTE_ORDER == __LITTLE_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif (__BYTE_ORDER == __BIG_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# else +# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# endif // __GLIBC__ +// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro +# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +// Detect with architecture macros +# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(_MSC_VER) && defined(_M_ARM) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(RAPIDJSON_DOXYGEN_RUNNING) +# 
define RAPIDJSON_ENDIAN +# else +# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# endif +#endif // RAPIDJSON_ENDIAN + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_64BIT + +//! Whether using 64-bit architecture +#ifndef RAPIDJSON_64BIT +#if defined(__LP64__) || (defined(__x86_64__) && defined(__ILP32__)) || defined(_WIN64) || defined(__EMSCRIPTEN__) +#define RAPIDJSON_64BIT 1 +#else +#define RAPIDJSON_64BIT 0 +#endif +#endif // RAPIDJSON_64BIT + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ALIGN + +//! Data alignment of the machine. +/*! \ingroup RAPIDJSON_CONFIG + \param x pointer to align + + Some machines require strict data alignment. Currently the default uses 4 bytes + alignment on 32-bit platforms and 8 bytes alignment for 64-bit platforms. + User can customize by defining the RAPIDJSON_ALIGN function macro. +*/ +#ifndef RAPIDJSON_ALIGN +#if RAPIDJSON_64BIT == 1 +#define RAPIDJSON_ALIGN(x) (((x) + static_cast(7u)) & ~static_cast(7u)) +#else +#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u) +#endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_UINT64_C2 + +//! Construct a 64-bit literal by a pair of 32-bit integer. +/*! + 64-bit literal with or without ULL suffix is prone to compiler warnings. + UINT64_C() is C macro which cause compilation problems. + Use this macro to define 64-bit constants by a pair of 32-bit integer. +*/ +#ifndef RAPIDJSON_UINT64_C2 +#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast(high32) << 32) | static_cast(low32)) +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_48BITPOINTER_OPTIMIZATION + +//! Use only lower 48-bit address for some pointers. +/*! 
+ \ingroup RAPIDJSON_CONFIG + + This optimization uses the fact that current X86-64 architecture only implement lower 48-bit virtual address. + The higher 16-bit can be used for storing other data. + \c GenericValue uses this optimization to reduce its size form 24 bytes to 16 bytes in 64-bit architecture. +*/ +#ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION +#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) +#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1 +#else +#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0 +#endif +#endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION + +#if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1 +#if RAPIDJSON_64BIT != 1 +#error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1 +#endif +#define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast((reinterpret_cast(p) & static_cast(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast(reinterpret_cast(x)))) +#define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast(reinterpret_cast(p) & static_cast(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF)))) +#else +#define RAPIDJSON_SETPOINTER(type, p, x) (p = (x)) +#define RAPIDJSON_GETPOINTER(type, p) (p) +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_SIMD + +/*! \def RAPIDJSON_SIMD + \ingroup RAPIDJSON_CONFIG + \brief Enable SSE2/SSE4.2 optimization. + + RapidJSON supports optimized implementations for some parsing operations + based on the SSE2 or SSE4.2 SIMD extensions on modern Intel-compatible + processors. + + To enable these optimizations, two different symbols can be defined; + \code + // Enable SSE2 optimization. + #define RAPIDJSON_SSE2 + + // Enable SSE4.2 optimization. + #define RAPIDJSON_SSE42 + \endcode + + \c RAPIDJSON_SSE42 takes precedence, if both are defined. 
+ + If any of these symbols is defined, RapidJSON defines the macro + \c RAPIDJSON_SIMD to indicate the availability of the optimized code. +*/ +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \ + || defined(RAPIDJSON_DOXYGEN_RUNNING) +#define RAPIDJSON_SIMD +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NO_SIZETYPEDEFINE + +#ifndef RAPIDJSON_NO_SIZETYPEDEFINE +/*! \def RAPIDJSON_NO_SIZETYPEDEFINE + \ingroup RAPIDJSON_CONFIG + \brief User-provided \c SizeType definition. + + In order to avoid using 32-bit size types for indexing strings and arrays, + define this preprocessor symbol and provide the type rapidjson::SizeType + before including RapidJSON: + \code + #define RAPIDJSON_NO_SIZETYPEDEFINE + namespace rapidjson { typedef ::std::size_t SizeType; } + #include "rapidjson/..." + \endcode + + \see rapidjson::SizeType +*/ +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_NO_SIZETYPEDEFINE +#endif +RAPIDJSON_NAMESPACE_BEGIN +//! Size type (for string lengths, array sizes, etc.) +/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms, + instead of using \c size_t. Users may override the SizeType by defining + \ref RAPIDJSON_NO_SIZETYPEDEFINE. +*/ +typedef unsigned SizeType; +RAPIDJSON_NAMESPACE_END +#endif + +// always import std::size_t to rapidjson namespace +RAPIDJSON_NAMESPACE_BEGIN +using std::size_t; +RAPIDJSON_NAMESPACE_END + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ASSERT + +//! Assertion. +/*! \ingroup RAPIDJSON_CONFIG + By default, rapidjson uses C \c assert() for internal assertions. + User can override it by defining RAPIDJSON_ASSERT(x) macro. + + \note Parsing errors are handled and can be customized by the + \ref RAPIDJSON_ERRORS APIs. 
+*/ +#ifndef RAPIDJSON_ASSERT +#include +#define RAPIDJSON_ASSERT(x) assert(x) +#endif // RAPIDJSON_ASSERT + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_STATIC_ASSERT + +// Adopt from boost +#ifndef RAPIDJSON_STATIC_ASSERT +#ifndef __clang__ +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#endif +RAPIDJSON_NAMESPACE_BEGIN +template struct STATIC_ASSERTION_FAILURE; +template <> struct STATIC_ASSERTION_FAILURE { enum { value = 1 }; }; +template struct StaticAssertTest {}; +RAPIDJSON_NAMESPACE_END + +#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y) +#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y) +#define RAPIDJSON_DO_JOIN2(X, Y) X##Y + +#if defined(__GNUC__) +#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused)) +#else +#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE +#endif +#ifndef __clang__ +//!@endcond +#endif + +/*! \def RAPIDJSON_STATIC_ASSERT + \brief (Internal) macro to check for conditions at compile-time + \param x compile-time condition + \hideinitializer + */ +#define RAPIDJSON_STATIC_ASSERT(x) \ + typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \ + sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE)> \ + RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY + +//! Compiler branching hint for expression with high probability to be true. +/*! + \ingroup RAPIDJSON_CONFIG + \param x Boolean expression likely to be true. +*/ +#ifndef RAPIDJSON_LIKELY +#if defined(__GNUC__) || defined(__clang__) +#define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define RAPIDJSON_LIKELY(x) (x) +#endif +#endif + +//! Compiler branching hint for expression with low probability to be true. +/*! + \ingroup RAPIDJSON_CONFIG + \param x Boolean expression unlikely to be true. 
+*/ +#ifndef RAPIDJSON_UNLIKELY +#if defined(__GNUC__) || defined(__clang__) +#define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define RAPIDJSON_UNLIKELY(x) (x) +#endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// Helpers + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN + +#define RAPIDJSON_MULTILINEMACRO_BEGIN do { +#define RAPIDJSON_MULTILINEMACRO_END \ +} while((void)0, 0) + +// adopted from Boost +#define RAPIDJSON_VERSION_CODE(x,y,z) \ + (((x)*100000) + ((y)*100) + (z)) + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF + +#if defined(__GNUC__) +#define RAPIDJSON_GNUC \ + RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) +#endif + +#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0)) + +#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x)) +#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x) +#define RAPIDJSON_DIAG_OFF(x) \ + RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x))) + +// push/pop support in Clang and GCC>=4.6 +#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) +#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) +#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) +#else // GCC >= 4.2, < 4.6 +#define RAPIDJSON_DIAG_PUSH /* ignored */ +#define RAPIDJSON_DIAG_POP /* ignored */ +#endif + +#elif defined(_MSC_VER) + +// pragma (MSVC specific) +#define RAPIDJSON_PRAGMA(x) __pragma(x) +#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x)) + +#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x) +#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) +#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) + +#else + +#define RAPIDJSON_DIAG_OFF(x) /* ignored */ +#define RAPIDJSON_DIAG_PUSH /* ignored */ +#define RAPIDJSON_DIAG_POP /* 
ignored */ + +#endif // RAPIDJSON_DIAG_* + +/////////////////////////////////////////////////////////////////////////////// +// C++11 features + +#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS +#if defined(__clang__) +#if __has_feature(cxx_rvalue_references) && \ + (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306) +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 +#else +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 +#endif +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1600) + +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 +#else +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 +#endif +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + +#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT +#if defined(__clang__) +#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept) +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) +// (defined(_MSC_VER) && _MSC_VER >= ????) 
// not yet supported +#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1 +#else +#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0 +#endif +#endif +#if RAPIDJSON_HAS_CXX11_NOEXCEPT +#define RAPIDJSON_NOEXCEPT noexcept +#else +#define RAPIDJSON_NOEXCEPT /* noexcept */ +#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT + +// no automatic detection, yet +#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS +#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0 +#endif + +#ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR +#if defined(__clang__) +#define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for) +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1700) +#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1 +#else +#define RAPIDJSON_HAS_CXX11_RANGE_FOR 0 +#endif +#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR + +//!@endcond + +/////////////////////////////////////////////////////////////////////////////// +// new/delete + +#ifndef RAPIDJSON_NEW +///! customization point for global \c new +#define RAPIDJSON_NEW(x) new x +#endif +#ifndef RAPIDJSON_DELETE +///! customization point for global \c delete +#define RAPIDJSON_DELETE(x) delete x +#endif + +/////////////////////////////////////////////////////////////////////////////// +// Type + +/*! \namespace rapidjson + \brief main RapidJSON namespace + \see RAPIDJSON_NAMESPACE +*/ +RAPIDJSON_NAMESPACE_BEGIN + +//! 
Type of JSON value +enum Type { + kNullType = 0, //!< null + kFalseType = 1, //!< false + kTrueType = 2, //!< true + kObjectType = 3, //!< object + kArrayType = 4, //!< array + kStringType = 5, //!< string + kNumberType = 6 //!< number +}; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/reader.h b/sql-odbc/libraries/rapidjson/include/rapidjson/reader.h new file mode 100644 index 0000000000..19f8849b14 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/reader.h @@ -0,0 +1,1879 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_READER_H_ +#define RAPIDJSON_READER_H_ + +/*! 
\file reader.h */ + +#include "allocators.h" +#include "stream.h" +#include "encodedstream.h" +#include "internal/meta.h" +#include "internal/stack.h" +#include "internal/strtod.h" +#include + +#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER) +#include +#pragma intrinsic(_BitScanForward) +#endif +#ifdef RAPIDJSON_SSE42 +#include +#elif defined(RAPIDJSON_SSE2) +#include +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +RAPIDJSON_DIAG_OFF(4702) // unreachable code +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(old-style-cast) +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(switch-enum) +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#define RAPIDJSON_NOTHING /* deliberately empty */ +#ifndef RAPIDJSON_PARSE_ERROR_EARLY_RETURN +#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN(value) \ + RAPIDJSON_MULTILINEMACRO_BEGIN \ + if (RAPIDJSON_UNLIKELY(HasParseError())) { return value; } \ + RAPIDJSON_MULTILINEMACRO_END +#endif +#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID \ + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(RAPIDJSON_NOTHING) +//!@endcond + +/*! \def RAPIDJSON_PARSE_ERROR_NORETURN + \ingroup RAPIDJSON_ERRORS + \brief Macro to indicate a parse error. + \param parseErrorCode \ref rapidjson::ParseErrorCode of the error + \param offset position of the error in JSON input (\c size_t) + + This macros can be used as a customization point for the internal + error handling mechanism of RapidJSON. 
+ + A common usage model is to throw an exception instead of requiring the + caller to explicitly check the \ref rapidjson::GenericReader::Parse's + return value: + + \code + #define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode,offset) \ + throw ParseException(parseErrorCode, #parseErrorCode, offset) + + #include // std::runtime_error + #include "rapidjson/error/error.h" // rapidjson::ParseResult + + struct ParseException : std::runtime_error, rapidjson::ParseResult { + ParseException(rapidjson::ParseErrorCode code, const char* msg, size_t offset) + : std::runtime_error(msg), ParseResult(code, offset) {} + }; + + #include "rapidjson/reader.h" + \endcode + + \see RAPIDJSON_PARSE_ERROR, rapidjson::GenericReader::Parse + */ +#ifndef RAPIDJSON_PARSE_ERROR_NORETURN +#define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset) \ + RAPIDJSON_MULTILINEMACRO_BEGIN \ + RAPIDJSON_ASSERT(!HasParseError()); /* Error can only be assigned once */ \ + SetParseError(parseErrorCode, offset); \ + RAPIDJSON_MULTILINEMACRO_END +#endif + +/*! \def RAPIDJSON_PARSE_ERROR + \ingroup RAPIDJSON_ERRORS + \brief (Internal) macro to indicate and handle a parse error. + \param parseErrorCode \ref rapidjson::ParseErrorCode of the error + \param offset position of the error in JSON input (\c size_t) + + Invokes RAPIDJSON_PARSE_ERROR_NORETURN and stops the parsing. + + \see RAPIDJSON_PARSE_ERROR_NORETURN + \hideinitializer + */ +#ifndef RAPIDJSON_PARSE_ERROR +#define RAPIDJSON_PARSE_ERROR(parseErrorCode, offset) \ + RAPIDJSON_MULTILINEMACRO_BEGIN \ + RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset); \ + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; \ + RAPIDJSON_MULTILINEMACRO_END +#endif + +#include "error/error.h" // ParseErrorCode, ParseResult + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// ParseFlag + +/*! \def RAPIDJSON_PARSE_DEFAULT_FLAGS + \ingroup RAPIDJSON_CONFIG + \brief User-defined kParseDefaultFlags definition. 
+ + User can define this as any \c ParseFlag combinations. +*/ +#ifndef RAPIDJSON_PARSE_DEFAULT_FLAGS +#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseNoFlags +#endif + +//! Combination of parseFlags +/*! \see Reader::Parse, Document::Parse, Document::ParseInsitu, Document::ParseStream + */ +enum ParseFlag { + kParseNoFlags = 0, //!< No flags are set. + kParseInsituFlag = 1, //!< In-situ(destructive) parsing. + kParseValidateEncodingFlag = 2, //!< Validate encoding of JSON strings. + kParseIterativeFlag = 4, //!< Iterative(constant complexity in terms of function call stack size) parsing. + kParseStopWhenDoneFlag = 8, //!< After parsing a complete JSON root from stream, stop further processing the rest of stream. When this flag is used, parser will not generate kParseErrorDocumentRootNotSingular error. + kParseFullPrecisionFlag = 16, //!< Parse number in full precision (but slower). + kParseCommentsFlag = 32, //!< Allow one-line (//) and multi-line (/**/) comments. + kParseNumbersAsStringsFlag = 64, //!< Parse all numbers (ints/doubles) as strings. + kParseTrailingCommasFlag = 128, //!< Allow trailing commas at the end of objects and arrays. + kParseNanAndInfFlag = 256, //!< Allow parsing NaN, Inf, Infinity, -Inf and -Infinity as doubles. + kParseDefaultFlags = RAPIDJSON_PARSE_DEFAULT_FLAGS //!< Default parse flags. Can be customized by defining RAPIDJSON_PARSE_DEFAULT_FLAGS +}; + +/////////////////////////////////////////////////////////////////////////////// +// Handler + +/*! \class rapidjson::Handler + \brief Concept for receiving events from GenericReader upon parsing. + The functions return true if no error occurs. If they return false, + the event publisher should terminate the process. 
+\code +concept Handler { + typename Ch; + + bool Null(); + bool Bool(bool b); + bool Int(int i); + bool Uint(unsigned i); + bool Int64(int64_t i); + bool Uint64(uint64_t i); + bool Double(double d); + /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length) + bool RawNumber(const Ch* str, SizeType length, bool copy); + bool String(const Ch* str, SizeType length, bool copy); + bool StartObject(); + bool Key(const Ch* str, SizeType length, bool copy); + bool EndObject(SizeType memberCount); + bool StartArray(); + bool EndArray(SizeType elementCount); +}; +\endcode +*/ +/////////////////////////////////////////////////////////////////////////////// +// BaseReaderHandler + +//! Default implementation of Handler. +/*! This can be used as base class of any reader handler. + \note implements Handler concept +*/ +template, typename Derived = void> +struct BaseReaderHandler { + typedef typename Encoding::Ch Ch; + + typedef typename internal::SelectIf, BaseReaderHandler, Derived>::Type Override; + + bool Default() { return true; } + bool Null() { return static_cast(*this).Default(); } + bool Bool(bool) { return static_cast(*this).Default(); } + bool Int(int) { return static_cast(*this).Default(); } + bool Uint(unsigned) { return static_cast(*this).Default(); } + bool Int64(int64_t) { return static_cast(*this).Default(); } + bool Uint64(uint64_t) { return static_cast(*this).Default(); } + bool Double(double) { return static_cast(*this).Default(); } + /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length) + bool RawNumber(const Ch* str, SizeType len, bool copy) { return static_cast(*this).String(str, len, copy); } + bool String(const Ch*, SizeType, bool) { return static_cast(*this).Default(); } + bool StartObject() { return static_cast(*this).Default(); } + bool Key(const Ch* str, SizeType len, bool copy) { return static_cast(*this).String(str, len, copy); } + bool EndObject(SizeType) { return 
static_cast(*this).Default(); } + bool StartArray() { return static_cast(*this).Default(); } + bool EndArray(SizeType) { return static_cast(*this).Default(); } +}; + +/////////////////////////////////////////////////////////////////////////////// +// StreamLocalCopy + +namespace internal { + +template::copyOptimization> +class StreamLocalCopy; + +//! Do copy optimization. +template +class StreamLocalCopy { +public: + StreamLocalCopy(Stream& original) : s(original), original_(original) {} + ~StreamLocalCopy() { original_ = s; } + + Stream s; + +private: + StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */; + + Stream& original_; +}; + +//! Keep reference. +template +class StreamLocalCopy { +public: + StreamLocalCopy(Stream& original) : s(original) {} + + Stream& s; + +private: + StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */; +}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// SkipWhitespace + +//! Skip the JSON white spaces in a stream. +/*! \param is A input stream for skipping white spaces. + \note This function has SSE2/SSE4.2 specialization. +*/ +template +void SkipWhitespace(InputStream& is) { + internal::StreamLocalCopy copy(is); + InputStream& s(copy.s); + + typename InputStream::Ch c; + while ((c = s.Peek()) == ' ' || c == '\n' || c == '\r' || c == '\t') + s.Take(); +} + +inline const char* SkipWhitespace(const char* p, const char* end) { + while (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + return p; +} + +#ifdef RAPIDJSON_SSE42 +//! Skip whitespace with SSE 4.2 pcmpistrm instruction, testing 16 8-byte characters at once. 
+inline const char *SkipWhitespace_SIMD(const char* p) { + // Fast return for single non-whitespace + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // 16-byte align to the next boundary + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // The rest of string using SIMD + static const char whitespace[16] = " \n\r\t"; + const __m128i w = _mm_loadu_si128(reinterpret_cast(&whitespace[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); + if (r != 0) { // some of characters is non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } +} + +inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { + // Fast return for single non-whitespace + if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + else + return p; + + // The middle of string using SIMD + static const char whitespace[16] = " \n\r\t"; + const __m128i w = _mm_loadu_si128(reinterpret_cast(&whitespace[0])); + + for (; p <= end - 16; p += 16) { + const __m128i s = _mm_loadu_si128(reinterpret_cast(p)); + const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); + if (r != 0) { // some of characters is non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } + + return SkipWhitespace(p, end); +} + +#elif defined(RAPIDJSON_SSE2) + +//! 
Skip whitespace with SSE2 instructions, testing 16 8-byte characters at once. +inline const char *SkipWhitespace_SIMD(const char* p) { + // Fast return for single non-whitespace + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // 16-byte align to the next boundary + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // The rest of string + #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } + static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; + #undef C16 + + const __m128i w0 = _mm_loadu_si128(reinterpret_cast(&whitespaces[0][0])); + const __m128i w1 = _mm_loadu_si128(reinterpret_cast(&whitespaces[1][0])); + const __m128i w2 = _mm_loadu_si128(reinterpret_cast(&whitespaces[2][0])); + const __m128i w3 = _mm_loadu_si128(reinterpret_cast(&whitespaces[3][0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + __m128i x = _mm_cmpeq_epi8(s, w0); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); + unsigned short r = static_cast(~_mm_movemask_epi8(x)); + if (r != 0) { // some of characters may be non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } +} + +inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { + // Fast return for single non-whitespace + if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + else + return p; + + // The rest of string + #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } + static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; + #undef C16 + + 
const __m128i w0 = _mm_loadu_si128(reinterpret_cast(&whitespaces[0][0])); + const __m128i w1 = _mm_loadu_si128(reinterpret_cast(&whitespaces[1][0])); + const __m128i w2 = _mm_loadu_si128(reinterpret_cast(&whitespaces[2][0])); + const __m128i w3 = _mm_loadu_si128(reinterpret_cast(&whitespaces[3][0])); + + for (; p <= end - 16; p += 16) { + const __m128i s = _mm_loadu_si128(reinterpret_cast(p)); + __m128i x = _mm_cmpeq_epi8(s, w0); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); + unsigned short r = static_cast(~_mm_movemask_epi8(x)); + if (r != 0) { // some of characters may be non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } + + return SkipWhitespace(p, end); +} + +#endif // RAPIDJSON_SSE2 + +#ifdef RAPIDJSON_SIMD +//! Template function specialization for InsituStringStream +template<> inline void SkipWhitespace(InsituStringStream& is) { + is.src_ = const_cast(SkipWhitespace_SIMD(is.src_)); +} + +//! Template function specialization for StringStream +template<> inline void SkipWhitespace(StringStream& is) { + is.src_ = SkipWhitespace_SIMD(is.src_); +} + +template<> inline void SkipWhitespace(EncodedInputStream, MemoryStream>& is) { + is.is_.src_ = SkipWhitespace_SIMD(is.is_.src_, is.is_.end_); +} +#endif // RAPIDJSON_SIMD + +/////////////////////////////////////////////////////////////////////////////// +// GenericReader + +//! SAX-style JSON parser. Use \ref Reader for UTF8 encoding and default allocator. +/*! GenericReader parses JSON text from a stream, and send events synchronously to an + object implementing Handler concept. + + It needs to allocate a stack for storing a single decoded string during + non-destructive parsing. 
+ + For in-situ parsing, the decoded string is directly written to the source + text string, no temporary buffer is required. + + A GenericReader object can be reused for parsing multiple JSON text. + + \tparam SourceEncoding Encoding of the input stream. + \tparam TargetEncoding Encoding of the parse output. + \tparam StackAllocator Allocator type for stack. +*/ +template +class GenericReader { +public: + typedef typename SourceEncoding::Ch Ch; //!< SourceEncoding character type + + //! Constructor. + /*! \param stackAllocator Optional allocator for allocating stack memory. (Only use for non-destructive parsing) + \param stackCapacity stack capacity in bytes for storing a single decoded string. (Only use for non-destructive parsing) + */ + GenericReader(StackAllocator* stackAllocator = 0, size_t stackCapacity = kDefaultStackCapacity) : stack_(stackAllocator, stackCapacity), parseResult_() {} + + //! Parse JSON text. + /*! \tparam parseFlags Combination of \ref ParseFlag. + \tparam InputStream Type of input stream, implementing Stream concept. + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. 
+ */ + template + ParseResult Parse(InputStream& is, Handler& handler) { + if (parseFlags & kParseIterativeFlag) + return IterativeParse(is, handler); + + parseResult_.Clear(); + + ClearStackOnExit scope(*this); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentEmpty, is.Tell()); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + else { + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (!(parseFlags & kParseStopWhenDoneFlag)) { + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (RAPIDJSON_UNLIKELY(is.Peek() != '\0')) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentRootNotSingular, is.Tell()); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + } + } + + return parseResult_; + } + + //! Parse JSON text (with \ref kParseDefaultFlags) + /*! \tparam InputStream Type of input stream, implementing Stream concept + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. + */ + template + ParseResult Parse(InputStream& is, Handler& handler) { + return Parse(is, handler); + } + + //! Whether a parse error has occured in the last parsing. + bool HasParseError() const { return parseResult_.IsError(); } + + //! Get the \ref ParseErrorCode of last parsing. + ParseErrorCode GetParseErrorCode() const { return parseResult_.Code(); } + + //! Get the position of last parsing error in input, 0 otherwise. + size_t GetErrorOffset() const { return parseResult_.Offset(); } + +protected: + void SetParseError(ParseErrorCode code, size_t offset) { parseResult_.Set(code, offset); } + +private: + // Prohibit copy constructor & assignment operator. 
+ GenericReader(const GenericReader&); + GenericReader& operator=(const GenericReader&); + + void ClearStack() { stack_.Clear(); } + + // clear stack on any exit from ParseStream, e.g. due to exception + struct ClearStackOnExit { + explicit ClearStackOnExit(GenericReader& r) : r_(r) {} + ~ClearStackOnExit() { r_.ClearStack(); } + private: + GenericReader& r_; + ClearStackOnExit(const ClearStackOnExit&); + ClearStackOnExit& operator=(const ClearStackOnExit&); + }; + + template + void SkipWhitespaceAndComments(InputStream& is) { + SkipWhitespace(is); + + if (parseFlags & kParseCommentsFlag) { + while (RAPIDJSON_UNLIKELY(Consume(is, '/'))) { + if (Consume(is, '*')) { + while (true) { + if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) + RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); + else if (Consume(is, '*')) { + if (Consume(is, '/')) + break; + } + else + is.Take(); + } + } + else if (RAPIDJSON_LIKELY(Consume(is, '/'))) + while (is.Peek() != '\0' && is.Take() != '\n'); + else + RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); + + SkipWhitespace(is); + } + } + } + + // Parse object: { string : value, ... 
} + template + void ParseObject(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == '{'); + is.Take(); // Skip '{' + + if (RAPIDJSON_UNLIKELY(!handler.StartObject())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, '}')) { + if (RAPIDJSON_UNLIKELY(!handler.EndObject(0))) // empty object + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + + for (SizeType memberCount = 0;;) { + if (RAPIDJSON_UNLIKELY(is.Peek() != '"')) + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); + + ParseString(is, handler, true); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (RAPIDJSON_UNLIKELY(!Consume(is, ':'))) + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ++memberCount; + + switch (is.Peek()) { + case ',': + is.Take(); + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + break; + case '}': + is.Take(); + if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + default: + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); break; // This useless break is only for making warning and coverage happy + } + + if (parseFlags & kParseTrailingCommasFlag) { + if (is.Peek() == '}') { + if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + is.Take(); + return; + } + } + } + } + + // Parse array: [ value, ... 
] + template + void ParseArray(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == '['); + is.Take(); // Skip '[' + + if (RAPIDJSON_UNLIKELY(!handler.StartArray())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, ']')) { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(0))) // empty array + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + + for (SizeType elementCount = 0;;) { + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ++elementCount; + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, ',')) { + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + } + else if (Consume(is, ']')) { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); + + if (parseFlags & kParseTrailingCommasFlag) { + if (is.Peek() == ']') { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + is.Take(); + return; + } + } + } + } + + template + void ParseNull(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 'n'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'u') && Consume(is, 'l') && Consume(is, 'l'))) { + if (RAPIDJSON_UNLIKELY(!handler.Null())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + void ParseTrue(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 't'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'r') && Consume(is, 'u') && Consume(is, 'e'))) { + if (RAPIDJSON_UNLIKELY(!handler.Bool(true))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + 
RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + void ParseFalse(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 'f'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'a') && Consume(is, 'l') && Consume(is, 's') && Consume(is, 'e'))) { + if (RAPIDJSON_UNLIKELY(!handler.Bool(false))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + RAPIDJSON_FORCEINLINE static bool Consume(InputStream& is, typename InputStream::Ch expect) { + if (RAPIDJSON_LIKELY(is.Peek() == expect)) { + is.Take(); + return true; + } + else + return false; + } + + // Helper function to parse four hexidecimal digits in \uXXXX in ParseString(). + template + unsigned ParseHex4(InputStream& is, size_t escapeOffset) { + unsigned codepoint = 0; + for (int i = 0; i < 4; i++) { + Ch c = is.Peek(); + codepoint <<= 4; + codepoint += static_cast(c); + if (c >= '0' && c <= '9') + codepoint -= '0'; + else if (c >= 'A' && c <= 'F') + codepoint -= 'A' - 10; + else if (c >= 'a' && c <= 'f') + codepoint -= 'a' - 10; + else { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorStringUnicodeEscapeInvalidHex, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(0); + } + is.Take(); + } + return codepoint; + } + + template + class StackStream { + public: + typedef CharType Ch; + + StackStream(internal::Stack& stack) : stack_(stack), length_(0) {} + RAPIDJSON_FORCEINLINE void Put(Ch c) { + *stack_.template Push() = c; + ++length_; + } + + RAPIDJSON_FORCEINLINE void* Push(SizeType count) { + length_ += count; + return stack_.template Push(count); + } + + size_t Length() const { return length_; } + + Ch* Pop() { + return stack_.template Pop(length_); + } + + private: + StackStream(const StackStream&); + StackStream& operator=(const StackStream&); + + internal::Stack& stack_; + SizeType length_; + }; + + // Parse string and generate String event. 
Different code paths for kParseInsituFlag. + template + void ParseString(InputStream& is, Handler& handler, bool isKey = false) { + internal::StreamLocalCopy copy(is); + InputStream& s(copy.s); + + RAPIDJSON_ASSERT(s.Peek() == '\"'); + s.Take(); // Skip '\"' + + bool success = false; + if (parseFlags & kParseInsituFlag) { + typename InputStream::Ch *head = s.PutBegin(); + ParseStringToStream(s, s); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + size_t length = s.PutEnd(head) - 1; + RAPIDJSON_ASSERT(length <= 0xFFFFFFFF); + const typename TargetEncoding::Ch* const str = reinterpret_cast(head); + success = (isKey ? handler.Key(str, SizeType(length), false) : handler.String(str, SizeType(length), false)); + } + else { + StackStream stackStream(stack_); + ParseStringToStream(s, stackStream); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + SizeType length = static_cast(stackStream.Length()) - 1; + const typename TargetEncoding::Ch* const str = stackStream.Pop(); + success = (isKey ? handler.Key(str, length, true) : handler.String(str, length, true)); + } + if (RAPIDJSON_UNLIKELY(!success)) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, s.Tell()); + } + + // Parse string to an output is + // This function handles the prefix/suffix double quotes, escaping, and optional encoding validation. + template + RAPIDJSON_FORCEINLINE void ParseStringToStream(InputStream& is, OutputStream& os) { +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + static const char escape[256] = { + Z16, Z16, 0, 0,'\"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'/', + Z16, Z16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, + 0, 0,'\b', 0, 0, 0,'\f', 0, 0, 0, 0, 0, 0, 0,'\n', 0, + 0, 0,'\r', 0,'\t', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 + }; +#undef Z16 +//!@endcond + + for (;;) { + // Scan and copy string before "\\\"" or < 0x20. This is an optional optimzation. 
+ if (!(parseFlags & kParseValidateEncodingFlag)) + ScanCopyUnescapedString(is, os); + + Ch c = is.Peek(); + if (RAPIDJSON_UNLIKELY(c == '\\')) { // Escape + size_t escapeOffset = is.Tell(); // For invalid escaping, report the inital '\\' as error offset + is.Take(); + Ch e = is.Peek(); + if ((sizeof(Ch) == 1 || unsigned(e) < 256) && RAPIDJSON_LIKELY(escape[static_cast(e)])) { + is.Take(); + os.Put(static_cast(escape[static_cast(e)])); + } + else if (RAPIDJSON_LIKELY(e == 'u')) { // Unicode + is.Take(); + unsigned codepoint = ParseHex4(is, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + if (RAPIDJSON_UNLIKELY(codepoint >= 0xD800 && codepoint <= 0xDBFF)) { + // Handle UTF-16 surrogate pair + if (RAPIDJSON_UNLIKELY(!Consume(is, '\\') || !Consume(is, 'u'))) + RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); + unsigned codepoint2 = ParseHex4(is, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + if (RAPIDJSON_UNLIKELY(codepoint2 < 0xDC00 || codepoint2 > 0xDFFF)) + RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); + codepoint = (((codepoint - 0xD800) << 10) | (codepoint2 - 0xDC00)) + 0x10000; + } + TEncoding::Encode(os, codepoint); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, escapeOffset); + } + else if (RAPIDJSON_UNLIKELY(c == '"')) { // Closing double quote + is.Take(); + os.Put('\0'); // null-terminate the string + return; + } + else if (RAPIDJSON_UNLIKELY(static_cast(c) < 0x20)) { // RFC 4627: unescaped = %x20-21 / %x23-5B / %x5D-10FFFF + if (c == '\0') + RAPIDJSON_PARSE_ERROR(kParseErrorStringMissQuotationMark, is.Tell()); + else + RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, is.Tell()); + } + else { + size_t offset = is.Tell(); + if (RAPIDJSON_UNLIKELY((parseFlags & kParseValidateEncodingFlag ? 
+ !Transcoder::Validate(is, os) : + !Transcoder::Transcode(is, os)))) + RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, offset); + } + } + } + + template + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InputStream&, OutputStream&) { + // Do nothing for generic version + } + +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) + // StringStream -> StackStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream& os) { + const char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + return; + } + else + os.Put(*p++); + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + SizeType 
length; + #ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; + #else + length = static_cast(__builtin_ffs(r) - 1); + #endif + char* q = reinterpret_cast(os.Push(length)); + for (size_t i = 0; i < length; i++) + q[i] = p[i]; + + p += length; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(os.Push(16)), s); + } + + is.src_ = p; + } + + // InsituStringStream -> InsituStringStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) { + RAPIDJSON_ASSERT(&is == &os); + (void)os; + + if (is.src_ == is.dst_) { + SkipUnescapedString(is); + return; + } + + char* p = is.src_; + char *q = is.dst_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + is.dst_ = q; + return; + } + else + *q++ = *p++; + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16, q += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = 
_mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + size_t length; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; +#else + length = static_cast(__builtin_ffs(r) - 1); +#endif + for (const char* pend = p + length; p != pend; ) + *q++ = *p++; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(q), s); + } + + is.src_ = p; + is.dst_ = q; + } + + // When read/write pointers are the same for insitu stream, just skip unescaped characters + static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) { + RAPIDJSON_ASSERT(is.src_ == is.dst_); + char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + for (; p != nextAligned; p++) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = is.dst_ = p; + return; + } + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = 
_mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + size_t length; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; +#else + length = static_cast(__builtin_ffs(r) - 1); +#endif + p += length; + break; + } + } + + is.src_ = is.dst_ = p; + } +#endif + + template + class NumberStream; + + template + class NumberStream { + public: + typedef typename InputStream::Ch Ch; + + NumberStream(GenericReader& reader, InputStream& s) : is(s) { (void)reader; } + ~NumberStream() {} + + RAPIDJSON_FORCEINLINE Ch Peek() const { return is.Peek(); } + RAPIDJSON_FORCEINLINE Ch TakePush() { return is.Take(); } + RAPIDJSON_FORCEINLINE Ch Take() { return is.Take(); } + RAPIDJSON_FORCEINLINE void Push(char) {} + + size_t Tell() { return is.Tell(); } + size_t Length() { return 0; } + const char* Pop() { return 0; } + + protected: + NumberStream& operator=(const NumberStream&); + + InputStream& is; + }; + + template + class NumberStream : public NumberStream { + typedef NumberStream Base; + public: + NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {} + ~NumberStream() {} + + RAPIDJSON_FORCEINLINE Ch TakePush() { + stackStream.Put(static_cast(Base::is.Peek())); + return Base::is.Take(); + } + + RAPIDJSON_FORCEINLINE void Push(char c) { + stackStream.Put(c); + } + + size_t Length() { return stackStream.Length(); } + + const char* Pop() { + stackStream.Put('\0'); + return stackStream.Pop(); + } + + private: + StackStream stackStream; + }; + + template + class NumberStream : public NumberStream { + typedef NumberStream Base; + public: + NumberStream(GenericReader& reader, 
InputStream& is) : Base(reader, is) {} + ~NumberStream() {} + + RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); } + }; + + template + void ParseNumber(InputStream& is, Handler& handler) { + internal::StreamLocalCopy copy(is); + NumberStream s(*this, copy.s); + + size_t startOffset = s.Tell(); + double d = 0.0; + bool useNanOrInf = false; + + // Parse minus + bool minus = Consume(s, '-'); + + // Parse int: zero / ( digit1-9 *DIGIT ) + unsigned i = 0; + uint64_t i64 = 0; + bool use64bit = false; + int significandDigit = 0; + if (RAPIDJSON_UNLIKELY(s.Peek() == '0')) { + i = 0; + s.TakePush(); + } + else if (RAPIDJSON_LIKELY(s.Peek() >= '1' && s.Peek() <= '9')) { + i = static_cast(s.TakePush() - '0'); + + if (minus) + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i >= 214748364)) { // 2^31 = 2147483648 + if (RAPIDJSON_LIKELY(i != 214748364 || s.Peek() > '8')) { + i64 = i; + use64bit = true; + break; + } + } + i = i * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + else + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i >= 429496729)) { // 2^32 - 1 = 4294967295 + if (RAPIDJSON_LIKELY(i != 429496729 || s.Peek() > '5')) { + i64 = i; + use64bit = true; + break; + } + } + i = i * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + } + // Parse NaN or Infinity here + else if ((parseFlags & kParseNanAndInfFlag) && RAPIDJSON_LIKELY((s.Peek() == 'I' || s.Peek() == 'N'))) { + useNanOrInf = true; + if (RAPIDJSON_LIKELY(Consume(s, 'N') && Consume(s, 'a') && Consume(s, 'N'))) { + d = std::numeric_limits::quiet_NaN(); + } + else if (RAPIDJSON_LIKELY(Consume(s, 'I') && Consume(s, 'n') && Consume(s, 'f'))) { + d = (minus ? 
-std::numeric_limits::infinity() : std::numeric_limits::infinity()); + if (RAPIDJSON_UNLIKELY(s.Peek() == 'i' && !(Consume(s, 'i') && Consume(s, 'n') + && Consume(s, 'i') && Consume(s, 't') && Consume(s, 'y')))) + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + + // Parse 64bit int + bool useDouble = false; + if (use64bit) { + if (minus) + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC))) // 2^63 = 9223372036854775808 + if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC) || s.Peek() > '8')) { + d = static_cast(i64); + useDouble = true; + break; + } + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + else + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x19999999, 0x99999999))) // 2^64 - 1 = 18446744073709551615 + if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || s.Peek() > '5')) { + d = static_cast(i64); + useDouble = true; + break; + } + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + } + + // Force double for big integer + if (useDouble) { + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(d >= 1.7976931348623157e307)) // DBL_MAX / 10.0 + RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); + d = d * 10 + (s.TakePush() - '0'); + } + } + + // Parse frac = decimal-point 1*DIGIT + int expFrac = 0; + size_t decimalPosition; + if (Consume(s, '.')) { + decimalPosition = s.Length(); + + if (RAPIDJSON_UNLIKELY(!(s.Peek() >= '0' && s.Peek() <= '9'))) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissFraction, s.Tell()); + + if (!useDouble) { +#if RAPIDJSON_64BIT + // Use i64 to store significand in 64-bit architecture + 
if (!use64bit) + i64 = i; + + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (i64 > RAPIDJSON_UINT64_C2(0x1FFFFF, 0xFFFFFFFF)) // 2^53 - 1 for fast path + break; + else { + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + --expFrac; + if (i64 != 0) + significandDigit++; + } + } + + d = static_cast(i64); +#else + // Use double to store significand in 32-bit architecture + d = static_cast(use64bit ? i64 : i); +#endif + useDouble = true; + } + + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (significandDigit < 17) { + d = d * 10.0 + (s.TakePush() - '0'); + --expFrac; + if (RAPIDJSON_LIKELY(d > 0.0)) + significandDigit++; + } + else + s.TakePush(); + } + } + else + decimalPosition = s.Length(); // decimal position at the end of integer. + + // Parse exp = e [ minus / plus ] 1*DIGIT + int exp = 0; + if (Consume(s, 'e') || Consume(s, 'E')) { + if (!useDouble) { + d = static_cast(use64bit ? i64 : i); + useDouble = true; + } + + bool expMinus = false; + if (Consume(s, '+')) + ; + else if (Consume(s, '-')) + expMinus = true; + + if (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = static_cast(s.Take() - '0'); + if (expMinus) { + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = exp * 10 + static_cast(s.Take() - '0'); + if (exp >= 214748364) { // Issue #313: prevent overflow exponent + while (RAPIDJSON_UNLIKELY(s.Peek() >= '0' && s.Peek() <= '9')) // Consume the rest of exponent + s.Take(); + } + } + } + else { // positive exp + int maxExp = 308 - expFrac; + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = exp * 10 + static_cast(s.Take() - '0'); + if (RAPIDJSON_UNLIKELY(exp > maxExp)) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); + } + } + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissExponent, s.Tell()); + + if (expMinus) + exp = -exp; + } + + // Finish parsing, call event according to the type of number. 
+ bool cont = true; + + if (parseFlags & kParseNumbersAsStringsFlag) { + if (parseFlags & kParseInsituFlag) { + s.Pop(); // Pop stack no matter if it will be used or not. + typename InputStream::Ch* head = is.PutBegin(); + const size_t length = s.Tell() - startOffset; + RAPIDJSON_ASSERT(length <= 0xFFFFFFFF); + // unable to insert the \0 character here, it will erase the comma after this number + const typename TargetEncoding::Ch* const str = reinterpret_cast(head); + cont = handler.RawNumber(str, SizeType(length), false); + } + else { + SizeType numCharsToCopy = static_cast(s.Length()); + StringStream srcStream(s.Pop()); + StackStream dstStream(stack_); + while (numCharsToCopy--) { + Transcoder, TargetEncoding>::Transcode(srcStream, dstStream); + } + dstStream.Put('\0'); + const typename TargetEncoding::Ch* str = dstStream.Pop(); + const SizeType length = static_cast(dstStream.Length()) - 1; + cont = handler.RawNumber(str, SizeType(length), true); + } + } + else { + size_t length = s.Length(); + const char* decimal = s.Pop(); // Pop stack no matter if it will be used or not. + + if (useDouble) { + int p = exp + expFrac; + if (parseFlags & kParseFullPrecisionFlag) + d = internal::StrtodFullPrecision(d, p, decimal, length, decimalPosition, exp); + else + d = internal::StrtodNormalPrecision(d, p); + + cont = handler.Double(minus ? 
-d : d); + } + else if (useNanOrInf) { + cont = handler.Double(d); + } + else { + if (use64bit) { + if (minus) + cont = handler.Int64(static_cast(~i64 + 1)); + else + cont = handler.Uint64(i64); + } + else { + if (minus) + cont = handler.Int(static_cast(~i + 1)); + else + cont = handler.Uint(i); + } + } + } + if (RAPIDJSON_UNLIKELY(!cont)) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, startOffset); + } + + // Parse any JSON value + template + void ParseValue(InputStream& is, Handler& handler) { + switch (is.Peek()) { + case 'n': ParseNull (is, handler); break; + case 't': ParseTrue (is, handler); break; + case 'f': ParseFalse (is, handler); break; + case '"': ParseString(is, handler); break; + case '{': ParseObject(is, handler); break; + case '[': ParseArray (is, handler); break; + default : + ParseNumber(is, handler); + break; + + } + } + + // Iterative Parsing + + // States + enum IterativeParsingState { + IterativeParsingStartState = 0, + IterativeParsingFinishState, + IterativeParsingErrorState, + + // Object states + IterativeParsingObjectInitialState, + IterativeParsingMemberKeyState, + IterativeParsingKeyValueDelimiterState, + IterativeParsingMemberValueState, + IterativeParsingMemberDelimiterState, + IterativeParsingObjectFinishState, + + // Array states + IterativeParsingArrayInitialState, + IterativeParsingElementState, + IterativeParsingElementDelimiterState, + IterativeParsingArrayFinishState, + + // Single value state + IterativeParsingValueState + }; + + enum { cIterativeParsingStateCount = IterativeParsingValueState + 1 }; + + // Tokens + enum Token { + LeftBracketToken = 0, + RightBracketToken, + + LeftCurlyBracketToken, + RightCurlyBracketToken, + + CommaToken, + ColonToken, + + StringToken, + FalseToken, + TrueToken, + NullToken, + NumberToken, + + kTokenCount + }; + + RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) { + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#define N NumberToken +#define N16 N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N + // Maps from ASCII to 
Token + static const unsigned char tokenMap[256] = { + N16, // 00~0F + N16, // 10~1F + N, N, StringToken, N, N, N, N, N, N, N, N, N, CommaToken, N, N, N, // 20~2F + N, N, N, N, N, N, N, N, N, N, ColonToken, N, N, N, N, N, // 30~3F + N16, // 40~4F + N, N, N, N, N, N, N, N, N, N, N, LeftBracketToken, N, RightBracketToken, N, N, // 50~5F + N, N, N, N, N, N, FalseToken, N, N, N, N, N, N, N, NullToken, N, // 60~6F + N, N, N, N, TrueToken, N, N, N, N, N, N, LeftCurlyBracketToken, N, RightCurlyBracketToken, N, N, // 70~7F + N16, N16, N16, N16, N16, N16, N16, N16 // 80~FF + }; +#undef N +#undef N16 +//!@endcond + + if (sizeof(Ch) == 1 || static_cast(c) < 256) + return static_cast(tokenMap[static_cast(c)]); + else + return NumberToken; + } + + RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) { + // current state x one lookahead token -> new state + static const char G[cIterativeParsingStateCount][kTokenCount] = { + // Start + { + IterativeParsingArrayInitialState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingValueState, // String + IterativeParsingValueState, // False + IterativeParsingValueState, // True + IterativeParsingValueState, // Null + IterativeParsingValueState // Number + }, + // Finish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Error(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + 
IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // ObjectInitial + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberKeyState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // MemberKey + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingKeyValueDelimiterState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // KeyValueDelimiter + { + IterativeParsingArrayInitialState, // Left bracket(push MemberValue state) + IterativeParsingErrorState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push MemberValue state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberValueState, // String + IterativeParsingMemberValueState, // False + IterativeParsingMemberValueState, // True + IterativeParsingMemberValueState, // Null + IterativeParsingMemberValueState // Number + }, + // MemberValue + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right 
curly bracket + IterativeParsingMemberDelimiterState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // MemberDelimiter + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberKeyState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // ObjectFinish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // ArrayInitial + { + IterativeParsingArrayInitialState, // Left bracket(push Element state) + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push Element state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingElementState, // String + IterativeParsingElementState, // False + IterativeParsingElementState, // True + IterativeParsingElementState, // Null + IterativeParsingElementState // Number + }, + // Element + { + IterativeParsingErrorState, // Left bracket + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingElementDelimiterState, // 
Comma + IterativeParsingErrorState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // ElementDelimiter + { + IterativeParsingArrayInitialState, // Left bracket(push Element state) + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push Element state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingElementState, // String + IterativeParsingElementState, // False + IterativeParsingElementState, // True + IterativeParsingElementState, // Null + IterativeParsingElementState // Number + }, + // ArrayFinish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Single Value (sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + } + }; // End of G + + return static_cast(G[state][token]); + } + + // Make an advance in the token stream and state based on the candidate destination state which was returned by Transit(). + // May return a new state on state pop. 
+ template + RAPIDJSON_FORCEINLINE IterativeParsingState Transit(IterativeParsingState src, Token token, IterativeParsingState dst, InputStream& is, Handler& handler) { + (void)token; + + switch (dst) { + case IterativeParsingErrorState: + return dst; + + case IterativeParsingObjectInitialState: + case IterativeParsingArrayInitialState: + { + // Push the state(Element or MemeberValue) if we are nested in another array or value of member. + // In this way we can get the correct state on ObjectFinish or ArrayFinish by frame pop. + IterativeParsingState n = src; + if (src == IterativeParsingArrayInitialState || src == IterativeParsingElementDelimiterState) + n = IterativeParsingElementState; + else if (src == IterativeParsingKeyValueDelimiterState) + n = IterativeParsingMemberValueState; + // Push current state. + *stack_.template Push(1) = n; + // Initialize and push the member/element count. + *stack_.template Push(1) = 0; + // Call handler + bool hr = (dst == IterativeParsingObjectInitialState) ? handler.StartObject() : handler.StartArray(); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return dst; + } + } + + case IterativeParsingMemberKeyState: + ParseString(is, handler, true); + if (HasParseError()) + return IterativeParsingErrorState; + else + return dst; + + case IterativeParsingKeyValueDelimiterState: + RAPIDJSON_ASSERT(token == ColonToken); + is.Take(); + return dst; + + case IterativeParsingMemberValueState: + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return dst; + + case IterativeParsingElementState: + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. 
+ ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return dst; + + case IterativeParsingMemberDelimiterState: + case IterativeParsingElementDelimiterState: + is.Take(); + // Update member/element count. + *stack_.template Top() = *stack_.template Top() + 1; + return dst; + + case IterativeParsingObjectFinishState: + { + // Transit from delimiter is only allowed when trailing commas are enabled + if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingMemberDelimiterState) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorObjectMissName, is.Tell()); + return IterativeParsingErrorState; + } + // Get member count. + SizeType c = *stack_.template Pop(1); + // If the object is not empty, count the last member. + if (src == IterativeParsingMemberValueState) + ++c; + // Restore the state. + IterativeParsingState n = static_cast(*stack_.template Pop(1)); + // Transit to Finish state if this is the topmost scope. + if (n == IterativeParsingStartState) + n = IterativeParsingFinishState; + // Call handler + bool hr = handler.EndObject(c); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return n; + } + } + + case IterativeParsingArrayFinishState: + { + // Transit from delimiter is only allowed when trailing commas are enabled + if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingElementDelimiterState) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorValueInvalid, is.Tell()); + return IterativeParsingErrorState; + } + // Get element count. + SizeType c = *stack_.template Pop(1); + // If the array is not empty, count the last element. + if (src == IterativeParsingElementState) + ++c; + // Restore the state. + IterativeParsingState n = static_cast(*stack_.template Pop(1)); + // Transit to Finish state if this is the topmost scope. 
+ if (n == IterativeParsingStartState) + n = IterativeParsingFinishState; + // Call handler + bool hr = handler.EndArray(c); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return n; + } + } + + default: + // This branch is for IterativeParsingValueState actually. + // Use `default:` rather than + // `case IterativeParsingValueState:` is for code coverage. + + // The IterativeParsingStartState is not enumerated in this switch-case. + // It is impossible for that case. And it can be caught by following assertion. + + // The IterativeParsingFinishState is not enumerated in this switch-case either. + // It is a "derivative" state which cannot triggered from Predict() directly. + // Therefore it cannot happen here. And it can be caught by following assertion. + RAPIDJSON_ASSERT(dst == IterativeParsingValueState); + + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return IterativeParsingFinishState; + } + } + + template + void HandleError(IterativeParsingState src, InputStream& is) { + if (HasParseError()) { + // Error flag has been set. 
+ return; + } + + switch (src) { + case IterativeParsingStartState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentEmpty, is.Tell()); return; + case IterativeParsingFinishState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentRootNotSingular, is.Tell()); return; + case IterativeParsingObjectInitialState: + case IterativeParsingMemberDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); return; + case IterativeParsingMemberKeyState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); return; + case IterativeParsingMemberValueState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); return; + case IterativeParsingKeyValueDelimiterState: + case IterativeParsingArrayInitialState: + case IterativeParsingElementDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); return; + default: RAPIDJSON_ASSERT(src == IterativeParsingElementState); RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); return; + } + } + + template + ParseResult IterativeParse(InputStream& is, Handler& handler) { + parseResult_.Clear(); + ClearStackOnExit scope(*this); + IterativeParsingState state = IterativeParsingStartState; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + while (is.Peek() != '\0') { + Token t = Tokenize(is.Peek()); + IterativeParsingState n = Predict(state, t); + IterativeParsingState d = Transit(state, t, n, is, handler); + + if (d == IterativeParsingErrorState) { + HandleError(state, is); + break; + } + + state = d; + + // Do not further consume streams if a root JSON has been parsed. + if ((parseFlags & kParseStopWhenDoneFlag) && state == IterativeParsingFinishState) + break; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + + // Handle the end of file. 
+ if (state != IterativeParsingFinishState) + HandleError(state, is); + + return parseResult_; + } + + static const size_t kDefaultStackCapacity = 256; //!< Default stack capacity in bytes for storing a single decoded string. + internal::Stack stack_; //!< A stack for storing decoded string temporarily during non-destructive parsing. + ParseResult parseResult_; +}; // class GenericReader + +//! Reader with UTF8 encoding and default allocator. +typedef GenericReader, UTF8<> > Reader; + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_READER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/schema.h b/sql-odbc/libraries/rapidjson/include/rapidjson/schema.h new file mode 100644 index 0000000000..87c667e214 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/schema.h @@ -0,0 +1,2015 @@ +// Tencent is pleased to support the open source community by making RapidJSON available-> +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip-> All rights reserved-> +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License-> You may obtain a copy of the License at +// +// http://opensource->org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied-> See the License for the +// specific language governing permissions and limitations under the License-> + +// clang-format off + +#ifndef RAPIDJSON_SCHEMA_H_ +#define RAPIDJSON_SCHEMA_H_ + +#include "document.h" +#include "pointer.h" +#include // abs, floor + +#if !defined(RAPIDJSON_SCHEMA_USE_INTERNALREGEX) +#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 1 +#else +#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 0 
+#endif + +#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && !defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800)) +#define RAPIDJSON_SCHEMA_USE_STDREGEX 1 +#else +#define RAPIDJSON_SCHEMA_USE_STDREGEX 0 +#endif + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX +#include "internal/regex.h" +#elif RAPIDJSON_SCHEMA_USE_STDREGEX +#include +#endif + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX || RAPIDJSON_SCHEMA_USE_STDREGEX +#define RAPIDJSON_SCHEMA_HAS_REGEX 1 +#else +#define RAPIDJSON_SCHEMA_HAS_REGEX 0 +#endif + +#ifndef RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_SCHEMA_VERBOSE 0 +#endif + +#if RAPIDJSON_SCHEMA_VERBOSE +#include "stringbuffer.h" +#endif + +RAPIDJSON_DIAG_PUSH + +#if defined(__GNUC__) +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_OFF(weak-vtables) +RAPIDJSON_DIAG_OFF(exit-time-destructors) +RAPIDJSON_DIAG_OFF(c++98-compat-pedantic) +RAPIDJSON_DIAG_OFF(variadic-macros) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Verbose Utilities + +#if RAPIDJSON_SCHEMA_VERBOSE + +namespace internal { + +inline void PrintInvalidKeyword(const char* keyword) { + printf("Fail keyword: %s\n", keyword); +} + +inline void PrintInvalidKeyword(const wchar_t* keyword) { + wprintf(L"Fail keyword: %ls\n", keyword); +} + +inline void PrintInvalidDocument(const char* document) { + printf("Fail document: %s\n\n", document); +} + +inline void PrintInvalidDocument(const wchar_t* document) { + wprintf(L"Fail document: %ls\n\n", document); +} + +inline void PrintValidatorPointers(unsigned depth, const char* s, const char* d) { + printf("S: %*s%s\nD: %*s%s\n\n", depth * 4, " ", s, depth * 4, " ", d); +} + +inline void PrintValidatorPointers(unsigned depth, const wchar_t* s, const wchar_t* d) { + wprintf(L"S: %*ls%ls\nD: %*ls%ls\n\n", depth * 4, L" 
", s, depth * 4, L" ", d); +} + +} // namespace internal + +#endif // RAPIDJSON_SCHEMA_VERBOSE + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_INVALID_KEYWORD_RETURN + +#if RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) internal::PrintInvalidKeyword(keyword) +#else +#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) +#endif + +#define RAPIDJSON_INVALID_KEYWORD_RETURN(keyword)\ +RAPIDJSON_MULTILINEMACRO_BEGIN\ + context.invalidKeyword = keyword.GetString();\ + RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword.GetString());\ + return false;\ +RAPIDJSON_MULTILINEMACRO_END + +/////////////////////////////////////////////////////////////////////////////// +// Forward declarations + +template +class GenericSchemaDocument; + +namespace internal { + +template +class Schema; + +/////////////////////////////////////////////////////////////////////////////// +// ISchemaValidator + +class ISchemaValidator { +public: + virtual ~ISchemaValidator() {} + virtual bool IsValid() const = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// ISchemaStateFactory + +template +class ISchemaStateFactory { +public: + virtual ~ISchemaStateFactory() {} + virtual ISchemaValidator* CreateSchemaValidator(const SchemaType&) = 0; + virtual void DestroySchemaValidator(ISchemaValidator* validator) = 0; + virtual void* CreateHasher() = 0; + virtual uint64_t GetHashCode(void* hasher) = 0; + virtual void DestroryHasher(void* hasher) = 0; + virtual void* MallocState(size_t size) = 0; + virtual void FreeState(void* p) = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// Hasher + +// For comparison of compound value +template +class Hasher { +public: + typedef typename Encoding::Ch Ch; + + Hasher(Allocator* allocator = 0, size_t stackCapacity = kDefaultSize) : stack_(allocator, stackCapacity) {} + + bool Null() { return WriteType(kNullType); } + bool 
Bool(bool b) { return WriteType(b ? kTrueType : kFalseType); } + bool Int(int i) { Number n; n.u.i = i; n.d = static_cast(i); return WriteNumber(n); } + bool Uint(unsigned u) { Number n; n.u.u = u; n.d = static_cast(u); return WriteNumber(n); } + bool Int64(int64_t i) { Number n; n.u.i = i; n.d = static_cast(i); return WriteNumber(n); } + bool Uint64(uint64_t u) { Number n; n.u.u = u; n.d = static_cast(u); return WriteNumber(n); } + bool Double(double d) { + Number n; + if (d < 0) n.u.i = static_cast(d); + else n.u.u = static_cast(d); + n.d = d; + return WriteNumber(n); + } + + bool RawNumber(const Ch* str, SizeType len, bool) { + WriteBuffer(kNumberType, str, len * sizeof(Ch)); + return true; + } + + bool String(const Ch* str, SizeType len, bool) { + WriteBuffer(kStringType, str, len * sizeof(Ch)); + return true; + } + + bool StartObject() { return true; } + bool Key(const Ch* str, SizeType len, bool copy) { return String(str, len, copy); } + bool EndObject(SizeType memberCount) { + uint64_t h = Hash(0, kObjectType); + uint64_t* kv = stack_.template Pop(memberCount * 2); + for (SizeType i = 0; i < memberCount; i++) + h ^= Hash(kv[i * 2], kv[i * 2 + 1]); // Use xor to achieve member order insensitive + *stack_.template Push() = h; + return true; + } + + bool StartArray() { return true; } + bool EndArray(SizeType elementCount) { + uint64_t h = Hash(0, kArrayType); + uint64_t* e = stack_.template Pop(elementCount); + for (SizeType i = 0; i < elementCount; i++) + h = Hash(h, e[i]); // Use hash to achieve element order sensitive + *stack_.template Push() = h; + return true; + } + + bool IsValid() const { return stack_.GetSize() == sizeof(uint64_t); } + + uint64_t GetHashCode() const { + RAPIDJSON_ASSERT(IsValid()); + return *stack_.template Top(); + } + +private: + static const size_t kDefaultSize = 256; + struct Number { + union U { + uint64_t u; + int64_t i; + }u; + double d; + }; + + bool WriteType(Type type) { return WriteBuffer(type, 0, 0); } + + bool 
WriteNumber(const Number& n) { return WriteBuffer(kNumberType, &n, sizeof(n)); } + + bool WriteBuffer(Type type, const void* data, size_t len) { + // FNV-1a from http://isthe.com/chongo/tech/comp/fnv/ + uint64_t h = Hash(RAPIDJSON_UINT64_C2(0x84222325, 0xcbf29ce4), type); + const unsigned char* d = static_cast(data); + for (size_t i = 0; i < len; i++) + h = Hash(h, d[i]); + *stack_.template Push() = h; + return true; + } + + static uint64_t Hash(uint64_t h, uint64_t d) { + static const uint64_t kPrime = RAPIDJSON_UINT64_C2(0x00000100, 0x000001b3); + h ^= d; + h *= kPrime; + return h; + } + + Stack stack_; +}; + +/////////////////////////////////////////////////////////////////////////////// +// SchemaValidationContext + +template +struct SchemaValidationContext { + typedef Schema SchemaType; + typedef ISchemaStateFactory SchemaValidatorFactoryType; + typedef typename SchemaType::ValueType ValueType; + typedef typename ValueType::Ch Ch; + + enum PatternValidatorType { + kPatternValidatorOnly, + kPatternValidatorWithProperty, + kPatternValidatorWithAdditionalProperty + }; + + SchemaValidationContext(SchemaValidatorFactoryType& f, const SchemaType* s) : + factory(f), + schema(s), + valueSchema(), + invalidKeyword(), + hasher(), + arrayElementHashCodes(), + validators(), + validatorCount(), + patternPropertiesValidators(), + patternPropertiesValidatorCount(), + patternPropertiesSchemas(), + patternPropertiesSchemaCount(), + valuePatternValidatorType(kPatternValidatorOnly), + propertyExist(), + inArray(false), + valueUniqueness(false), + arrayUniqueness(false) + { + } + + ~SchemaValidationContext() { + if (hasher) + factory.DestroryHasher(hasher); + if (validators) { + for (SizeType i = 0; i < validatorCount; i++) + factory.DestroySchemaValidator(validators[i]); + factory.FreeState(validators); + } + if (patternPropertiesValidators) { + for (SizeType i = 0; i < patternPropertiesValidatorCount; i++) + factory.DestroySchemaValidator(patternPropertiesValidators[i]); + 
factory.FreeState(patternPropertiesValidators); + } + if (patternPropertiesSchemas) + factory.FreeState(patternPropertiesSchemas); + if (propertyExist) + factory.FreeState(propertyExist); + } + + SchemaValidatorFactoryType& factory; + const SchemaType* schema; + const SchemaType* valueSchema; + const Ch* invalidKeyword; + void* hasher; // Only validator access + void* arrayElementHashCodes; // Only validator access this + ISchemaValidator** validators; + SizeType validatorCount; + ISchemaValidator** patternPropertiesValidators; + SizeType patternPropertiesValidatorCount; + const SchemaType** patternPropertiesSchemas; + SizeType patternPropertiesSchemaCount; + PatternValidatorType valuePatternValidatorType; + PatternValidatorType objectPatternValidatorType; + SizeType arrayElementIndex; + bool* propertyExist; + bool inArray; + bool valueUniqueness; + bool arrayUniqueness; +}; + +/////////////////////////////////////////////////////////////////////////////// +// Schema + +template +class Schema { +public: + typedef typename SchemaDocumentType::ValueType ValueType; + typedef typename SchemaDocumentType::AllocatorType AllocatorType; + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + typedef SchemaValidationContext Context; + typedef Schema SchemaType; + typedef GenericValue SValue; + friend class GenericSchemaDocument; + + Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) : + allocator_(allocator), + enum_(), + enumCount_(), + not_(), + type_((1 << kTotalSchemaType) - 1), // typeless + validatorCount_(), + properties_(), + additionalPropertiesSchema_(), + patternProperties_(), + patternPropertyCount_(), + propertyCount_(), + minProperties_(), + maxProperties_(SizeType(~0)), + additionalProperties_(true), + hasDependencies_(), + hasRequired_(), + 
hasSchemaDependencies_(), + additionalItemsSchema_(), + itemsList_(), + itemsTuple_(), + itemsTupleCount_(), + minItems_(), + maxItems_(SizeType(~0)), + additionalItems_(true), + uniqueItems_(false), + pattern_(), + minLength_(0), + maxLength_(~SizeType(0)), + exclusiveMinimum_(false), + exclusiveMaximum_(false) + { + typedef typename SchemaDocumentType::ValueType ValueType; + typedef typename ValueType::ConstValueIterator ConstValueIterator; + typedef typename ValueType::ConstMemberIterator ConstMemberIterator; + + if (!value.IsObject()) + return; + + if (const ValueType* v = GetMember(value, GetTypeString())) { + type_ = 0; + if (v->IsString()) + AddType(*v); + else if (v->IsArray()) + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) + AddType(*itr); + } + + if (const ValueType* v = GetMember(value, GetEnumString())) + if (v->IsArray() && v->Size() > 0) { + enum_ = static_cast(allocator_->Malloc(sizeof(uint64_t) * v->Size())); + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) { + typedef Hasher > EnumHasherType; + char buffer[256 + 24]; + MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer)); + EnumHasherType h(&hasherAllocator, 256); + itr->Accept(h); + enum_[enumCount_++] = h.GetHashCode(); + } + } + + if (schemaDocument) { + AssignIfExist(allOf_, *schemaDocument, p, value, GetAllOfString(), document); + AssignIfExist(anyOf_, *schemaDocument, p, value, GetAnyOfString(), document); + AssignIfExist(oneOf_, *schemaDocument, p, value, GetOneOfString(), document); + } + + if (const ValueType* v = GetMember(value, GetNotString())) { + schemaDocument->CreateSchema(¬_, p.Append(GetNotString(), allocator_), *v, document); + notValidatorIndex_ = validatorCount_; + validatorCount_++; + } + + // Object + + const ValueType* properties = GetMember(value, GetPropertiesString()); + const ValueType* required = GetMember(value, GetRequiredString()); + const ValueType* dependencies = GetMember(value, GetDependenciesString()); + { + // 
Gather properties from properties/required/dependencies + SValue allProperties(kArrayType); + + if (properties && properties->IsObject()) + for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) + AddUniqueElement(allProperties, itr->name); + + if (required && required->IsArray()) + for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) + if (itr->IsString()) + AddUniqueElement(allProperties, *itr); + + if (dependencies && dependencies->IsObject()) + for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { + AddUniqueElement(allProperties, itr->name); + if (itr->value.IsArray()) + for (ConstValueIterator i = itr->value.Begin(); i != itr->value.End(); ++i) + if (i->IsString()) + AddUniqueElement(allProperties, *i); + } + + if (allProperties.Size() > 0) { + propertyCount_ = allProperties.Size(); + properties_ = static_cast(allocator_->Malloc(sizeof(Property) * propertyCount_)); + for (SizeType i = 0; i < propertyCount_; i++) { + new (&properties_[i]) Property(); + properties_[i].name = allProperties[i]; + properties_[i].schema = GetTypeless(); + } + } + } + + if (properties && properties->IsObject()) { + PointerType q = p.Append(GetPropertiesString(), allocator_); + for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) { + SizeType index; + if (FindPropertyIndex(itr->name, &index)) + schemaDocument->CreateSchema(&properties_[index].schema, q.Append(itr->name, allocator_), itr->value, document); + } + } + + if (const ValueType* v = GetMember(value, GetPatternPropertiesString())) { + PointerType q = p.Append(GetPatternPropertiesString(), allocator_); + patternProperties_ = static_cast(allocator_->Malloc(sizeof(PatternProperty) * v->MemberCount())); + patternPropertyCount_ = 0; + + for (ConstMemberIterator itr = v->MemberBegin(); itr != v->MemberEnd(); ++itr) { + new (&patternProperties_[patternPropertyCount_]) 
PatternProperty(); + patternProperties_[patternPropertyCount_].pattern = CreatePattern(itr->name); + schemaDocument->CreateSchema(&patternProperties_[patternPropertyCount_].schema, q.Append(itr->name, allocator_), itr->value, document); + patternPropertyCount_++; + } + } + + if (required && required->IsArray()) + for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) + if (itr->IsString()) { + SizeType index; + if (FindPropertyIndex(*itr, &index)) { + properties_[index].required = true; + hasRequired_ = true; + } + } + + if (dependencies && dependencies->IsObject()) { + PointerType q = p.Append(GetDependenciesString(), allocator_); + hasDependencies_ = true; + for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { + SizeType sourceIndex; + if (FindPropertyIndex(itr->name, &sourceIndex)) { + if (itr->value.IsArray()) { + properties_[sourceIndex].dependencies = static_cast(allocator_->Malloc(sizeof(bool) * propertyCount_)); + std::memset(properties_[sourceIndex].dependencies, 0, sizeof(bool)* propertyCount_); + for (ConstValueIterator targetItr = itr->value.Begin(); targetItr != itr->value.End(); ++targetItr) { + SizeType targetIndex; + if (FindPropertyIndex(*targetItr, &targetIndex)) + properties_[sourceIndex].dependencies[targetIndex] = true; + } + } + else if (itr->value.IsObject()) { + hasSchemaDependencies_ = true; + schemaDocument->CreateSchema(&properties_[sourceIndex].dependenciesSchema, q.Append(itr->name, allocator_), itr->value, document); + properties_[sourceIndex].dependenciesValidatorIndex = validatorCount_; + validatorCount_++; + } + } + } + } + + if (const ValueType* v = GetMember(value, GetAdditionalPropertiesString())) { + if (v->IsBool()) + additionalProperties_ = v->GetBool(); + else if (v->IsObject()) + schemaDocument->CreateSchema(&additionalPropertiesSchema_, p.Append(GetAdditionalPropertiesString(), allocator_), *v, document); + } + + AssignIfExist(minProperties_, value, 
GetMinPropertiesString()); + AssignIfExist(maxProperties_, value, GetMaxPropertiesString()); + + // Array + if (const ValueType* v = GetMember(value, GetItemsString())) { + PointerType q = p.Append(GetItemsString(), allocator_); + if (v->IsObject()) // List validation + schemaDocument->CreateSchema(&itemsList_, q, *v, document); + else if (v->IsArray()) { // Tuple validation + itemsTuple_ = static_cast(allocator_->Malloc(sizeof(const Schema*) * v->Size())); + SizeType index = 0; + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr, index++) + schemaDocument->CreateSchema(&itemsTuple_[itemsTupleCount_++], q.Append(index, allocator_), *itr, document); + } + } + + AssignIfExist(minItems_, value, GetMinItemsString()); + AssignIfExist(maxItems_, value, GetMaxItemsString()); + + if (const ValueType* v = GetMember(value, GetAdditionalItemsString())) { + if (v->IsBool()) + additionalItems_ = v->GetBool(); + else if (v->IsObject()) + schemaDocument->CreateSchema(&additionalItemsSchema_, p.Append(GetAdditionalItemsString(), allocator_), *v, document); + } + + AssignIfExist(uniqueItems_, value, GetUniqueItemsString()); + + // String + AssignIfExist(minLength_, value, GetMinLengthString()); + AssignIfExist(maxLength_, value, GetMaxLengthString()); + + if (const ValueType* v = GetMember(value, GetPatternString())) + pattern_ = CreatePattern(*v); + + // Number + if (const ValueType* v = GetMember(value, GetMinimumString())) + if (v->IsNumber()) + minimum_.CopyFrom(*v, *allocator_); + + if (const ValueType* v = GetMember(value, GetMaximumString())) + if (v->IsNumber()) + maximum_.CopyFrom(*v, *allocator_); + + AssignIfExist(exclusiveMinimum_, value, GetExclusiveMinimumString()); + AssignIfExist(exclusiveMaximum_, value, GetExclusiveMaximumString()); + + if (const ValueType* v = GetMember(value, GetMultipleOfString())) + if (v->IsNumber() && v->GetDouble() > 0.0) + multipleOf_.CopyFrom(*v, *allocator_); + } + + ~Schema() { + if (allocator_) { + 
allocator_->Free(enum_); + } + if (properties_) { + for (SizeType i = 0; i < propertyCount_; i++) + properties_[i].~Property(); + AllocatorType::Free(properties_); + } + if (patternProperties_) { + for (SizeType i = 0; i < patternPropertyCount_; i++) + patternProperties_[i].~PatternProperty(); + AllocatorType::Free(patternProperties_); + } + AllocatorType::Free(itemsTuple_); +#if RAPIDJSON_SCHEMA_HAS_REGEX + if (pattern_) { + pattern_->~RegexType(); + allocator_->Free(pattern_); + } +#endif + } + + bool BeginValue(Context& context) const { + if (context.inArray) { + if (uniqueItems_) + context.valueUniqueness = true; + + if (itemsList_) + context.valueSchema = itemsList_; + else if (itemsTuple_) { + if (context.arrayElementIndex < itemsTupleCount_) + context.valueSchema = itemsTuple_[context.arrayElementIndex]; + else if (additionalItemsSchema_) + context.valueSchema = additionalItemsSchema_; + else if (additionalItems_) + context.valueSchema = GetTypeless(); + else + RAPIDJSON_INVALID_KEYWORD_RETURN(GetItemsString()); + } + else + context.valueSchema = GetTypeless(); + + context.arrayElementIndex++; + } + return true; + } + + RAPIDJSON_FORCEINLINE bool EndValue(Context& context) const { + if (context.patternPropertiesValidatorCount > 0) { + bool otherValid = false; + SizeType count = context.patternPropertiesValidatorCount; + if (context.objectPatternValidatorType != Context::kPatternValidatorOnly) + otherValid = context.patternPropertiesValidators[--count]->IsValid(); + + bool patternValid = true; + for (SizeType i = 0; i < count; i++) + if (!context.patternPropertiesValidators[i]->IsValid()) { + patternValid = false; + break; + } + + if (context.objectPatternValidatorType == Context::kPatternValidatorOnly) { + if (!patternValid) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + else if (context.objectPatternValidatorType == Context::kPatternValidatorWithProperty) { + if (!patternValid || !otherValid) + 
RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + else if (!patternValid && !otherValid) // kPatternValidatorWithAdditionalProperty) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + + if (enum_) { + const uint64_t h = context.factory.GetHashCode(context.hasher); + for (SizeType i = 0; i < enumCount_; i++) + if (enum_[i] == h) + goto foundEnum; + RAPIDJSON_INVALID_KEYWORD_RETURN(GetEnumString()); + foundEnum:; + } + + if (allOf_.schemas) + for (SizeType i = allOf_.begin; i < allOf_.begin + allOf_.count; i++) + if (!context.validators[i]->IsValid()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAllOfString()); + + if (anyOf_.schemas) { + for (SizeType i = anyOf_.begin; i < anyOf_.begin + anyOf_.count; i++) + if (context.validators[i]->IsValid()) + goto foundAny; + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAnyOfString()); + foundAny:; + } + + if (oneOf_.schemas) { + bool oneValid = false; + for (SizeType i = oneOf_.begin; i < oneOf_.begin + oneOf_.count; i++) + if (context.validators[i]->IsValid()) { + if (oneValid) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); + else + oneValid = true; + } + if (!oneValid) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); + } + + if (not_ && context.validators[notValidatorIndex_]->IsValid()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetNotString()); + + return true; + } + + bool Null(Context& context) const { + if (!(type_ & (1 << kNullSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + return CreateParallelValidator(context); + } + + bool Bool(Context& context, bool) const { + if (!(type_ & (1 << kBooleanSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + return CreateParallelValidator(context); + } + + bool Int(Context& context, int i) const { + if (!CheckInt(context, i)) + return false; + return CreateParallelValidator(context); + } + + bool Uint(Context& context, unsigned u) const { + if (!CheckUint(context, u)) + return false; + return 
CreateParallelValidator(context); + } + + bool Int64(Context& context, int64_t i) const { + if (!CheckInt(context, i)) + return false; + return CreateParallelValidator(context); + } + + bool Uint64(Context& context, uint64_t u) const { + if (!CheckUint(context, u)) + return false; + return CreateParallelValidator(context); + } + + bool Double(Context& context, double d) const { + if (!(type_ & (1 << kNumberSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (!minimum_.IsNull() && !CheckDoubleMinimum(context, d)) + return false; + + if (!maximum_.IsNull() && !CheckDoubleMaximum(context, d)) + return false; + + if (!multipleOf_.IsNull() && !CheckDoubleMultipleOf(context, d)) + return false; + + return CreateParallelValidator(context); + } + + bool String(Context& context, const Ch* str, SizeType length, bool) const { + if (!(type_ & (1 << kStringSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (minLength_ != 0 || maxLength_ != SizeType(~0)) { + SizeType count; + if (internal::CountStringCodePoint(str, length, &count)) { + if (count < minLength_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinLengthString()); + if (count > maxLength_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxLengthString()); + } + } + + if (pattern_ && !IsPatternMatch(pattern_, str, length)) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternString()); + + return CreateParallelValidator(context); + } + + bool StartObject(Context& context) const { + if (!(type_ & (1 << kObjectSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (hasDependencies_ || hasRequired_) { + context.propertyExist = static_cast(context.factory.MallocState(sizeof(bool) * propertyCount_)); + std::memset(context.propertyExist, 0, sizeof(bool) * propertyCount_); + } + + if (patternProperties_) { // pre-allocate schema array + SizeType count = patternPropertyCount_ + 1; // extra for valuePatternValidatorType + context.patternPropertiesSchemas = 
static_cast(context.factory.MallocState(sizeof(const SchemaType*) * count)); + context.patternPropertiesSchemaCount = 0; + std::memset(context.patternPropertiesSchemas, 0, sizeof(SchemaType*) * count); + } + + return CreateParallelValidator(context); + } + + bool Key(Context& context, const Ch* str, SizeType len, bool) const { + if (patternProperties_) { + context.patternPropertiesSchemaCount = 0; + for (SizeType i = 0; i < patternPropertyCount_; i++) + if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = patternProperties_[i].schema; + } + + SizeType index; + if (FindPropertyIndex(ValueType(str, len).Move(), &index)) { + if (context.patternPropertiesSchemaCount > 0) { + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = properties_[index].schema; + context.valueSchema = GetTypeless(); + context.valuePatternValidatorType = Context::kPatternValidatorWithProperty; + } + else + context.valueSchema = properties_[index].schema; + + if (context.propertyExist) + context.propertyExist[index] = true; + + return true; + } + + if (additionalPropertiesSchema_) { + if (additionalPropertiesSchema_ && context.patternPropertiesSchemaCount > 0) { + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = additionalPropertiesSchema_; + context.valueSchema = GetTypeless(); + context.valuePatternValidatorType = Context::kPatternValidatorWithAdditionalProperty; + } + else + context.valueSchema = additionalPropertiesSchema_; + return true; + } + else if (additionalProperties_) { + context.valueSchema = GetTypeless(); + return true; + } + + if (context.patternPropertiesSchemaCount == 0) // patternProperties are not additional properties + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAdditionalPropertiesString()); + + return true; + } + + bool EndObject(Context& context, SizeType memberCount) const { + if (hasRequired_) + for (SizeType 
index = 0; index < propertyCount_; index++) + if (properties_[index].required) + if (!context.propertyExist[index]) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString()); + + if (memberCount < minProperties_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinPropertiesString()); + + if (memberCount > maxProperties_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxPropertiesString()); + + if (hasDependencies_) { + for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) + if (context.propertyExist[sourceIndex]) { + if (properties_[sourceIndex].dependencies) { + for (SizeType targetIndex = 0; targetIndex < propertyCount_; targetIndex++) + if (properties_[sourceIndex].dependencies[targetIndex] && !context.propertyExist[targetIndex]) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); + } + else if (properties_[sourceIndex].dependenciesSchema) + if (!context.validators[properties_[sourceIndex].dependenciesValidatorIndex]->IsValid()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); + } + } + + return true; + } + + bool StartArray(Context& context) const { + if (!(type_ & (1 << kArraySchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + context.arrayElementIndex = 0; + context.inArray = true; + + return CreateParallelValidator(context); + } + + bool EndArray(Context& context, SizeType elementCount) const { + context.inArray = false; + + if (elementCount < minItems_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinItemsString()); + + if (elementCount > maxItems_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxItemsString()); + + return true; + } + + // Generate functions for string literal according to Ch +#define RAPIDJSON_STRING_(name, ...) 
\ + static const ValueType& Get##name##String() {\ + static const Ch s[] = { __VA_ARGS__, '\0' };\ + static const ValueType v(s, sizeof(s) / sizeof(Ch) - 1);\ + return v;\ + } + + RAPIDJSON_STRING_(Null, 'n', 'u', 'l', 'l') + RAPIDJSON_STRING_(Boolean, 'b', 'o', 'o', 'l', 'e', 'a', 'n') + RAPIDJSON_STRING_(Object, 'o', 'b', 'j', 'e', 'c', 't') + RAPIDJSON_STRING_(Array, 'a', 'r', 'r', 'a', 'y') + RAPIDJSON_STRING_(String, 's', 't', 'r', 'i', 'n', 'g') + RAPIDJSON_STRING_(Number, 'n', 'u', 'm', 'b', 'e', 'r') + RAPIDJSON_STRING_(Integer, 'i', 'n', 't', 'e', 'g', 'e', 'r') + RAPIDJSON_STRING_(Type, 't', 'y', 'p', 'e') + RAPIDJSON_STRING_(Enum, 'e', 'n', 'u', 'm') + RAPIDJSON_STRING_(AllOf, 'a', 'l', 'l', 'O', 'f') + RAPIDJSON_STRING_(AnyOf, 'a', 'n', 'y', 'O', 'f') + RAPIDJSON_STRING_(OneOf, 'o', 'n', 'e', 'O', 'f') + RAPIDJSON_STRING_(Not, 'n', 'o', 't') + RAPIDJSON_STRING_(Properties, 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(Required, 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd') + RAPIDJSON_STRING_(Dependencies, 'd', 'e', 'p', 'e', 'n', 'd', 'e', 'n', 'c', 'i', 'e', 's') + RAPIDJSON_STRING_(PatternProperties, 'p', 'a', 't', 't', 'e', 'r', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(AdditionalProperties, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(MinProperties, 'm', 'i', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(MaxProperties, 'm', 'a', 'x', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(Items, 'i', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MinItems, 'm', 'i', 'n', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MaxItems, 'm', 'a', 'x', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(AdditionalItems, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(UniqueItems, 'u', 'n', 'i', 'q', 'u', 'e', 'I', 't', 'e', 'm', 's') + 
RAPIDJSON_STRING_(MinLength, 'm', 'i', 'n', 'L', 'e', 'n', 'g', 't', 'h') + RAPIDJSON_STRING_(MaxLength, 'm', 'a', 'x', 'L', 'e', 'n', 'g', 't', 'h') + RAPIDJSON_STRING_(Pattern, 'p', 'a', 't', 't', 'e', 'r', 'n') + RAPIDJSON_STRING_(Minimum, 'm', 'i', 'n', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(Maximum, 'm', 'a', 'x', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f') + +#undef RAPIDJSON_STRING_ + +private: + enum SchemaValueType { + kNullSchemaType, + kBooleanSchemaType, + kObjectSchemaType, + kArraySchemaType, + kStringSchemaType, + kNumberSchemaType, + kIntegerSchemaType, + kTotalSchemaType + }; + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX + typedef internal::GenericRegex RegexType; +#elif RAPIDJSON_SCHEMA_USE_STDREGEX + typedef std::basic_regex RegexType; +#else + typedef char RegexType; +#endif + + struct SchemaArray { + SchemaArray() : schemas(), count() {} + ~SchemaArray() { AllocatorType::Free(schemas); } + const SchemaType** schemas; + SizeType begin; // begin index of context.validators + SizeType count; + }; + + static const SchemaType* GetTypeless() { + static SchemaType typeless(0, PointerType(), ValueType(kObjectType).Move(), ValueType(kObjectType).Move(), 0); + return &typeless; + } + + template + void AddUniqueElement(V1& a, const V2& v) { + for (typename V1::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr) + if (*itr == v) + return; + V1 c(v, *allocator_); + a.PushBack(c, *allocator_); + } + + static const ValueType* GetMember(const ValueType& value, const ValueType& name) { + typename ValueType::ConstMemberIterator itr = value.FindMember(name); + return itr != value.MemberEnd() ? 
&(itr->value) : 0; + } + + static void AssignIfExist(bool& out, const ValueType& value, const ValueType& name) { + if (const ValueType* v = GetMember(value, name)) + if (v->IsBool()) + out = v->GetBool(); + } + + static void AssignIfExist(SizeType& out, const ValueType& value, const ValueType& name) { + if (const ValueType* v = GetMember(value, name)) + if (v->IsUint64() && v->GetUint64() <= SizeType(~0)) + out = static_cast(v->GetUint64()); + } + + void AssignIfExist(SchemaArray& out, SchemaDocumentType& schemaDocument, const PointerType& p, const ValueType& value, const ValueType& name, const ValueType& document) { + if (const ValueType* v = GetMember(value, name)) { + if (v->IsArray() && v->Size() > 0) { + PointerType q = p.Append(name, allocator_); + out.count = v->Size(); + out.schemas = static_cast(allocator_->Malloc(out.count * sizeof(const Schema*))); + memset(out.schemas, 0, sizeof(Schema*)* out.count); + for (SizeType i = 0; i < out.count; i++) + schemaDocument.CreateSchema(&out.schemas[i], q.Append(i, allocator_), (*v)[i], document); + out.begin = validatorCount_; + validatorCount_ += out.count; + } + } + } + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX + template + RegexType* CreatePattern(const ValueType& value) { + if (value.IsString()) { + RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString()); + if (!r->IsValid()) { + r->~RegexType(); + AllocatorType::Free(r); + r = 0; + } + return r; + } + return 0; + } + + static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType) { + return pattern->Search(str); + } +#elif RAPIDJSON_SCHEMA_USE_STDREGEX + template + RegexType* CreatePattern(const ValueType& value) { + if (value.IsString()) + try { + return new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript); + } + catch (const std::regex_error&) { + } + return 0; + } + + static bool IsPatternMatch(const RegexType* pattern, 
const Ch *str, SizeType length) { + std::match_results r; + return std::regex_search(str, str + length, r, *pattern); + } +#else + template + RegexType* CreatePattern(const ValueType&) { return 0; } + + static bool IsPatternMatch(const RegexType*, const Ch *, SizeType) { return true; } +#endif // RAPIDJSON_SCHEMA_USE_STDREGEX + + void AddType(const ValueType& type) { + if (type == GetNullString() ) type_ |= 1 << kNullSchemaType; + else if (type == GetBooleanString()) type_ |= 1 << kBooleanSchemaType; + else if (type == GetObjectString() ) type_ |= 1 << kObjectSchemaType; + else if (type == GetArrayString() ) type_ |= 1 << kArraySchemaType; + else if (type == GetStringString() ) type_ |= 1 << kStringSchemaType; + else if (type == GetIntegerString()) type_ |= 1 << kIntegerSchemaType; + else if (type == GetNumberString() ) type_ |= (1 << kNumberSchemaType) | (1 << kIntegerSchemaType); + } + + bool CreateParallelValidator(Context& context) const { + if (enum_ || context.arrayUniqueness) + context.hasher = context.factory.CreateHasher(); + + if (validatorCount_) { + RAPIDJSON_ASSERT(context.validators == 0); + context.validators = static_cast(context.factory.MallocState(sizeof(ISchemaValidator*) * validatorCount_)); + context.validatorCount = validatorCount_; + + if (allOf_.schemas) + CreateSchemaValidators(context, allOf_); + + if (anyOf_.schemas) + CreateSchemaValidators(context, anyOf_); + + if (oneOf_.schemas) + CreateSchemaValidators(context, oneOf_); + + if (not_) + context.validators[notValidatorIndex_] = context.factory.CreateSchemaValidator(*not_); + + if (hasSchemaDependencies_) { + for (SizeType i = 0; i < propertyCount_; i++) + if (properties_[i].dependenciesSchema) + context.validators[properties_[i].dependenciesValidatorIndex] = context.factory.CreateSchemaValidator(*properties_[i].dependenciesSchema); + } + } + + return true; + } + + void CreateSchemaValidators(Context& context, const SchemaArray& schemas) const { + for (SizeType i = 0; i < schemas.count; 
i++) + context.validators[schemas.begin + i] = context.factory.CreateSchemaValidator(*schemas.schemas[i]); + } + + // O(n) + bool FindPropertyIndex(const ValueType& name, SizeType* outIndex) const { + SizeType len = name.GetStringLength(); + const Ch* str = name.GetString(); + for (SizeType index = 0; index < propertyCount_; index++) + if (properties_[index].name.GetStringLength() == len && + (std::memcmp(properties_[index].name.GetString(), str, sizeof(Ch) * len) == 0)) + { + *outIndex = index; + return true; + } + return false; + } + + bool CheckInt(Context& context, int64_t i) const { + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (!minimum_.IsNull()) { + if (minimum_.IsInt64()) { + if (exclusiveMinimum_ ? i <= minimum_.GetInt64() : i < minimum_.GetInt64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } + else if (minimum_.IsUint64()) { + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); // i <= max(int64_t) < minimum.GetUint64() + } + else if (!CheckDoubleMinimum(context, static_cast(i))) + return false; + } + + if (!maximum_.IsNull()) { + if (maximum_.IsInt64()) { + if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } + else if (maximum_.IsUint64()) + /* do nothing */; // i <= max(int64_t) < maximum_.GetUint64() + else if (!CheckDoubleMaximum(context, static_cast(i))) + return false; + } + + if (!multipleOf_.IsNull()) { + if (multipleOf_.IsUint64()) { + if (static_cast(i >= 0 ? 
i : -i) % multipleOf_.GetUint64() != 0) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } + else if (!CheckDoubleMultipleOf(context, static_cast(i))) + return false; + } + + return true; + } + + bool CheckUint(Context& context, uint64_t i) const { + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (!minimum_.IsNull()) { + if (minimum_.IsUint64()) { + if (exclusiveMinimum_ ? i <= minimum_.GetUint64() : i < minimum_.GetUint64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } + else if (minimum_.IsInt64()) + /* do nothing */; // i >= 0 > minimum.Getint64() + else if (!CheckDoubleMinimum(context, static_cast(i))) + return false; + } + + if (!maximum_.IsNull()) { + if (maximum_.IsUint64()) { + if (exclusiveMaximum_ ? i >= maximum_.GetUint64() : i > maximum_.GetUint64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } + else if (maximum_.IsInt64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); // i >= 0 > maximum_ + else if (!CheckDoubleMaximum(context, static_cast(i))) + return false; + } + + if (!multipleOf_.IsNull()) { + if (multipleOf_.IsUint64()) { + if (i % multipleOf_.GetUint64() != 0) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } + else if (!CheckDoubleMultipleOf(context, static_cast(i))) + return false; + } + + return true; + } + + bool CheckDoubleMinimum(Context& context, double d) const { + if (exclusiveMinimum_ ? d <= minimum_.GetDouble() : d < minimum_.GetDouble()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + return true; + } + + bool CheckDoubleMaximum(Context& context, double d) const { + if (exclusiveMaximum_ ? 
d >= maximum_.GetDouble() : d > maximum_.GetDouble()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + return true; + } + + bool CheckDoubleMultipleOf(Context& context, double d) const { + double a = std::abs(d), b = std::abs(multipleOf_.GetDouble()); + double q = std::floor(a / b); + double r = a - q * b; + if (r > 0.0) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + return true; + } + + struct Property { + Property() : schema(), dependenciesSchema(), dependenciesValidatorIndex(), dependencies(), required(false) {} + ~Property() { AllocatorType::Free(dependencies); } + SValue name; + const SchemaType* schema; + const SchemaType* dependenciesSchema; + SizeType dependenciesValidatorIndex; + bool* dependencies; + bool required; + }; + + struct PatternProperty { + PatternProperty() : schema(), pattern() {} + ~PatternProperty() { + if (pattern) { + pattern->~RegexType(); + AllocatorType::Free(pattern); + } + } + const SchemaType* schema; + RegexType* pattern; + }; + + AllocatorType* allocator_; + uint64_t* enum_; + SizeType enumCount_; + SchemaArray allOf_; + SchemaArray anyOf_; + SchemaArray oneOf_; + const SchemaType* not_; + unsigned type_; // bitmask of kSchemaType + SizeType validatorCount_; + SizeType notValidatorIndex_; + + Property* properties_; + const SchemaType* additionalPropertiesSchema_; + PatternProperty* patternProperties_; + SizeType patternPropertyCount_; + SizeType propertyCount_; + SizeType minProperties_; + SizeType maxProperties_; + bool additionalProperties_; + bool hasDependencies_; + bool hasRequired_; + bool hasSchemaDependencies_; + + const SchemaType* additionalItemsSchema_; + const SchemaType* itemsList_; + const SchemaType** itemsTuple_; + SizeType itemsTupleCount_; + SizeType minItems_; + SizeType maxItems_; + bool additionalItems_; + bool uniqueItems_; + + RegexType* pattern_; + SizeType minLength_; + SizeType maxLength_; + + SValue minimum_; + SValue maximum_; + SValue multipleOf_; + bool exclusiveMinimum_; + bool 
exclusiveMaximum_; +}; + +template +struct TokenHelper { + RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { + *documentStack.template Push() = '/'; + char buffer[21]; + size_t length = static_cast((sizeof(SizeType) == 4 ? u32toa(index, buffer) : u64toa(index, buffer)) - buffer); + for (size_t i = 0; i < length; i++) + *documentStack.template Push() = buffer[i]; + } +}; + +// Partial specialized version for char to prevent buffer copying. +template +struct TokenHelper { + RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { + #ifdef WIN32 + #pragma warning(push) + #pragma warning(disable: 4127) // Conditional Expression is Constant + #endif // WIN32 + if (sizeof(SizeType) == 4) { + #ifdef WIN32 + #pragma warning(pop) + #endif // WIN32 + char *buffer = documentStack.template Push(1 + 10); // '/' + uint + *buffer++ = '/'; + const char* end = internal::u32toa(index, buffer); + documentStack.template Pop(static_cast(10 - (end - buffer))); + } + else { + char *buffer = documentStack.template Push(1 + 20); // '/' + uint64 + *buffer++ = '/'; + const char* end = internal::u64toa(index, buffer); + documentStack.template Pop(static_cast(20 - (end - buffer))); + } + } +}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// IGenericRemoteSchemaDocumentProvider + +template +class IGenericRemoteSchemaDocumentProvider { +public: + typedef typename SchemaDocumentType::Ch Ch; + + virtual ~IGenericRemoteSchemaDocumentProvider() {} + virtual const SchemaDocumentType* GetRemoteDocument(const Ch* uri, SizeType length) = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericSchemaDocument + +//! JSON schema document. +/*! + A JSON schema document is a compiled version of a JSON schema. + It is basically a tree of internal::Schema. + + \note This is an immutable class (i.e. 
its instance cannot be modified after construction). + \tparam ValueT Type of JSON value (e.g. \c Value ), which also determine the encoding. + \tparam Allocator Allocator type for allocating memory of this document. +*/ +template +class GenericSchemaDocument { +public: + typedef ValueT ValueType; + typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProviderType; + typedef Allocator AllocatorType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + typedef internal::Schema SchemaType; + typedef GenericPointer PointerType; + friend class internal::Schema; + template + friend class GenericSchemaValidator; + + //! Constructor. + /*! + Compile a JSON document into schema document. + + \param document A JSON document as source. + \param remoteProvider An optional remote schema document provider for resolving remote reference. Can be null. + \param allocator An optional allocator instance for allocating memory. Can be null. + */ + explicit GenericSchemaDocument(const ValueType& document, IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) : + remoteProvider_(remoteProvider), + allocator_(allocator), + ownAllocator_(), + root_(), + schemaMap_(allocator, kInitialSchemaMapSize), + schemaRef_(allocator, kInitialSchemaRefSize) + { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + + // Generate root schema, it will call CreateSchema() to create sub-schemas, + // And call AddRefSchema() if there are $ref. 
+ CreateSchemaRecursive(&root_, PointerType(), document, document); + + // Resolve $ref + while (!schemaRef_.Empty()) { + SchemaRefEntry* refEntry = schemaRef_.template Pop(1); + if (const SchemaType* s = GetSchema(refEntry->target)) { + if (refEntry->schema) + *refEntry->schema = s; + + // Create entry in map if not exist + if (!GetSchema(refEntry->source)) { + new (schemaMap_.template Push()) SchemaEntry(refEntry->source, const_cast(s), false, allocator_); + } + } + refEntry->~SchemaRefEntry(); + } + + RAPIDJSON_ASSERT(root_ != 0); + + schemaRef_.ShrinkToFit(); // Deallocate all memory for ref + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move constructor in C++11 + GenericSchemaDocument(GenericSchemaDocument&& rhs) RAPIDJSON_NOEXCEPT : + remoteProvider_(rhs.remoteProvider_), + allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + root_(rhs.root_), + schemaMap_(std::move(rhs.schemaMap_)), + schemaRef_(std::move(rhs.schemaRef_)) + { + rhs.remoteProvider_ = 0; + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + } +#endif + + //! Destructor + ~GenericSchemaDocument() { + while (!schemaMap_.Empty()) + schemaMap_.template Pop(1)->~SchemaEntry(); + + RAPIDJSON_DELETE(ownAllocator_); + } + + //! Get the root schema. + const SchemaType& GetRoot() const { return *root_; } + +private: + //! Prohibit copying + GenericSchemaDocument(const GenericSchemaDocument&); + //! 
Prohibit assignment + GenericSchemaDocument& operator=(const GenericSchemaDocument&); + + struct SchemaRefEntry { + SchemaRefEntry(const PointerType& s, const PointerType& t, const SchemaType** outSchema, Allocator *allocator) : source(s, allocator), target(t, allocator), schema(outSchema) {} + PointerType source; + PointerType target; + const SchemaType** schema; + }; + + struct SchemaEntry { + SchemaEntry(const PointerType& p, SchemaType* s, bool o, Allocator* allocator) : pointer(p, allocator), schema(s), owned(o) {} + ~SchemaEntry() { + if (owned) { + schema->~SchemaType(); + Allocator::Free(schema); + } + } + PointerType pointer; + SchemaType* schema; + bool owned; + }; + + void CreateSchemaRecursive(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { + if (schema) + *schema = SchemaType::GetTypeless(); + + if (v.GetType() == kObjectType) { + const SchemaType* s = GetSchema(pointer); + if (!s) + CreateSchema(schema, pointer, v, document); + + for (typename ValueType::ConstMemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr) + CreateSchemaRecursive(0, pointer.Append(itr->name, allocator_), itr->value, document); + } + else if (v.GetType() == kArrayType) + for (SizeType i = 0; i < v.Size(); i++) + CreateSchemaRecursive(0, pointer.Append(i, allocator_), v[i], document); + } + + void CreateSchema(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { + RAPIDJSON_ASSERT(pointer.IsValid()); + if (v.IsObject()) { + if (!HandleRefSchema(pointer, schema, v, document)) { + SchemaType* s = new (allocator_->Malloc(sizeof(SchemaType))) SchemaType(this, pointer, v, document, allocator_); + new (schemaMap_.template Push()) SchemaEntry(pointer, s, true, allocator_); + if (schema) + *schema = s; + } + } + } + + bool HandleRefSchema(const PointerType& source, const SchemaType** schema, const ValueType& v, const ValueType& document) { + static const Ch kRefString[] 
= { '$', 'r', 'e', 'f', '\0' }; + static const ValueType kRefValue(kRefString, 4); + + typename ValueType::ConstMemberIterator itr = v.FindMember(kRefValue); + if (itr == v.MemberEnd()) + return false; + + if (itr->value.IsString()) { + SizeType len = itr->value.GetStringLength(); + if (len > 0) { + const Ch* s = itr->value.GetString(); + SizeType i = 0; + while (i < len && s[i] != '#') // Find the first # + i++; + + if (i > 0) { // Remote reference, resolve immediately + if (remoteProvider_) { + if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i - 1)) { + PointerType pointer(&s[i], len - i, allocator_); + if (pointer.IsValid()) { + if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) { + if (schema) + *schema = sc; + return true; + } + } + } + } + } + else if (s[i] == '#') { // Local reference, defer resolution + PointerType pointer(&s[i], len - i, allocator_); + if (pointer.IsValid()) { + if (const ValueType* nv = pointer.Get(document)) + if (HandleRefSchema(source, schema, *nv, document)) + return true; + + new (schemaRef_.template Push()) SchemaRefEntry(source, pointer, schema, allocator_); + return true; + } + } + } + } + return false; + } + + const SchemaType* GetSchema(const PointerType& pointer) const { + for (const SchemaEntry* target = schemaMap_.template Bottom(); target != schemaMap_.template End(); ++target) + if (pointer == target->pointer) + return target->schema; + return 0; + } + + PointerType GetPointer(const SchemaType* schema) const { + for (const SchemaEntry* target = schemaMap_.template Bottom(); target != schemaMap_.template End(); ++target) + if (schema == target->schema) + return target->pointer; + return PointerType(); + } + + static const size_t kInitialSchemaMapSize = 64; + static const size_t kInitialSchemaRefSize = 64; + + IRemoteSchemaDocumentProviderType* remoteProvider_; + Allocator *allocator_; + Allocator *ownAllocator_; + const SchemaType* root_; //!< Root schema. 
+ internal::Stack schemaMap_; // Stores created Pointer -> Schemas + internal::Stack schemaRef_; // Stores Pointer from $ref and schema which holds the $ref +}; + +//! GenericSchemaDocument using Value type. +typedef GenericSchemaDocument SchemaDocument; +//! IGenericRemoteSchemaDocumentProvider using SchemaDocument. +typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProvider; + +/////////////////////////////////////////////////////////////////////////////// +// GenericSchemaValidator + +//! JSON Schema Validator. +/*! + A SAX style JSON schema validator. + It uses a \c GenericSchemaDocument to validate SAX events. + It delegates the incoming SAX events to an output handler. + The default output handler does nothing. + It can be reused multiple times by calling \c Reset(). + + \tparam SchemaDocumentType Type of schema document. + \tparam OutputHandler Type of output handler. Default handler does nothing. + \tparam StateAllocator Allocator for storing the internal validation states. +*/ +template < + typename SchemaDocumentType, + typename OutputHandler = BaseReaderHandler, + typename StateAllocator = CrtAllocator> +class GenericSchemaValidator : + public internal::ISchemaStateFactory, + public internal::ISchemaValidator +{ +public: + typedef typename SchemaDocumentType::SchemaType SchemaType; + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename SchemaType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + + //! Constructor without output handler. + /*! + \param schemaDocument The schema document to conform to. + \param allocator Optional allocator for storing internal validation states. + \param schemaStackCapacity Optional initial capacity of schema path stack. + \param documentStackCapacity Optional initial capacity of document path stack. 
+ */ + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(schemaDocument.GetRoot()), + outputHandler_(GetNullHandler()), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(0) +#endif + { + } + + //! Constructor with output handler. + /*! + \param schemaDocument The schema document to conform to. + \param allocator Optional allocator for storing internal validation states. + \param schemaStackCapacity Optional initial capacity of schema path stack. + \param documentStackCapacity Optional initial capacity of document path stack. + */ + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + OutputHandler& outputHandler, + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(schemaDocument.GetRoot()), + outputHandler_(outputHandler), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(0) +#endif + { + } + + //! Destructor. + ~GenericSchemaValidator() { + Reset(); + RAPIDJSON_DELETE(ownStateAllocator_); + } + + //! Reset the internal states. + void Reset() { + while (!schemaStack_.Empty()) + PopSchema(); + documentStack_.Clear(); + valid_ = true; + } + + //! Checks whether the current state is valid. + // Implementation of ISchemaValidator + virtual bool IsValid() const { return valid_; } + + //! Gets the JSON pointer pointed to the invalid schema. 
+ PointerType GetInvalidSchemaPointer() const { + return schemaStack_.Empty() ? PointerType() : schemaDocument_->GetPointer(&CurrentSchema()); + } + + //! Gets the keyword of invalid schema. + const Ch* GetInvalidSchemaKeyword() const { + return schemaStack_.Empty() ? 0 : CurrentContext().invalidKeyword; + } + + //! Gets the JSON pointer pointed to the invalid value. + PointerType GetInvalidDocumentPointer() const { + return documentStack_.Empty() ? PointerType() : PointerType(documentStack_.template Bottom(), documentStack_.GetSize() / sizeof(Ch)); + } + +#if RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() \ +RAPIDJSON_MULTILINEMACRO_BEGIN\ + *documentStack_.template Push() = '\0';\ + documentStack_.template Pop(1);\ + internal::PrintInvalidDocument(documentStack_.template Bottom());\ +RAPIDJSON_MULTILINEMACRO_END +#else +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() +#endif + +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_(method, arg1)\ + if (!valid_) return false; \ + if (!BeginValue() || !CurrentSchema().method arg1) {\ + RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_();\ + return valid_ = false;\ + } + +#define RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2)\ + for (Context* context = schemaStack_.template Bottom(); context != schemaStack_.template End(); context++) {\ + if (context->hasher)\ + static_cast(context->hasher)->method arg2;\ + if (context->validators)\ + for (SizeType i_ = 0; i_ < context->validatorCount; i_++)\ + static_cast(context->validators[i_])->method arg2;\ + if (context->patternPropertiesValidators)\ + for (SizeType i_ = 0; i_ < context->patternPropertiesValidatorCount; i_++)\ + static_cast(context->patternPropertiesValidators[i_])->method arg2;\ + } + +#define RAPIDJSON_SCHEMA_HANDLE_END_(method, arg2)\ + return valid_ = EndValue() && outputHandler_.method arg2 + +#define RAPIDJSON_SCHEMA_HANDLE_VALUE_(method, arg1, arg2) \ + RAPIDJSON_SCHEMA_HANDLE_BEGIN_ (method, arg1);\ + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, 
arg2);\ + RAPIDJSON_SCHEMA_HANDLE_END_ (method, arg2) + + bool Null() { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null, (CurrentContext() ), ( )); } + bool Bool(bool b) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Bool, (CurrentContext(), b), (b)); } + bool Int(int i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int, (CurrentContext(), i), (i)); } + bool Uint(unsigned u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint, (CurrentContext(), u), (u)); } + bool Int64(int64_t i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int64, (CurrentContext(), i), (i)); } + bool Uint64(uint64_t u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint64, (CurrentContext(), u), (u)); } + bool Double(double d) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Double, (CurrentContext(), d), (d)); } + bool RawNumber(const Ch* str, SizeType length, bool copy) + { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } + bool String(const Ch* str, SizeType length, bool copy) + { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } + + bool StartObject() { + RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartObject, (CurrentContext())); + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartObject, ()); + return valid_ = outputHandler_.StartObject(); + } + + bool Key(const Ch* str, SizeType len, bool copy) { + if (!valid_) return false; + AppendToken(str, len); + if (!CurrentSchema().Key(CurrentContext(), str, len, copy)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(Key, (str, len, copy)); + return valid_ = outputHandler_.Key(str, len, copy); + } + + bool EndObject(SizeType memberCount) { + if (!valid_) return false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndObject, (memberCount)); + if (!CurrentSchema().EndObject(CurrentContext(), memberCount)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_END_(EndObject, (memberCount)); + } + + bool StartArray() { + RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartArray, (CurrentContext())); + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartArray, ()); + return valid_ = 
outputHandler_.StartArray(); + } + + bool EndArray(SizeType elementCount) { + if (!valid_) return false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndArray, (elementCount)); + if (!CurrentSchema().EndArray(CurrentContext(), elementCount)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_END_(EndArray, (elementCount)); + } + +#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_ +#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_ +#undef RAPIDJSON_SCHEMA_HANDLE_PARALLEL_ +#undef RAPIDJSON_SCHEMA_HANDLE_VALUE_ + + // Implementation of ISchemaStateFactory + virtual ISchemaValidator* CreateSchemaValidator(const SchemaType& root) { + return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, +#if RAPIDJSON_SCHEMA_VERBOSE + depth_ + 1, +#endif + &GetStateAllocator()); + } + + virtual void DestroySchemaValidator(ISchemaValidator* validator) { + GenericSchemaValidator* v = static_cast(validator); + v->~GenericSchemaValidator(); + StateAllocator::Free(v); + } + + virtual void* CreateHasher() { + return new (GetStateAllocator().Malloc(sizeof(HasherType))) HasherType(&GetStateAllocator()); + } + + virtual uint64_t GetHashCode(void* hasher) { + return static_cast(hasher)->GetHashCode(); + } + + virtual void DestroryHasher(void* hasher) { + HasherType* h = static_cast(hasher); + h->~HasherType(); + StateAllocator::Free(h); + } + + virtual void* MallocState(size_t size) { + return GetStateAllocator().Malloc(size); + } + + virtual void FreeState(void* p) { + return StateAllocator::Free(p); + } + +private: + typedef typename SchemaType::Context Context; + typedef GenericValue, StateAllocator> HashCodeArray; + typedef internal::Hasher HasherType; + + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + const SchemaType& root, +#if RAPIDJSON_SCHEMA_VERBOSE + unsigned depth, +#endif + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = 
kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(root), + outputHandler_(GetNullHandler()), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(depth) +#endif + { + } + + StateAllocator& GetStateAllocator() { + if (!stateAllocator_) + stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator()); + return *stateAllocator_; + } + + bool BeginValue() { + if (schemaStack_.Empty()) + PushSchema(root_); + else { + if (CurrentContext().inArray) + internal::TokenHelper, Ch>::AppendIndexToken(documentStack_, CurrentContext().arrayElementIndex); + + if (!CurrentSchema().BeginValue(CurrentContext())) + return false; + + SizeType count = CurrentContext().patternPropertiesSchemaCount; + const SchemaType** sa = CurrentContext().patternPropertiesSchemas; + typename Context::PatternValidatorType patternValidatorType = CurrentContext().valuePatternValidatorType; + bool valueUniqueness = CurrentContext().valueUniqueness; + if (CurrentContext().valueSchema) + PushSchema(*CurrentContext().valueSchema); + + if (count > 0) { + CurrentContext().objectPatternValidatorType = patternValidatorType; + ISchemaValidator**& va = CurrentContext().patternPropertiesValidators; + SizeType& validatorCount = CurrentContext().patternPropertiesValidatorCount; + va = static_cast(MallocState(sizeof(ISchemaValidator*) * count)); + for (SizeType i = 0; i < count; i++) + va[validatorCount++] = CreateSchemaValidator(*sa[i]); + } + + CurrentContext().arrayUniqueness = valueUniqueness; + } + return true; + } + + bool EndValue() { + if (!CurrentSchema().EndValue(CurrentContext())) + return false; + +#if RAPIDJSON_SCHEMA_VERBOSE + GenericStringBuffer sb; + schemaDocument_->GetPointer(&CurrentSchema()).Stringify(sb); + + *documentStack_.template Push() = '\0'; + documentStack_.template Pop(1); + 
internal::PrintValidatorPointers(depth_, sb.GetString(), documentStack_.template Bottom()); +#endif + + uint64_t h = CurrentContext().arrayUniqueness ? static_cast(CurrentContext().hasher)->GetHashCode() : 0; + + PopSchema(); + + if (!schemaStack_.Empty()) { + Context& context = CurrentContext(); + if (context.valueUniqueness) { + HashCodeArray* a = static_cast(context.arrayElementHashCodes); + if (!a) + CurrentContext().arrayElementHashCodes = a = new (GetStateAllocator().Malloc(sizeof(HashCodeArray))) HashCodeArray(kArrayType); + for (typename HashCodeArray::ConstValueIterator itr = a->Begin(); itr != a->End(); ++itr) + if (itr->GetUint64() == h) + RAPIDJSON_INVALID_KEYWORD_RETURN(SchemaType::GetUniqueItemsString()); + a->PushBack(h, GetStateAllocator()); + } + } + + // Remove the last token of document pointer + while (!documentStack_.Empty() && *documentStack_.template Pop(1) != '/') + ; + + return true; + } + + void AppendToken(const Ch* str, SizeType len) { + documentStack_.template Reserve(1 + len * 2); // worst case all characters are escaped as two characters + *documentStack_.template PushUnsafe() = '/'; + for (SizeType i = 0; i < len; i++) { + if (str[i] == '~') { + *documentStack_.template PushUnsafe() = '~'; + *documentStack_.template PushUnsafe() = '0'; + } + else if (str[i] == '/') { + *documentStack_.template PushUnsafe() = '~'; + *documentStack_.template PushUnsafe() = '1'; + } + else + *documentStack_.template PushUnsafe() = str[i]; + } + } + + RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push()) Context(*this, &schema); } + + RAPIDJSON_FORCEINLINE void PopSchema() { + Context* c = schemaStack_.template Pop(1); + if (HashCodeArray* a = static_cast(c->arrayElementHashCodes)) { + a->~HashCodeArray(); + StateAllocator::Free(a); + } + c->~Context(); + } + + const SchemaType& CurrentSchema() const { return *schemaStack_.template Top()->schema; } + Context& CurrentContext() { return *schemaStack_.template 
Top(); } + const Context& CurrentContext() const { return *schemaStack_.template Top(); } + + static OutputHandler& GetNullHandler() { + static OutputHandler nullHandler; + return nullHandler; + } + + static const size_t kDefaultSchemaStackCapacity = 1024; + static const size_t kDefaultDocumentStackCapacity = 256; + const SchemaDocumentType* schemaDocument_; + const SchemaType& root_; + OutputHandler& outputHandler_; + StateAllocator* stateAllocator_; + StateAllocator* ownStateAllocator_; + internal::Stack schemaStack_; //!< stack to store the current path of schema (BaseSchemaType *) + internal::Stack documentStack_; //!< stack to store the current path of validating document (Ch) + bool valid_; +#if RAPIDJSON_SCHEMA_VERBOSE + unsigned depth_; +#endif +}; + +typedef GenericSchemaValidator SchemaValidator; + +/////////////////////////////////////////////////////////////////////////////// +// SchemaValidatingReader + +//! A helper class for parsing with validation. +/*! + This helper class is a functor, designed as a parameter of \ref GenericDocument::Populate(). + + \tparam parseFlags Combination of \ref ParseFlag. + \tparam InputStream Type of input stream, implementing Stream concept. + \tparam SourceEncoding Encoding of the input stream. + \tparam SchemaDocumentType Type of schema document. + \tparam StackAllocator Allocator type for stack. +*/ +template < + unsigned parseFlags, + typename InputStream, + typename SourceEncoding, + typename SchemaDocumentType = SchemaDocument, + typename StackAllocator = CrtAllocator> +class SchemaValidatingReader { +public: + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename InputStream::Ch Ch; + + //! Constructor + /*! + \param is Input stream. + \param sd Schema document. 
+ */ + SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), isValid_(true) {} + + template + bool operator()(Handler& handler) { + GenericReader reader; + GenericSchemaValidator validator(sd_, handler); + parseResult_ = reader.template Parse(is_, validator); + + isValid_ = validator.IsValid(); + if (isValid_) { + invalidSchemaPointer_ = PointerType(); + invalidSchemaKeyword_ = 0; + invalidDocumentPointer_ = PointerType(); + } + else { + invalidSchemaPointer_ = validator.GetInvalidSchemaPointer(); + invalidSchemaKeyword_ = validator.GetInvalidSchemaKeyword(); + invalidDocumentPointer_ = validator.GetInvalidDocumentPointer(); + } + + return parseResult_; + } + + const ParseResult& GetParseResult() const { return parseResult_; } + bool IsValid() const { return isValid_; } + const PointerType& GetInvalidSchemaPointer() const { return invalidSchemaPointer_; } + const Ch* GetInvalidSchemaKeyword() const { return invalidSchemaKeyword_; } + const PointerType& GetInvalidDocumentPointer() const { return invalidDocumentPointer_; } + +private: + InputStream& is_; + const SchemaDocumentType& sd_; + + ParseResult parseResult_; + PointerType invalidSchemaPointer_; + const Ch* invalidSchemaKeyword_; + PointerType invalidDocumentPointer_; + bool isValid_; +}; + +RAPIDJSON_NAMESPACE_END +RAPIDJSON_DIAG_POP + +#endif // RAPIDJSON_SCHEMA_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/stream.h b/sql-odbc/libraries/rapidjson/include/rapidjson/stream.h new file mode 100644 index 0000000000..fef82c252f --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/stream.h @@ -0,0 +1,179 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#include "rapidjson.h" + +#ifndef RAPIDJSON_STREAM_H_ +#define RAPIDJSON_STREAM_H_ + +#include "encodings.h" + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Stream + +/*! \class rapidjson::Stream + \brief Concept for reading and writing characters. + + For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd(). + + For write-only stream, only need to implement Put() and Flush(). + +\code +concept Stream { + typename Ch; //!< Character type of the stream. + + //! Read the current character from stream without moving the read cursor. + Ch Peek() const; + + //! Read the current character from stream and moving the read cursor to next character. + Ch Take(); + + //! Get the current read cursor. + //! \return Number of characters read from start. + size_t Tell(); + + //! Begin writing operation at the current read pointer. + //! \return The begin writer pointer. + Ch* PutBegin(); + + //! Write a character. + void Put(Ch c); + + //! Flush the buffer. + void Flush(); + + //! End the writing operation. + //! \param begin The begin write pointer returned by PutBegin(). + //! \return Number of characters written. + size_t PutEnd(Ch* begin); +} +\endcode +*/ + +//! Provides additional information for stream. +/*! + By using traits pattern, this type provides a default configuration for stream. + For custom stream, this type can be specialized for other configuration. + See TEST(Reader, CustomStringStream) in readertest.cpp for example. +*/ +template +struct StreamTraits { + //! 
Whether to make local copy of stream for optimization during parsing. + /*! + By default, for safety, streams do not use local copy optimization. + Stream that can be copied fast should specialize this, like StreamTraits. + */ + enum { copyOptimization = 0 }; +}; + +//! Reserve n characters for writing to a stream. +template +inline void PutReserve(Stream& stream, size_t count) { + (void)stream; + (void)count; +} + +//! Write character to a stream, presuming buffer is reserved. +template +inline void PutUnsafe(Stream& stream, typename Stream::Ch c) { + stream.Put(c); +} + +//! Put N copies of a character to a stream. +template +inline void PutN(Stream& stream, Ch c, size_t n) { + PutReserve(stream, n); + for (size_t i = 0; i < n; i++) + PutUnsafe(stream, c); +} + +/////////////////////////////////////////////////////////////////////////////// +// StringStream + +//! Read-only string stream. +/*! \note implements Stream concept +*/ +template +struct GenericStringStream { + typedef typename Encoding::Ch Ch; + + GenericStringStream(const Ch *src) : src_(src), head_(src) {} + + Ch Peek() const { return *src_; } + Ch Take() { return *src_++; } + size_t Tell() const { return static_cast(src_ - head_); } + + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + const Ch* src_; //!< Current read position. + const Ch* head_; //!< Original head of the string. +}; + +template +struct StreamTraits > { + enum { copyOptimization = 1 }; +}; + +//! String stream with UTF8 encoding. +typedef GenericStringStream > StringStream; + +/////////////////////////////////////////////////////////////////////////////// +// InsituStringStream + +//! A read-write string stream. +/*! This string stream is particularly designed for in-situ parsing. 
+ \note implements Stream concept +*/ +template +struct GenericInsituStringStream { + typedef typename Encoding::Ch Ch; + + GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {} + + // Read + Ch Peek() { return *src_; } + Ch Take() { return *src_++; } + size_t Tell() { return static_cast(src_ - head_); } + + // Write + void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; } + + Ch* PutBegin() { return dst_ = src_; } + size_t PutEnd(Ch* begin) { return static_cast(dst_ - begin); } + void Flush() {} + + Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; } + void Pop(size_t count) { dst_ -= count; } + + Ch* src_; + Ch* dst_; + Ch* head_; +}; + +template +struct StreamTraits > { + enum { copyOptimization = 1 }; +}; + +//! Insitu string stream with UTF8 encoding. +typedef GenericInsituStringStream > InsituStringStream; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_STREAM_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/stringbuffer.h b/sql-odbc/libraries/rapidjson/include/rapidjson/stringbuffer.h new file mode 100644 index 0000000000..78f34d2098 --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/stringbuffer.h @@ -0,0 +1,117 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_STRINGBUFFER_H_ +#define RAPIDJSON_STRINGBUFFER_H_ + +#include "stream.h" +#include "internal/stack.h" + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS +#include // std::move +#endif + +#include "internal/stack.h" + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Represents an in-memory output stream. +/*! + \tparam Encoding Encoding of the stream. + \tparam Allocator type for allocating memory buffer. + \note implements Stream concept +*/ +template +class GenericStringBuffer { +public: + typedef typename Encoding::Ch Ch; + + GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {} + GenericStringBuffer& operator=(GenericStringBuffer&& rhs) { + if (&rhs != this) + stack_ = std::move(rhs.stack_); + return *this; + } +#endif + + void Put(Ch c) { *stack_.template Push() = c; } + void PutUnsafe(Ch c) { *stack_.template PushUnsafe() = c; } + void Flush() {} + + void Clear() { stack_.Clear(); } + void ShrinkToFit() { + // Push and pop a null terminator. This is safe. + *stack_.template Push() = '\0'; + stack_.ShrinkToFit(); + stack_.template Pop(1); + } + + void Reserve(size_t count) { stack_.template Reserve(count); } + Ch* Push(size_t count) { return stack_.template Push(count); } + Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe(count); } + void Pop(size_t count) { stack_.template Pop(count); } + + const Ch* GetString() const { + // Push and pop a null terminator. This is safe. + *stack_.template Push() = '\0'; + stack_.template Pop(1); + + return stack_.template Bottom(); + } + + size_t GetSize() const { return stack_.GetSize(); } + + static const size_t kDefaultCapacity = 256; + mutable internal::Stack stack_; + +private: + // Prohibit copy constructor & assignment operator. 
+ GenericStringBuffer(const GenericStringBuffer&); + GenericStringBuffer& operator=(const GenericStringBuffer&); +}; + +//! String buffer with UTF8 encoding +typedef GenericStringBuffer > StringBuffer; + +template +inline void PutReserve(GenericStringBuffer& stream, size_t count) { + stream.Reserve(count); +} + +template +inline void PutUnsafe(GenericStringBuffer& stream, typename Encoding::Ch c) { + stream.PutUnsafe(c); +} + +//! Implement specialized version of PutN() with memset() for better performance. +template<> +inline void PutN(GenericStringBuffer >& stream, char c, size_t n) { + std::memset(stream.stack_.Push(n), c, n * sizeof(c)); +} + +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_STRINGBUFFER_H_ diff --git a/sql-odbc/libraries/rapidjson/include/rapidjson/writer.h b/sql-odbc/libraries/rapidjson/include/rapidjson/writer.h new file mode 100644 index 0000000000..94f22dd5fc --- /dev/null +++ b/sql-odbc/libraries/rapidjson/include/rapidjson/writer.h @@ -0,0 +1,610 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_WRITER_H_ +#define RAPIDJSON_WRITER_H_ + +#include "stream.h" +#include "internal/stack.h" +#include "internal/strfunc.h" +#include "internal/dtoa.h" +#include "internal/itoa.h" +#include "stringbuffer.h" +#include // placement new + +#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER) +#include +#pragma intrinsic(_BitScanForward) +#endif +#ifdef RAPIDJSON_SSE42 +#include +#elif defined(RAPIDJSON_SSE2) +#include +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(unreachable-code) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// WriteFlag + +/*! \def RAPIDJSON_WRITE_DEFAULT_FLAGS + \ingroup RAPIDJSON_CONFIG + \brief User-defined kWriteDefaultFlags definition. + + User can define this as any \c WriteFlag combinations. +*/ +#ifndef RAPIDJSON_WRITE_DEFAULT_FLAGS +#define RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNoFlags +#endif + +//! Combination of writeFlags +enum WriteFlag { + kWriteNoFlags = 0, //!< No flags are set. + kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings. + kWriteNanAndInfFlag = 2, //!< Allow writing of Infinity, -Infinity and NaN. + kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS +}; + +//! JSON writer +/*! Writer implements the concept Handler. + It generates JSON text by events to an output os. + + User may programmatically calls the functions of a writer to generate JSON text. + + On the other side, a writer can also be passed to objects that generates events, + + for example Reader::Parse() and Document::Accept(). + + \tparam OutputStream Type of output stream. + \tparam SourceEncoding Encoding of source string. + \tparam TargetEncoding Encoding of output stream. 
+ \tparam StackAllocator Type of allocator for allocating memory of stack. + \note implements Handler concept +*/ +template, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> +class Writer { +public: + typedef typename SourceEncoding::Ch Ch; + + static const int kDefaultMaxDecimalPlaces = 324; + + //! Constructor + /*! \param os Output stream. + \param stackAllocator User supplied allocator. If it is null, it will create a private one. + \param levelDepth Initial capacity of stack. + */ + explicit + Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) : + os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {} + + explicit + Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) : + os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {} + + //! Reset the writer with a new stream. + /*! + This function reset the writer with a new stream and default settings, + in order to make a Writer object reusable for output multiple JSONs. + + \param os New output stream. + \code + Writer writer(os1); + writer.StartObject(); + // ... + writer.EndObject(); + + writer.Reset(os2); + writer.StartObject(); + // ... + writer.EndObject(); + \endcode + */ + void Reset(OutputStream& os) { + os_ = &os; + hasRoot_ = false; + level_stack_.Clear(); + } + + //! Checks whether the output is a complete JSON. + /*! + A complete JSON has a complete root object or array. + */ + bool IsComplete() const { + return hasRoot_ && level_stack_.Empty(); + } + + int GetMaxDecimalPlaces() const { + return maxDecimalPlaces_; + } + + //! Sets the maximum number of decimal places for double output. + /*! + This setting truncates the output with specified number of decimal places. 
+ + For example, + + \code + writer.SetMaxDecimalPlaces(3); + writer.StartArray(); + writer.Double(0.12345); // "0.123" + writer.Double(0.0001); // "0.0" + writer.Double(1.234567890123456e30); // "1.234567890123456e30" (do not truncate significand for positive exponent) + writer.Double(1.23e-4); // "0.0" (do truncate significand for negative exponent) + writer.EndArray(); + \endcode + + The default setting does not truncate any decimal places. You can restore to this setting by calling + \code + writer.SetMaxDecimalPlaces(Writer::kDefaultMaxDecimalPlaces); + \endcode + */ + void SetMaxDecimalPlaces(int maxDecimalPlaces) { + maxDecimalPlaces_ = maxDecimalPlaces; + } + + /*!@name Implementation of Handler + \see Handler + */ + //@{ + + bool Null() { Prefix(kNullType); return EndValue(WriteNull()); } + bool Bool(bool b) { Prefix(b ? kTrueType : kFalseType); return EndValue(WriteBool(b)); } + bool Int(int i) { Prefix(kNumberType); return EndValue(WriteInt(i)); } + bool Uint(unsigned u) { Prefix(kNumberType); return EndValue(WriteUint(u)); } + bool Int64(int64_t i64) { Prefix(kNumberType); return EndValue(WriteInt64(i64)); } + bool Uint64(uint64_t u64) { Prefix(kNumberType); return EndValue(WriteUint64(u64)); } + + //! Writes the given \c double value to the stream + /*! + \param d The value to be written. + \return Whether it is succeed. 
+ */ + bool Double(double d) { Prefix(kNumberType); return EndValue(WriteDouble(d)); } + + bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + Prefix(kNumberType); + return EndValue(WriteString(str, length)); + } + + bool String(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + Prefix(kStringType); + return EndValue(WriteString(str, length)); + } + +#if RAPIDJSON_HAS_STDSTRING + bool String(const std::basic_string& str) { + return String(str.data(), SizeType(str.size())); + } +#endif + + bool StartObject() { + Prefix(kObjectType); + new (level_stack_.template Push()) Level(false); + return WriteStartObject(); + } + + bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } + + bool EndObject(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); + RAPIDJSON_ASSERT(!level_stack_.template Top()->inArray); + level_stack_.template Pop(1); + return EndValue(WriteEndObject()); + } + + bool StartArray() { + Prefix(kArrayType); + new (level_stack_.template Push()) Level(true); + return WriteStartArray(); + } + + bool EndArray(SizeType elementCount = 0) { + (void)elementCount; + RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); + RAPIDJSON_ASSERT(level_stack_.template Top()->inArray); + level_stack_.template Pop(1); + return EndValue(WriteEndArray()); + } + //@} + + /*! @name Convenience extensions */ + //@{ + + //! Simpler but slower overload. + bool String(const Ch* str) { return String(str, internal::StrLen(str)); } + bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } + + //@} + + //! Write a raw JSON value. + /*! + For user to write a stringified JSON as a value. + + \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. + \param length Length of the json. + \param type Type of the root of json. 
+ */ + bool RawValue(const Ch* json, size_t length, Type type) { Prefix(type); return EndValue(WriteRawValue(json, length)); } + +protected: + //! Information for each nested level + struct Level { + Level(bool inArray_) : valueCount(0), inArray(inArray_) {} + size_t valueCount; //!< number of values in this level + bool inArray; //!< true if in array, otherwise in object + }; + + static const size_t kDefaultLevelDepth = 32; + + bool WriteNull() { + PutReserve(*os_, 4); + PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true; + } + + bool WriteBool(bool b) { + if (b) { + PutReserve(*os_, 4); + PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'r'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'e'); + } + else { + PutReserve(*os_, 5); + PutUnsafe(*os_, 'f'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 's'); PutUnsafe(*os_, 'e'); + } + return true; + } + + bool WriteInt(int i) { + char buffer[11]; + const char* end = internal::i32toa(i, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteUint(unsigned u) { + char buffer[10]; + const char* end = internal::u32toa(u, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteInt64(int64_t i64) { + char buffer[21]; + const char* end = internal::i64toa(i64, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteUint64(uint64_t u64) { + char buffer[20]; + char* end = internal::u64toa(u64, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteDouble(double d) { + if (internal::Double(d).IsNanOrInf()) { + if (!(writeFlags & 
kWriteNanAndInfFlag)) + return false; + if (internal::Double(d).IsNan()) { + PutReserve(*os_, 3); + PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); + return true; + } + if (internal::Double(d).Sign()) { + PutReserve(*os_, 9); + PutUnsafe(*os_, '-'); + } + else + PutReserve(*os_, 8); + PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); + PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); + return true; + } + + char buffer[25]; + char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); + PutReserve(*os_, static_cast(end - buffer)); + for (char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteString(const Ch* str, SizeType length) { + static const typename TargetEncoding::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + static const char escape[256] = { +#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + //0 1 2 3 4 5 6 7 8 9 A B C D E F + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00 + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10 + 0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20 + Z16, Z16, // 30~4F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, // 50 + Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 // 60~FF +#undef Z16 + }; + + if (TargetEncoding::supportUnicode) + PutReserve(*os_, 2 + length * 6); // "\uxxxx..." + else + PutReserve(*os_, 2 + length * 12); // "\uxxxx\uyyyy..." 
+ + PutUnsafe(*os_, '\"'); + GenericStringStream is(str); + while (ScanWriteUnescapedString(is, length)) { + const Ch c = is.Peek(); + if (!TargetEncoding::supportUnicode && static_cast(c) >= 0x80) { + // Unicode escaping + unsigned codepoint; + if (RAPIDJSON_UNLIKELY(!SourceEncoding::Decode(is, &codepoint))) + return false; + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, 'u'); + if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) { + PutUnsafe(*os_, hexDigits[(codepoint >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint ) & 15]); + } + else { + RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF); + // Surrogate pair + unsigned s = codepoint - 0x010000; + unsigned lead = (s >> 10) + 0xD800; + unsigned trail = (s & 0x3FF) + 0xDC00; + PutUnsafe(*os_, hexDigits[(lead >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(lead >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(lead >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(lead ) & 15]); + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, 'u'); + PutUnsafe(*os_, hexDigits[(trail >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(trail >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(trail >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(trail ) & 15]); + } + } + else if ((sizeof(Ch) == 1 || static_cast(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast(c)])) { + is.Take(); + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, static_cast(escape[static_cast(c)])); + if (escape[static_cast(c)] == 'u') { + PutUnsafe(*os_, '0'); + PutUnsafe(*os_, '0'); + PutUnsafe(*os_, hexDigits[static_cast(c) >> 4]); + PutUnsafe(*os_, hexDigits[static_cast(c) & 0xF]); + } + } + else if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ? 
+ Transcoder::Validate(is, *os_) : + Transcoder::TranscodeUnsafe(is, *os_)))) + return false; + } + PutUnsafe(*os_, '\"'); + return true; + } + + bool ScanWriteUnescapedString(GenericStringStream& is, size_t length) { + return RAPIDJSON_LIKELY(is.Tell() < length); + } + + bool WriteStartObject() { os_->Put('{'); return true; } + bool WriteEndObject() { os_->Put('}'); return true; } + bool WriteStartArray() { os_->Put('['); return true; } + bool WriteEndArray() { os_->Put(']'); return true; } + + bool WriteRawValue(const Ch* json, size_t length) { + PutReserve(*os_, length); + for (size_t i = 0; i < length; i++) { + RAPIDJSON_ASSERT(json[i] != '\0'); + PutUnsafe(*os_, json[i]); + } + return true; + } + + void Prefix(Type type) { + (void)type; + if (RAPIDJSON_LIKELY(level_stack_.GetSize() != 0)) { // this value is not at root + Level* level = level_stack_.template Top(); + if (level->valueCount > 0) { + if (level->inArray) + os_->Put(','); // add comma if it is not the first element in array + else // in object + os_->Put((level->valueCount % 2 == 0) ? ',' : ':'); + } + if (!level->inArray && level->valueCount % 2 == 0) + RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name + level->valueCount++; + } + else { + RAPIDJSON_ASSERT(!hasRoot_); // Should only has one and only one root. + hasRoot_ = true; + } + } + + // Flush the value if it is the top level one. + bool EndValue(bool ret) { + if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text + os_->Flush(); + return ret; + } + + OutputStream* os_; + internal::Stack level_stack_; + int maxDecimalPlaces_; + bool hasRoot_; + +private: + // Prohibit copy constructor & assignment operator. 
+ Writer(const Writer&); + Writer& operator=(const Writer&); +}; + +// Full specialization for StringStream to prevent memory copying + +template<> +inline bool Writer::WriteInt(int i) { + char *buffer = os_->Push(11); + const char* end = internal::i32toa(i, buffer); + os_->Pop(static_cast(11 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteUint(unsigned u) { + char *buffer = os_->Push(10); + const char* end = internal::u32toa(u, buffer); + os_->Pop(static_cast(10 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteInt64(int64_t i64) { + char *buffer = os_->Push(21); + const char* end = internal::i64toa(i64, buffer); + os_->Pop(static_cast(21 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteUint64(uint64_t u) { + char *buffer = os_->Push(20); + const char* end = internal::u64toa(u, buffer); + os_->Pop(static_cast(20 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteDouble(double d) { + if (internal::Double(d).IsNanOrInf()) { + // Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag). 
+ if (!(kWriteDefaultFlags & kWriteNanAndInfFlag)) + return false; + if (internal::Double(d).IsNan()) { + PutReserve(*os_, 3); + PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); + return true; + } + if (internal::Double(d).Sign()) { + PutReserve(*os_, 9); + PutUnsafe(*os_, '-'); + } + else + PutReserve(*os_, 8); + PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); + PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); + return true; + } + + char *buffer = os_->Push(25); + char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); + os_->Pop(static_cast(25 - (end - buffer))); + return true; +} + +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) +template<> +inline bool Writer::ScanWriteUnescapedString(StringStream& is, size_t length) { + if (length < 16) + return RAPIDJSON_LIKELY(is.Tell() < length); + + if (!RAPIDJSON_LIKELY(is.Tell() < length)) + return false; + + const char* p = is.src_; + const char* end = is.head_ + length; + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + const char* endAligned = reinterpret_cast(reinterpret_cast(end) & static_cast(~15)); + if (nextAligned > end) + return true; + + while (p != nextAligned) + if (*p < 0x20 || *p == '\"' || *p == '\\') { + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); + } + else + os_->PutUnsafe(*p++); + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = 
_mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (; p != endAligned; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + SizeType len; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + len = offset; +#else + len = static_cast(__builtin_ffs(r) - 1); +#endif + char* q = reinterpret_cast(os_->PushUnsafe(len)); + for (size_t i = 0; i < len; i++) + q[i] = p[i]; + + p += len; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(os_->PushUnsafe(16)), s); + } + + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); +} +#endif // defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) + +RAPIDJSON_NAMESPACE_END + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/sql-odbc/libraries/rapidjson/license.txt b/sql-odbc/libraries/rapidjson/license.txt new file mode 100644 index 0000000000..22ed8e29ba --- /dev/null +++ b/sql-odbc/libraries/rapidjson/license.txt @@ -0,0 +1,57 @@ +Tencent is pleased to support the open source community by making RapidJSON available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License. 
+If you have downloaded a copy of the RapidJSON source code from Tencent, please note that RapidJSON source code is licensed under the MIT License, except for the third-party components listed below which are subject to different license terms. Your integration of RapidJSON into your own projects may require compliance with the MIT License, as well as the other licenses applicable to the third-party components included within RapidJSON. To avoid the problematic JSON license in your own projects, it's sufficient to exclude the bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. + +Other dependencies and licenses: + +Open Source Software Licensed Under the BSD License: +-------------------------------------------------------------------- + +The msinttypes r29 +Copyright (c) 2006-2013 Alexander Chemeris +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Open Source Software Licensed Under the JSON License: +-------------------------------------------------------------------- + +json.org +Copyright (c) 2002 JSON.org +All Rights Reserved. + +JSON_checker +Copyright (c) 2002 JSON.org +All Rights Reserved. + + +Terms of the JSON License: +--------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +Terms of the MIT License: +-------------------------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/sql-odbc/opendistro-elasticsearch-odbc.release-notes.md b/sql-odbc/opendistro-elasticsearch-odbc.release-notes.md new file mode 100644 index 0000000000..070d15b31c --- /dev/null +++ b/sql-odbc/opendistro-elasticsearch-odbc.release-notes.md @@ -0,0 +1,13 @@ +## 2020-05-05, Version 1.7 + +### Notable Changes + +This is the first release of OpenDistro For Elasticsearch ODBC driver. + +OpenDistro ODBC provides a driver for ODBC connectivity for OpenDistro SQL plugin.
The driver has been developed from scratch and offers the following features in this initial release: + +* ODBC API implementation as per ODBC 3.51 specifications +* Support for MacOS and Windows installers +* Support for HTTP BASIC and AWS SIGV4 authentication mechanisms +* Full support for Elasticsearch Datatypes: BOOLEAN, BYTE, SHORT, INTEGER, LONG, HALF_FLOAT, FLOAT, DOUBLE, SCALED_FLOAT, KEYWORD, TEXT + diff --git a/sql-odbc/release-notes/sql-odbc.release-notes-1.7.0.0.md b/sql-odbc/release-notes/sql-odbc.release-notes-1.7.0.0.md new file mode 100644 index 0000000000..d364e473ca --- /dev/null +++ b/sql-odbc/release-notes/sql-odbc.release-notes-1.7.0.0.md @@ -0,0 +1,48 @@ +## 2020-05-05, Version 1.7 + +This is the first release of OpenDistro For Elasticsearch ODBC driver. + +OpenDistro ODBC provides a driver for ODBC connectivity for OpenDistro SQL plugin. The driver has been developed from scratch and offers the following features in this initial release: + +* ODBC API implementation as per ODBC 3.51 specifications +* Support for MacOS and Windows installers +* Support for HTTP BASIC and AWS SIGV4 authentication mechanisms +* Full support for Elasticsearch Datatypes: BOOLEAN, BYTE, SHORT, INTEGER, LONG, HALF_FLOAT, FLOAT, DOUBLE, SCALED_FLOAT, KEYWORD, TEXT + +### Features + +* Feature[#7](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/7): Add support for connection string abbreviations + +* Feature[#2](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/2): Connection string refactoring and registry updates + +* Feature[#27](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/27): Simple Windows Installer + + +* Feature[#78](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/78): Add fetch_size for pagination support + + +### Documentation + +* [Pagination support design document](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/40) +* [Update README for authentication & encryption configuration
options](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/33) +* [Instructions for signing installers](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/84) + + + +### BugFixes + +* [Fix AWS authentication for Tableau on Mac](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/9) + +* [Mac installer fixes](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/73) + + +* [Fix General installer components](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/69) + + + + + + + + + diff --git a/sql-odbc/release-notes/sql-odbc.release-notes-1.8.0.0.md b/sql-odbc/release-notes/sql-odbc.release-notes-1.8.0.0.md new file mode 100644 index 0000000000..40911b2fc9 --- /dev/null +++ b/sql-odbc/release-notes/sql-odbc.release-notes-1.8.0.0.md @@ -0,0 +1,9 @@ +## 2020-05-18, Version 1.8 + +### Features + +* Feature[#81](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/82): Add Tableau connector source files + +### Documentation + +* [Add supported OS version in README.md](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/88) diff --git a/sql-odbc/release-notes/sql-odbc.release-notes-1.9.0.0.md b/sql-odbc/release-notes/sql-odbc.release-notes-1.9.0.0.md new file mode 100644 index 0000000000..c79ca74b1e --- /dev/null +++ b/sql-odbc/release-notes/sql-odbc.release-notes-1.9.0.0.md @@ -0,0 +1,22 @@ +## 2020-06-24, Version 1.9 + +### Features + +* Feature [#96](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/96): Updating tableau connector files +* Feature [#99](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/99): Add 32bit support for driver +* Feature [#101](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/101): Pagination support +* Feature [#107](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/107): Add support for building with code coverage +* Feature [#109](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/109): Remove support for NOW in tableau
connector +* Feature [#112](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/112): Updating SQLRowCount function support +* Feature [#114](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/114): Remove old driver files before installing Mac driver +* Feature [#119](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/119): Add CAP_SUPPRESS_GET_SERVER_TIME instead of removing support for NOW() in tableau connector +* Feature [#120](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/120): Use a queue which is created with a capacity while getting result pages + + +### Documentation + +* Documentation [#93](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/93): Update build instructions +* Documentation [#116](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/116): Update README + +### BugFixes +* BugFix [#118](https://github.com/opendistro-for-elasticsearch/sql-odbc/pull/118): Fix ODBC administrator GUI on Windows diff --git a/sql-odbc/run_cppcheck.bat b/sql-odbc/run_cppcheck.bat new file mode 100644 index 0000000000..af78df35e7 --- /dev/null +++ b/sql-odbc/run_cppcheck.bat @@ -0,0 +1,19 @@ +:: +:: Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"). +:: You may not use this file except in compliance with the License. +:: A copy of the License is located at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: or in the "license" file accompanying this file. This file is distributed +:: on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +:: express or implied. See the License for the specific language governing +:: permissions and limitations under the License.
+:: + +:: --force: force checks all define combinations (default max is 12) +:: --suppress=objectIndex: seemingly false-positive (TODO: investigate this further) +:: -iaws-sdk-cpp: avoid checking AWS C++ SDK source files in our repo +cppcheck.exe --force --suppress=objectIndex -iaws-sdk-cpp .\src\ 2> cppcheck-results.log \ No newline at end of file diff --git a/sql-odbc/run_cppcheck.sh b/sql-odbc/run_cppcheck.sh new file mode 100755 index 0000000000..bff5c66da1 --- /dev/null +++ b/sql-odbc/run_cppcheck.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +RESULTS_FILE=cppcheck-results.log + +# --force: force checks all define combinations (default max is 12) +# -iaws-sdk-cpp: avoid checking AWS C++ SDK source files in our repo +# -UWIN32: do not check WIN32-defined codepaths; this would throw errors on Mac +cppcheck --force -iaws-sdk-cpp -UWIN32 ./src 2> ${RESULTS_FILE} + +if [ -s ${RESULTS_FILE} ]; then + echo "!! Cppcheck errors found! Check ${RESULTS_FILE} for details." + exit 1 +else + echo "No Cppcheck errors found." +fi \ No newline at end of file diff --git a/sql-odbc/run_test_runner.bat b/sql-odbc/run_test_runner.bat new file mode 100644 index 0000000000..3f08b46ddd --- /dev/null +++ b/sql-odbc/run_test_runner.bat @@ -0,0 +1,32 @@ +:: +:: Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +:: +:: Licensed under the Apache License, Version 2.0 (the "License"). +:: You may not use this file except in compliance with the License. +:: A copy of the License is located at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: or in the "license" file accompanying this file. This file is distributed +:: on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +:: express or implied. See the License for the specific language governing +:: permissions and limitations under the License. 
+:: + +set PROJECT_DIR=%CD% +set TEST_RUNNER_DIR=%PROJECT_DIR%\src\TestRunner +set WORKING_DIR=%PROJECT_DIR%\bin64\Release + +cd %WORKING_DIR% + +py -m pip install mako + +py %TEST_RUNNER_DIR%\test_runner.py -i %TEST_RUNNER_DIR%\mako_template.html -o test_output.html -e %TEST_RUNNER_DIR%\test_exclude_list.txt + +set ERROR_CODE=%ERRORLEVEL% + +cd %PROJECT_DIR% + +echo %ERROR_CODE% + +EXIT /b %ERROR_CODE% \ No newline at end of file diff --git a/sql-odbc/run_test_runner.sh b/sql-odbc/run_test_runner.sh new file mode 100755 index 0000000000..46de52f0b8 --- /dev/null +++ b/sql-odbc/run_test_runner.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + + +PROJECT_DIR=$(pwd) +TEST_RUNNER_DIR=${PROJECT_DIR}/src/TestRunner +WORKING_DIR=${PROJECT_DIR}/bin64 + +cd ${WORKING_DIR} + +pip3 install mako + +python3 ${TEST_RUNNER_DIR}/test_runner.py -i ${TEST_RUNNER_DIR}/mako_template.html -o ${PROJECT_DIR}/test_output.html -e ${TEST_RUNNER_DIR}/test_exclude_list.txt + +ERROR_CODE=$? + +cd .. + +exit ${ERROR_CODE} diff --git a/sql-odbc/src/CMakeLists.txt b/sql-odbc/src/CMakeLists.txt new file mode 100644 index 0000000000..d1ebcbcbfd --- /dev/null +++ b/sql-odbc/src/CMakeLists.txt @@ -0,0 +1,177 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +# Pre 3.16 versions of Windows set MD/MT incorrectly and cause linker 'warnings' which are actually serious issues +if(WIN32) +cmake_minimum_required(VERSION 3.16) +else() +cmake_minimum_required(VERSION 3.13) +endif() + +project(global_make_list) + +include("${CMAKE_CURRENT_SOURCE_DIR}/modules/code-coverage.cmake") +add_code_coverage_all_targets(EXCLUDE libraries aws-cpp-sdk googletest IntegrationTests) + +# This wasn't being picked up on mac, causes some symbol errors +if(APPLE) + set(CMAKE_CXX_STANDARD 20) + add_definitions(-DHAVE_STRLCAT) + # Do not use shared libraries for AWS SDK. + option(BUILD_SHARED_LIBS "Build shared libraries" OFF) + # Set BUILD_WITH_TESTS to OFF before building installer package for size optimization. + option(BUILD_WITH_TESTS "Enable testing" ON) +else() + set(CMAKE_CXX_STANDARD 17) + # Use shared libraries for AWS SDK. + option(BUILD_SHARED_LIBS "Build shared libraries" ON) + # Set BUILD_WITH_TESTS to OFF before building installer package for size optimization. 
+ option(BUILD_WITH_TESTS "Enable testing" ON) +endif() + +if(MSVC) + add_compile_options(/W4 /WX) +else() + add_compile_options(-Wall -Wextra -pedantic -Werror) +endif() + +if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(BITNESS 64) + set(BITNESS_NAME "x64") + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../lib64") + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../lib64") + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../bin64") +elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(BITNESS 32) + set(BITNESS_NAME "x86") + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../lib32") + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../lib32") + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../bin32") +endif() + +if(NOT WIN32) + # Unix builds require autoconf + option(AUTOCONF_ENABLE "Enable autoconf" ON) + configure_file(autoconf.h.in generated/autoconf.h @ONLY) +endif() + +# Base directories +set(PROJECT_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/..") +set(ODFEODBC_SRC "${CMAKE_CURRENT_SOURCE_DIR}/odfesqlodbc") +set(ODFEENLIST_SRC "${CMAKE_CURRENT_SOURCE_DIR}/odfeenlist") +set(INTEGRATION_TESTS "${CMAKE_CURRENT_SOURCE_DIR}/IntegrationTests") +set(UNIT_TESTS "${CMAKE_CURRENT_SOURCE_DIR}/UnitTests") +set(LIBRARY_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../libraries) +set(INSTALL_SRC "${CMAKE_CURRENT_SOURCE_DIR}/installer") +set(DSN_INSTALLER_SRC "${CMAKE_CURRENT_SOURCE_DIR}/DSNInstaller") + +# ODBC Driver version +set(DRIVER_PACKAGE_VERSION "1.9.0.0") +set(DRIVER_PACKAGE_VERSION_COMMA_SEPARATED "1,9,0,0") +add_compile_definitions( ES_ODBC_VERSION="${DRIVER_PACKAGE_VERSION}" + # Comma separated version is required for odbc administrator's driver file. 
+ ES_ODBC_DRVFILE_VERSION=${DRIVER_PACKAGE_VERSION_COMMA_SEPARATED} ) + +# Extensions of base directories +set(PERFORMANCE_TESTS "${CMAKE_CURRENT_SOURCE_DIR}/PerformanceTests") +set(UT_HELPER "${UNIT_TESTS}/UTHelper") +set(IT_HELPER "${INTEGRATION_TESTS}/ITODBCHelper") +set(RABBIT_SRC ${LIBRARY_DIRECTORY}/rabbit/include) +set(RAPIDJSON_SRC ${LIBRARY_DIRECTORY}/rapidjson/include) +set(VLD_SRC ${LIBRARY_DIRECTORY}/VisualLeakDetector/include) + +# Without this symbols will be exporting to Unix but not Windows +set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) + +# Set path for AWS SDK +set(aws-cpp-sdk-base "${CMAKE_CURRENT_SOURCE_DIR}/aws-sdk-cpp") +set(aws-cpp-sdk-core_DIR "${PROJECT_ROOT}/sdk-build${BITNESS}/AWSSDK/lib/cmake/aws-cpp-sdk-core") +set(aws-c-event-stream_DIR "${PROJECT_ROOT}/sdk-build${BITNESS}/AWSSDK/lib/aws-c-event-stream/cmake") +set(aws-c-common_DIR "${PROJECT_ROOT}/sdk-build${BITNESS}/AWSSDK/lib/aws-c-common/cmake") +set(aws-checksums_DIR "${PROJECT_ROOT}/sdk-build${BITNESS}/AWSSDK/lib/aws-checksums/cmake") + +if (WIN32) + find_package(AWSSDK REQUIRED core) +endif() + +# General compiler definitions +add_compile_definitions ( _SILENCE_TR1_NAMESPACE_DEPRECATION_WARNING + UNICODE_SUPPORT + DYNAMIC_LOAD + _MBCS + _CRT_SECURE_NO_DEPRECATE + _USRDLL + _SILENCE_CXX17_ITERATOR_BASE_CLASS_DEPRECATION_WARNING + _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING + # USE_SSL + ) + +# Platform specific compiler definitions +if (WIN32 AND BITNESS EQUAL 64) + # Windows specific + add_compile_definitions ( _WINDLL + _WIN64 + _WINDOWS + WIN_EXPORT + ) +elseif (WIN32 AND BITNESS EQUAL 32) + # Windows specific + add_compile_definitions ( _WINDLL + _WIN32 + _WINDOWS + WIN_EXPORT + ) +elseif(APPLE) + # macOS specific + add_compile_definitions ( WITH_IODBC + SQLCOLATTRIBUTE_SQLLEN + ) + # iODBC includes + include_directories(/usr/local/include) +elseif(UNIX) + # Unix specific + add_compile_definitions ( WITH_UNIXODBC + SQLCOLATTRIBUTE_SQLLEN + ) +endif() + 
+if(BUILD_WITH_TESTS) + # GTest import + include(gtest/googletest.cmake) + fetch_googletest( + ${PROJECT_SOURCE_DIR}/gtest + ${PROJECT_BINARY_DIR}/googletest + ) + enable_testing() +endif() + +# Projects to build +if (APPLE) + add_subdirectory(${aws-cpp-sdk-base}) +endif() +add_subdirectory(${ODFEODBC_SRC}) +add_subdirectory(${ODFEENLIST_SRC}) +add_subdirectory(${INSTALL_SRC}) + +# Only build & package DSN installer for Mac platforms +if(APPLE) + add_subdirectory(${DSN_INSTALLER_SRC}) +endif() + +if(BUILD_WITH_TESTS) + add_subdirectory(${INTEGRATION_TESTS}) + add_subdirectory(${UNIT_TESTS}) + add_subdirectory(${PERFORMANCE_TESTS}) +endif() diff --git a/sql-odbc/src/DSNInstaller/CMakeLists.txt b/sql-odbc/src/DSNInstaller/CMakeLists.txt new file mode 100644 index 0000000000..d75132f91f --- /dev/null +++ b/sql-odbc/src/DSNInstaller/CMakeLists.txt @@ -0,0 +1,23 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(dsn_installer) + +set(SOURCE_FILES dsn_installer.cpp) + +add_executable(dsn_installer ${SOURCE_FILES}) + +target_link_libraries(dsn_installer iodbcinst) +target_compile_definitions(dsn_installer PUBLIC _UNICODE UNICODE) \ No newline at end of file diff --git a/sql-odbc/src/DSNInstaller/dsn_installer.cpp b/sql-odbc/src/DSNInstaller/dsn_installer.cpp new file mode 100644 index 0000000000..d51355a0ce --- /dev/null +++ b/sql-odbc/src/DSNInstaller/dsn_installer.cpp @@ -0,0 +1,225 @@ +/* + * Copyright <2019> Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include +#include +#include +#include +#include +#include +#include +// clang-format on + +// Necessary for installing driver, since the driver description needs to +// maintain null characters. +using namespace std::string_literals; + +std::wstring driver_name = L"ODFE SQL ODBC Driver"; +std::wstring driver_filename = L"libodfesqlodbc.dylib"; +std::wstring dsn_name = L"ODFE SQL ODBC DSN"; +std::wstring dsn_ini_filename = L"odfesqlodbc.ini"; + +std::wstring driver_name_placeholder = L"%DRIVER_NAME%"; +std::wstring driver_path_placeholder = L"%DRIVER_PATH%"; +std::wstring setup_path_placeholder = L"%SETUP_PATH%"; + +std::vector< std::pair< std::wstring, std::wstring > > dsn_options = { + {L"Driver", driver_path_placeholder}, + {L"Host", L"localhost"}, + {L"Port", L"9200"}, + {L"User", L""}, + {L"Password", L""}, + {L"Auth", L"NONE"}, + {L"UseSSL", L"0"}, + {L"ResponseTimeout", L"10"}}; + +void print_error_message(DWORD error_code, wchar_t *error_message) { + switch (error_code) { + case ODBC_ERROR_GENERAL_ERR: + printf("\t[GENERAL_ERR] %S\n", error_message); + break; + case ODBC_ERROR_INVALID_BUFF_LEN: + printf("\t[INVALID_BUFF_LEN] %S\n", error_message); + break; + case ODBC_ERROR_INVALID_HWND: + printf("\t[INVALID_HWND] %S\n", error_message); + break; + case ODBC_ERROR_INVALID_STR: + printf("\t[INVALID_STR] %S\n", error_message); + break; + case ODBC_ERROR_INVALID_REQUEST_TYPE: + 
printf("\t[INVALID_REQUEST_TYPE] %S\n", error_message); + break; + case ODBC_ERROR_COMPONENT_NOT_FOUND: + printf("\t[COMPONENT_NOT_FOUND] %S\n", error_message); + break; + case ODBC_ERROR_INVALID_NAME: + printf("\t[INVALID_NAME] %S\n", error_message); + break; + case ODBC_ERROR_INVALID_KEYWORD_VALUE: + printf("\t[INVALID_KEYWORD_VALUE] %S\n", error_message); + break; + case ODBC_ERROR_INVALID_PATH: + printf("\t[INVALID_PATH] %S\n", error_message); + break; + default: + printf("\t%d\n", error_code); + } +} + +void print_installer_error() { + int ret = 0; + + WORD error_message_max_length = SQL_MAX_MESSAGE_LENGTH; + DWORD out_error_code; + wchar_t error_message[SQL_MAX_MESSAGE_LENGTH]; + WORD error_message_num_bytes; + + WORD in_error_rec = 0; // (1-8) + do { + printf("Error %d:\n", ++in_error_rec); + ret = SQLInstallerErrorW(in_error_rec, &out_error_code, error_message, + error_message_max_length, + &error_message_num_bytes); + print_error_message(out_error_code, error_message); + } while (ret != SQL_NO_DATA); +} + +void replace_placeholder(std::wstring &source, std::wstring placeholder, + std::wstring contents) { + size_t index = source.find(placeholder); + if (index != std::string::npos) { + source.replace(index, placeholder.size(), contents); + } +} + +bool install_driver(std::wstring install_path) { + std::wstring driver_install_str = + L"%DRIVER_NAME%\0" + L"Driver=%DRIVER_PATH%\0" + L"Setup=%SETUP_PATH%\0\0"s; + std::wstring driver_path = install_path + driver_filename; + + replace_placeholder(driver_install_str, driver_name_placeholder, + driver_name); + replace_placeholder(driver_install_str, driver_path_placeholder, + driver_path); + replace_placeholder(driver_install_str, setup_path_placeholder, + driver_path); + + SQLWCHAR out_path[512]; + WORD out_path_length = 512; + WORD num_out_path_bytes; + DWORD out_usage_count = 0; + bool success = + SQLInstallDriverExW(driver_install_str.c_str(), install_path.c_str(), + out_path, out_path_length, 
&num_out_path_bytes, + ODBC_INSTALL_COMPLETE, &out_usage_count); + if (!success) { + print_installer_error(); + return false; + } + + return success; +} + +bool install_dsn() { + bool success = SQLWriteDSNToIniW(dsn_name.c_str(), driver_name.c_str()); + if (!success) { + print_installer_error(); + return false; + } + return success; +} + +bool add_properties_to_dsn( + std::vector< std::pair< std::wstring, std::wstring > > options, + std::wstring driver_path) { + bool success = false; + for (auto dsn_config_option : options) { + std::wstring key = dsn_config_option.first; + std::wstring value = dsn_config_option.second; + + if (value.find(driver_path_placeholder, 0) != std::string::npos) { + replace_placeholder(value, driver_path_placeholder, driver_path); + } + + success = SQLWritePrivateProfileStringW(dsn_name.c_str(), key.c_str(), + value.c_str(), + dsn_ini_filename.c_str()); + if (!success) { + print_installer_error(); + return false; + } + } + return success; +} + +bool uninstall_driver() { + bool remove_dsns = true; + DWORD out_usage_count = 0; + + bool success = + SQLRemoveDriverW(driver_name.c_str(), remove_dsns, &out_usage_count); + if (!success) { + print_installer_error(); + return false; + } + return success; +} + +int main(int argc, char *argv[]) { + // Get install path from args + if (!argv || argc != 2) { + printf("Error! 
Driver path not supplied\n"); + return 1; + } + std::wstring user_install_path = + std::wstring_convert< std::codecvt_utf8_utf16< wchar_t >, wchar_t >{} + .from_bytes(argv[1]); + + printf("User install path: %S\n", user_install_path.c_str()); + if (!user_install_path.compare(L"uninstall")) { + bool uninstall_driver_success = uninstall_driver(); + return uninstall_driver_success; + } + + // Install Driver entry + printf("Installing Driver entry...\n"); + bool install_driver_success = install_driver(user_install_path); + if (!install_driver_success) { + return 1; + } + + // Add DSN entry + printf("Adding DSN entry...\n"); + bool install_dsn_success = install_dsn(); + if (!install_dsn_success) { + return 1; + } + + // Add DSN properties + printf("Adding DSN properties...\n"); + bool add_properties_success = + add_properties_to_dsn(dsn_options, user_install_path + driver_filename); + if (!add_properties_success) { + return 1; + } + + printf("Finished adding DSN!\n"); + return 0; +} diff --git a/sql-odbc/src/IntegrationTests/CMakeLists.txt b/sql-odbc/src/IntegrationTests/CMakeLists.txt new file mode 100644 index 0000000000..9b3cce758d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/CMakeLists.txt @@ -0,0 +1,39 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(integration_tests) + +set(HELPER_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCHelper") +set(CATALOG_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCCatalog") +set(CONNECTION_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCConnection") +set(DESCRIPTOR_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCDescriptors") +set(EXECUTION_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCExecution") +set(INFO_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCInfo") +set(RESULTS_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCResults") +set(TABLEAU_QUERIES_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCTableauQueries") +set(AWS_AUTH_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCAwsAuth") +set(PAGINATION_ITEST "${CMAKE_CURRENT_SOURCE_DIR}/ITODBCPagination") + +# Projects to build +add_subdirectory(${HELPER_ITEST}) +add_subdirectory(${CATALOG_ITEST}) +add_subdirectory(${CONNECTION_ITEST}) +add_subdirectory(${DESCRIPTOR_ITEST}) +add_subdirectory(${EXECUTION_ITEST}) +add_subdirectory(${INFO_ITEST}) +add_subdirectory(${RESULTS_ITEST}) +add_subdirectory(${TABLEAU_QUERIES_ITEST}) +add_subdirectory(${AWS_AUTH_ITEST}) +add_subdirectory(${PAGINATION_ITEST}) \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/CMakeLists.txt new file mode 100644 index 0000000000..69ea4b7bb8 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(itodbc_aws_auth) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_aws_auth.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} + ${AWSSDK_INCLUDE_DIR} + ) + +# Generate executable +add_executable(itodbc_aws_auth ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(itodbc_aws_auth odfesqlodbc itodbc_helper ut_helper gtest_main aws-cpp-sdk-core) +target_compile_definitions(itodbc_aws_auth PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/pch.cpp new file mode 100644 index 0000000000..29d39801fe --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/pch.cpp @@ -0,0 +1,22 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/pch.h b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/pch.h new file mode 100644 index 0000000000..d09e7faad5 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/pch.h @@ -0,0 +1,24 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_aws_auth_dsn.reg b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_aws_auth_dsn.reg new file mode 100644 index 0000000000..30458e432d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_aws_auth_dsn.reg @@ -0,0 +1,17 @@ +Windows Registry Editor Version 5.00 + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBCINST.INI\ODBC Drivers] +"ElasticsearchODBC"="Installed" + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBCINST.INI\ElasticsearchODBC] +"Driver"="\\bin64\\Release\\odfesqlodbc.dll" +"Setup"="\\bin64\\Release\\odfesqlodbc.dll" + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources] +"test_aws_auth_dsn"="ElasticsearchODBC" + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBC.INI\test_aws_auth_dsn] +"host"="https://search-sept-cdg-david-test-domain-gouok3seqeupz64smuvfxyddui.eu-west-3.es.amazonaws.com" +"auth"="AWS_SIGV4" +"region"="eu-west-3" +"useSSL"="0" diff --git a/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbc.ini b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbc.ini new file mode 100644 index 0000000000..c5116456f0 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbc.ini @@ -0,0 +1,8 @@ +[ODBC Data Sources] +test_aws_auth_dsn = ElasticsearchODBC + +[test_aws_auth_dsn] +host = https://search-sept-cdg-david-test-domain-gouok3seqeupz64smuvfxyddui.eu-west-3.es.amazonaws.com +auth = AWS_SIGV4 +region = eu-west-3 +useSSL = 0 \ No newline at end of file diff --git 
a/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbc_aws_auth.cpp b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbc_aws_auth.cpp new file mode 100644 index 0000000000..999efaca34 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbc_aws_auth.cpp @@ -0,0 +1,132 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "pch.h" +#include "it_odbc_helper.h" +// clang-format on + +std::wstring dsn_name = L"test_aws_auth_dsn"; +std::wstring aws_auth_conn_string = + L"Driver={Elasticsearch};DataBase=database_name;" + L"Host=https://" + L"search-bit-quill-cx3hpfoxvasohujxkllmgjwqde.us-west-2." + L"es.amazonaws.com;" + L"Auth=AWS_SIGV4;Region=us-west-2;LogLevel=1"; +std::wstring aws_auth_conn_string_invalid_region = + L"Driver={Elasticsearch};DataBase=database_name;" + L"Host=https://" + L"search-bit-quill-cx3hpfoxvasohujxkllmgjwqde.us-west-2." + L"es.amazonaws.com;" + L"Auth=AWS_SIGV4;Region=us-west-3;LogLevel=1"; +std::wstring aws_auth_conn_string_invalid_authtype = + L"Driver={Elasticsearch};DataBase=database_name;" + L"Host=https://" + L"search-bit-quill-cx3hpfoxvasohujxkllmgjwqde.us-west-2." 
+ L"es.amazonaws.com;" + L"Auth=AWS;Region=us-west-2;LogLevel=1"; + +class TestAwsAuthConnection : public testing::Test { + public: + TestAwsAuthConnection(){ + } + + void SetUp() override { + } + + void AllocConnection() { + ASSERT_NO_FATAL_FAILURE(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &m_env)); + ASSERT_NO_FATAL_FAILURE(SQLSetEnvAttr(m_env, SQL_ATTR_ODBC_VERSION, (void*)SQL_OV_ODBC3, 0)); + ASSERT_NO_FATAL_FAILURE(SQLAllocHandle(SQL_HANDLE_DBC, m_env, &m_conn)); + } + + void TearDown() override { + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_DBC, m_conn); + SQLFreeHandle(SQL_HANDLE_ENV,m_env); + } + + ~TestAwsAuthConnection() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHENV m_env = SQL_NULL_HENV; + + private: + void TestBody() override { + } +}; + +TEST(TestAwsAuthConnection, SqlConnectSuccess) { + SQLRETURN ret = SQL_ERROR; + TestAwsAuthConnection test; + ASSERT_NO_FATAL_FAILURE(test.AllocConnection()); + ret = SQLConnect(test.m_conn, (SQLTCHAR*)dsn_name.c_str(), SQL_NTS, + (SQLTCHAR*)NULL, 0, (SQLTCHAR*)NULL, 0); + EXPECT_TRUE(SQL_SUCCEEDED(ret)); +} + +TEST(TestAwsAuthConnection, SqlDriverConnectSuccess) { + SQLRETURN ret; + TestAwsAuthConnection test; + SQLTCHAR out_conn_string[1024]; + SQLSMALLINT out_conn_string_length; + ASSERT_NO_FATAL_FAILURE(test.AllocConnection()); + ret = SQLDriverConnect(test.m_conn, NULL, + (SQLTCHAR*)aws_auth_conn_string.c_str(), SQL_NTS, + out_conn_string, IT_SIZEOF(out_conn_string), + &out_conn_string_length, SQL_DRIVER_COMPLETE); + EXPECT_TRUE(SQL_SUCCEEDED(ret)); +} + +TEST(TestAwsAuthConnection, SqlDriverConnectInvalidRegion) { + SQLRETURN ret; + TestAwsAuthConnection test; + SQLTCHAR out_conn_string[1024]; + SQLSMALLINT out_conn_string_length; + ASSERT_NO_FATAL_FAILURE(test.AllocConnection()); + ret = SQLDriverConnect(test.m_conn, NULL, + (SQLTCHAR*)aws_auth_conn_string_invalid_region.c_str(), + SQL_NTS, out_conn_string, IT_SIZEOF(out_conn_string), + 
&out_conn_string_length, SQL_DRIVER_COMPLETE); + EXPECT_EQ(SQL_ERROR,ret); +} + +TEST(TestAwsAuthConnection, SqlDriverConnectInvalidAuthType) { + SQLRETURN ret; + TestAwsAuthConnection test; + SQLTCHAR out_conn_string[1024]; + SQLSMALLINT out_conn_string_length; + ASSERT_NO_FATAL_FAILURE(test.AllocConnection()); + ret = SQLDriverConnect(test.m_conn, NULL, + (SQLTCHAR*)aws_auth_conn_string_invalid_authtype.c_str(), + SQL_NTS, out_conn_string, IT_SIZEOF(out_conn_string), + &out_conn_string_length, SQL_DRIVER_COMPLETE); + EXPECT_EQ(SQL_ERROR, ret); +} + +int main(int argc, char** argv) { + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + int failures = RUN_ALL_TESTS(); + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbcinst.ini b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbcinst.ini new file mode 100644 index 0000000000..283dd72937 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCAwsAuth/test_odbcinst.ini @@ -0,0 +1,6 @@ +[ODBC Drivers] +ElasticsearchODBC = Installed + +[ElasticsearchODBC] +Driver = /lib64/libodfesqlodbc.dylib +Setup = /lib64/libodfesqlodbc.dylib \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCCatalog/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCCatalog/CMakeLists.txt new file mode 100644 index 0000000000..45736a09f8 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCCatalog/CMakeLists.txt @@ -0,0 +1,31 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(itodbc_catalog) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_catalog.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_catalog ${SOURCE_FILES}) + +# Library dependencies +target_code_coverage(itodbc_catalog PUBLIC AUTO ALL) +target_link_libraries(itodbc_catalog odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_catalog PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCCatalog/packages.config b/sql-odbc/src/IntegrationTests/ITODBCCatalog/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCCatalog/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCCatalog/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCCatalog/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCCatalog/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCCatalog/pch.h b/sql-odbc/src/IntegrationTests/ITODBCCatalog/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCCatalog/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCCatalog/test_odbc_catalog.cpp b/sql-odbc/src/IntegrationTests/ITODBCCatalog/test_odbc_catalog.cpp new file mode 100644 index 0000000000..217bc42d9e --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCCatalog/test_odbc_catalog.cpp @@ -0,0 +1,653 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#define NOMINMAX 1 +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +// clang-format on + +// General test constants and structures +#define BIND_SIZE 512 +typedef struct bind_info { + SQLUSMALLINT ordinal; + SQLSMALLINT target_type; + SQLPOINTER target; + SQLLEN buffer_len; + SQLLEN out_len; + bind_info(SQLUSMALLINT _ordinal, SQLSMALLINT _target_type) { + ordinal = _ordinal; + target_type = _target_type; + out_len = 0; + data.resize(BIND_SIZE, '\0'); + buffer_len = data.size(); + target = data.data(); + } + std::string AsString() { + switch (target_type) { + case SQL_C_CHAR: + return reinterpret_cast< char* >(data.data()); + break; + case SQL_C_LONG: + return std::to_string( + *reinterpret_cast< unsigned long* >(data.data())); + break; + case SQL_C_SLONG: + return std::to_string( + *reinterpret_cast< signed long* >(data.data())); + break; + case SQL_C_SHORT: + return std::to_string( + *reinterpret_cast< signed short* >(data.data())); + break; + case SQL_C_SSHORT: + return std::to_string( + *reinterpret_cast< unsigned short* >(data.data())); + break; + default: + return "Unknown conversion type (" + std::to_string(target_type) + + ")"; + break; + } + } + + private: + std::vector< SQLCHAR > data; +} bind_info; + +// Column test constants and macro +const std::vector< std::string > flights_column_name = { + "FlightNum", "Origin", "OriginLocation", "DestLocation", + "FlightDelay", "DistanceMiles", "FlightTimeMin", "OriginWeather", + "dayOfWeek", "AvgTicketPrice", "Carrier", "FlightDelayMin", + "OriginRegion", "DestAirportID", "FlightDelayType", "timestamp", + "Dest", "FlightTimeHour", "Cancelled", "DistanceKilometers", + "OriginCityName", "DestWeather", "OriginCountry", "DestCountry", + "DestRegion", "DestCityName", "OriginAirportID"}; +const std::vector< std::string > flights_data_type = { + "keyword", 
"keyword", "geo_point", "geo_point", "boolean", "float", + "float", "keyword", "integer", "float", "keyword", "integer", + "keyword", "keyword", "keyword", "date", "keyword", "keyword", + "boolean", "float", "keyword", "keyword", "keyword", "keyword", + "keyword", "keyword", "keyword"}; +const std::string flights_catalog_odfe = "odfe-cluster"; +const std::string flights_catalog_elas = "elasticsearch"; +const std::string flights_table_name = "kibana_sample_data_flights"; +const std::string flights_decimal_digits = "10"; +const std::string flights_num_prec_radix = "2"; + +class TestSQLColumns : public testing::Test { + public: + TestSQLColumns() { + } + void SetUp() { + AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + } + void TearDown() { + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + ~TestSQLColumns() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +#define TEST_SQL_COLUMNS(test_name, catalog_patt, schema_patt, table_patt, \ + column_patt, enable_pattern, empty) \ + TEST_F(TestSQLColumns, test_name) { \ + EXPECT_EQ(SQL_SUCCESS, SQLSetStmtAttr(m_hstmt, SQL_ATTR_METADATA_ID, \ + (void*)(!enable_pattern), 0)); \ + SQLColumns(m_hstmt, catalog_patt, SQL_NTS, schema_patt, SQL_NTS, \ + table_patt, SQL_NTS, column_patt, SQL_NTS); \ + size_t result_count = 0; \ + SQLRETURN ret; \ + while ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) \ + result_count++; \ + EXPECT_EQ(ret, SQL_NO_DATA); \ + if (empty) \ + EXPECT_EQ(result_count, static_cast< size_t >(0)); \ + else \ + EXPECT_FALSE(result_count == 0); \ + } + +// Table test constants and macro +typedef struct table_data { + std::string catalog_name; + std::string schema_name; + std::string table_name; + std::string table_type; + std::string remarks; +} table_data; + +const std::vector< table_data 
> table_data_filtered{ + {"", "", "kibana_sample_data_ecommerce", "BASE TABLE", ""}, + {"", "", "kibana_sample_data_flights", "BASE TABLE", ""}, + {"", "", "kibana_sample_data_types", "BASE TABLE", ""}}; +const std::vector< table_data > table_data_single{ + {"", "", "kibana_sample_data_flights", "BASE TABLE", ""}}; +const std::vector< table_data > table_data_all{ + {"", "", "kibana_sample_data_ecommerce", "BASE TABLE", ""}, + {"", "", "kibana_sample_data_flights", "BASE TABLE", ""}, + {"", "", "kibana_sample_data_types", "BASE TABLE", ""}, +}; +const std::vector< table_data > excel_table_data_all{ + {"", "", "kibana_sample_data_ecommerce", "TABLE", ""}, + {"", "", "kibana_sample_data_flights", "TABLE", ""}, + {"", "", "kibana_sample_data_types", "TABLE", ""}, +}; +const std::vector< table_data > table_data_types{ + {"", "", "", "BASE TABLE", ""}}; +const std::vector< table_data > table_data_schemas{{"", "", "", "", ""}}; +const std::vector< table_data > table_data_catalogs{ + {"odfe-cluster", "", "", "", ""}}; + +class TestSQLTables : public testing::Test { + public: + TestSQLTables() { + } + void SetUp() { + AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + } + void TearDown() { + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + ~TestSQLTables() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +void CheckTableData(SQLHSTMT m_hstmt, + const std::vector< table_data >& expected_tables) { + std::vector< bind_info > binds; + binds.push_back(bind_info(1, SQL_C_CHAR)); + binds.push_back(bind_info(2, SQL_C_CHAR)); + binds.push_back(bind_info(3, SQL_C_CHAR)); + binds.push_back(bind_info(4, SQL_C_CHAR)); + + for (auto& it : binds) + SQLBindCol(m_hstmt, it.ordinal, it.target_type, it.target, + it.buffer_len, &it.out_len); + + SQLRETURN ret = 
SQL_ERROR; + if (expected_tables.empty()) { + // Verify that there is at least one table row. + size_t result_count = 0; + while ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) { + result_count++; + } + EXPECT_TRUE(result_count != 0); + } else { + // Fetch list of table rows from the Statement. + std::vector< table_data > server_tables; + while ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) { + table_data table = {binds[0].AsString(), binds[1].AsString(), + binds[2].AsString(), binds[3].AsString(), ""}; + server_tables.emplace_back(table); + } + + // Make sure that all expected tables are found. + for (auto expected_table : expected_tables) { + EXPECT_TRUE(std::any_of(server_tables.begin(), server_tables.end(), + [&](const table_data& d) { + return d.table_name + == expected_table.table_name; + })); + } + } + EXPECT_EQ(ret, SQL_NO_DATA); +} + +#define TEST_SQL_TABLES(test_name, catalog, schema, table, table_type, exp, \ + enable_pattern, empty) \ + TEST_F(TestSQLTables, test_name) { \ + EXPECT_EQ(SQL_SUCCESS, SQLSetStmtAttr(m_hstmt, SQL_ATTR_METADATA_ID, \ + (void*)(!enable_pattern), 0)); \ + EXPECT_TRUE(SQL_SUCCEEDED(SQLTables(m_hstmt, catalog, SQL_NTS, schema, \ + SQL_NTS, table, SQL_NTS, \ + table_type, SQL_NTS))); \ + if (empty) { \ + size_t result_count = 0; \ + SQLRETURN ret; \ + while ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) \ + result_count++; \ + EXPECT_EQ(ret, SQL_NO_DATA); \ + EXPECT_EQ(result_count, static_cast< size_t >(0)); \ + } else \ + CheckTableData(m_hstmt, exp); \ + } + +class TestSQLCatalogKeys : public testing::Test { + public: + TestSQLCatalogKeys() { + } + void SetUp() { + AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + } + void TearDown() { + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + ~TestSQLCatalogKeys() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; 
+ SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +#define TEST_SQL_KEYS(test_name, test_function, ...) \ + TEST_F(TestSQLCatalogKeys, test_name) { \ + EXPECT_TRUE(SQL_SUCCEEDED(test_function(m_hstmt, __VA_ARGS__))); \ + size_t result_count = 0; \ + SQLRETURN ret; \ + while ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) \ + result_count++; \ + EXPECT_EQ(ret, SQL_NO_DATA); \ + EXPECT_EQ(result_count, static_cast< size_t >(0)); \ + } + +// SQL Tables Tests +// NULL test +TEST_SQL_TABLES(Null, NULL, NULL, NULL, NULL, table_data_all, true, false); + +// Catalog tests +TEST_SQL_TABLES(WildCatalogs, (SQLTCHAR*)L"%", (SQLTCHAR*)L"", (SQLTCHAR*)L"", + NULL, table_data_catalogs, false, false) + +// Schema tests +TEST_SQL_TABLES(WildSchema, (SQLTCHAR*)L"", (SQLTCHAR*)L"%", (SQLTCHAR*)L"", + NULL, table_data_schemas, false, false) + +// Table tests +TEST_SQL_TABLES(ValidTable, NULL, NULL, (SQLTCHAR*)L"kibana_sample_data%", NULL, + table_data_filtered, true, false); +TEST_SQL_TABLES(SingleTable, NULL, NULL, + (SQLTCHAR*)L"kibana_sample_data_flights", NULL, + table_data_single, false, false); +TEST_SQL_TABLES(WildTable, NULL, NULL, (SQLTCHAR*)L"%", NULL, table_data_all, + true, false); +TEST_SQL_TABLES(InvalidTable, NULL, NULL, (SQLTCHAR*)L"invalid_table", NULL, {}, + false, true); + +// Table types tests +TEST_SQL_TABLES(ValidTableType, (SQLTCHAR*)L"", (SQLTCHAR*)L"", (SQLTCHAR*)L"", + (SQLTCHAR*)L"%", table_data_types, false, false) +TEST_SQL_TABLES(InvalidTableType, (SQLTCHAR*)L"", (SQLTCHAR*)L"", + (SQLTCHAR*)L"", (SQLTCHAR*)L"invalid_table_type", + table_data_types, false, true) + +// Excel SQLTables test +TEST_SQL_TABLES(ExcelSQLTables, (SQLTCHAR*)L"%", NULL, NULL, + (SQLTCHAR*)L"TABLE,VIEW", excel_table_data_all, false, false); + +// SQL Columns Tests +// NULL test +TEST_SQL_COLUMNS(Null, NULL, NULL, NULL, NULL, true, false) + +// Table tests +TEST_SQL_COLUMNS(ValidTable, NULL, NULL, (SQLTCHAR*)L"kibana_%", NULL, true, + false) +TEST_SQL_COLUMNS(InvalidTable, NULL, NULL, 
(SQLTCHAR*)L"invalid_table", NULL, + true, true) + +// Column tests +TEST_SQL_COLUMNS(ValidColumn, NULL, NULL, NULL, (SQLTCHAR*)L"FlightNum", true, + false) +TEST_SQL_COLUMNS(InvalidColumn, NULL, NULL, NULL, (SQLTCHAR*)L"invalid_column", + true, true) + +// Table and column tests +TEST_SQL_COLUMNS(ValidTable_ValidColumn, NULL, NULL, (SQLTCHAR*)L"kibana_%", + NULL, true, false) +TEST_SQL_COLUMNS(ValidTable_InvalidColumn, NULL, NULL, (SQLTCHAR*)L"kibana_%", + (SQLTCHAR*)L"invalid_column", true, true) +TEST_SQL_COLUMNS(InvalidTable_ValidColumn, NULL, NULL, + (SQLTCHAR*)L"invalid_table", (SQLTCHAR*)L"FlightNum", true, + true) +TEST_SQL_COLUMNS(InvalidTable_InvalidColumn, NULL, NULL, + (SQLTCHAR*)L"invalid_table", (SQLTCHAR*)L"invalid_column", + true, true) + +// Data validation +TEST_F(TestSQLColumns, FlightsValidation) { + EXPECT_EQ(SQL_SUCCESS, SQLColumns(m_hstmt, NULL, SQL_NTS, NULL, SQL_NTS, + (SQLTCHAR*)L"kibana_sample_data_flights", + SQL_NTS, NULL, SQL_NTS)); + std::vector< bind_info > binds; + binds.push_back(bind_info(1, SQL_C_CHAR)); + binds.push_back(bind_info(2, SQL_C_CHAR)); + binds.push_back(bind_info(3, SQL_C_CHAR)); + binds.push_back(bind_info(4, SQL_C_CHAR)); + binds.push_back(bind_info(5, SQL_C_SSHORT)); + binds.push_back(bind_info(6, SQL_C_CHAR)); + binds.push_back(bind_info(8, SQL_C_SLONG)); + binds.push_back(bind_info(9, SQL_C_SSHORT)); + binds.push_back(bind_info(10, SQL_C_SSHORT)); + binds.push_back(bind_info(11, SQL_C_SSHORT)); + binds.push_back(bind_info(12, SQL_C_CHAR)); + binds.push_back(bind_info(13, SQL_C_CHAR)); + binds.push_back(bind_info(14, SQL_C_SSHORT)); + binds.push_back(bind_info(15, SQL_C_SSHORT)); + binds.push_back(bind_info(16, SQL_C_SLONG)); + binds.push_back(bind_info(17, SQL_C_SLONG)); + binds.push_back(bind_info(18, SQL_C_CHAR)); + + for (auto& it : binds) + SQLBindCol(m_hstmt, it.ordinal, it.target_type, it.target, + it.buffer_len, &it.out_len); + + size_t column_idx = 0; + while ((SQL_SUCCESS == SQLFetch(m_hstmt)) + && 
(column_idx < std::min(flights_column_name.size(), + flights_data_type.size()))) { + size_t ordinal = 0; + for (auto& it : binds) { + ordinal++; + switch (ordinal) { + case 1: + EXPECT_TRUE((it.AsString() == flights_catalog_elas) + || (it.AsString() == flights_catalog_odfe)); + break; + case 3: + EXPECT_EQ(it.AsString(), flights_table_name); + break; + case 4: + EXPECT_EQ(it.AsString(), flights_column_name[column_idx]); + break; + case 6: + EXPECT_EQ(it.AsString(), flights_data_type[column_idx]); + break; + case 9: + EXPECT_EQ(it.AsString(), flights_decimal_digits); + break; + case 10: + EXPECT_EQ(it.AsString(), flights_num_prec_radix); + break; + case 16: + EXPECT_EQ(it.AsString(), std::to_string(column_idx + 1)); + break; + default: + EXPECT_TRUE( + ((it.AsString() == "0") || (it.AsString() == ""))); + break; + } + } + column_idx++; + } + EXPECT_EQ(column_idx, static_cast< size_t >(27)); +} + +// We expect an empty result set for PrimaryKeys and ForeignKeys +// Tableau specified catalog and table +// NULL args +TEST_SQL_KEYS(PrimaryKeys_NULL, SQLPrimaryKeys, NULL, SQL_NTS, NULL, SQL_NTS, + NULL, SQL_NTS) +TEST_SQL_KEYS(ForeignKeys_NULL, SQLForeignKeys, NULL, SQL_NTS, NULL, SQL_NTS, + NULL, SQL_NTS, NULL, SQL_NTS, NULL, SQL_NTS, NULL, SQL_NTS) + +// Catalog specified +TEST_SQL_KEYS(PrimaryKeys_Catalog, SQLPrimaryKeys, NULL, SQL_NTS, + (SQLTCHAR*)L"odfe-cluster", SQL_NTS, NULL, SQL_NTS) +TEST_SQL_KEYS(ForeignKeys_Catalog, SQLForeignKeys, NULL, SQL_NTS, NULL, SQL_NTS, + NULL, SQL_NTS, NULL, SQL_NTS, (SQLTCHAR*)L"odfe-cluster", SQL_NTS, + NULL, SQL_NTS) + +// Table specified +TEST_SQL_KEYS(PrimaryKeys_Table, SQLPrimaryKeys, NULL, SQL_NTS, NULL, SQL_NTS, + (SQLTCHAR*)L"kibana_sample_data_flights", SQL_NTS) +TEST_SQL_KEYS(ForeignKeys_Table, SQLForeignKeys, NULL, SQL_NTS, NULL, SQL_NTS, + NULL, SQL_NTS, NULL, SQL_NTS, NULL, SQL_NTS, + (SQLTCHAR*)L"kibana_sample_data_flights", SQL_NTS) + +// Catalog and table specified +TEST_SQL_KEYS(PrimaryKeys_CatalogTable, 
SQLPrimaryKeys, NULL, SQL_NTS, + (SQLTCHAR*)L"odfe-cluster", SQL_NTS, + (SQLTCHAR*)L"kibana_sample_data_flights", SQL_NTS) +TEST_SQL_KEYS(ForeignKeys_CatalogTable, SQLForeignKeys, NULL, SQL_NTS, NULL, + SQL_NTS, NULL, SQL_NTS, NULL, SQL_NTS, (SQLTCHAR*)L"odfe-cluster", + SQL_NTS, (SQLTCHAR*)L"kibana_sample_data_flights", SQL_NTS) + +// GetTypeInfo expected output struct +typedef struct sample_data_getTypeInfo_struct { + std::string TYPE_NAME; + SQLSMALLINT DATA_TYPE; + SQLINTEGER COLUMN_SIZE; + std::string LITERAL_PREFIX; + std::string LITERAL_SUFFIX; + std::string CREATE_PARAMS; + SQLSMALLINT NULLABLE; + SQLSMALLINT CASE_SENSITIVE; + SQLSMALLINT SEARCHABLE; + SQLSMALLINT UNSIGNED_ATTRIBUTE; + SQLSMALLINT FIXED_PREC_SCALE; + SQLSMALLINT AUTO_UNIQUE_VALUE; + std::string LOCAL_TYPE_NAME; + SQLSMALLINT MINIMUM_SCALE; + SQLSMALLINT MAXIMUM_SCALE; + SQLSMALLINT SQL_DATA_TYPE; + SQLSMALLINT SQL_DATETIME_SUB; + SQLINTEGER NUM_PREC_RADIX; + SQLSMALLINT INTERVAL_PRECISION; +} sample_data_getTypeInfo_struct; + +// GetTypeInfo expected output +const std::vector< sample_data_getTypeInfo_struct > sample_data_all_types_info{ + {"boolean", SQL_BIT, 1, "", "", "", 2, 0, 3, 1, 0, 0, "", 0, 0, SQL_BIT, 0, + 10, 0}, + {"byte", SQL_TINYINT, 3, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, + SQL_TINYINT, 0, 10, 0}, + {"short", SQL_SMALLINT, 5, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, + SQL_SMALLINT, 0, 10, 0}, + {"keyword", SQL_WVARCHAR, 256, "\"", "\"", "", 2, 1, 3, 1, 0, 0, "", 0, 0, + SQL_WVARCHAR, 0, 10, 0}, + {"text", SQL_WVARCHAR, 2147483647, "\"", "\"", "", 2, 1, 3, 1, 0, 0, "", 0, + 0, SQL_WVARCHAR, 0, 10, 0}, + {"nested", SQL_WVARCHAR, 0, "\"", "\"", "", 2, 0, 3, 1, 0, 0, "", 0, 0, + SQL_WVARCHAR, 0, 10, 0}, + {"object", SQL_WVARCHAR, 0, "\"", "\"", "", 2, 0, 3, 1, 0, 0, "", 0, 0, + SQL_WVARCHAR, 0, 10, 0}, + {"integer", SQL_INTEGER, 10, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, + SQL_INTEGER, 0, 10, 0}, + {"double", SQL_DOUBLE, 15, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, + SQL_DOUBLE, 
0, 10, 0}, + {"scaled_float", SQL_DOUBLE, 15, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, + SQL_DOUBLE, 0, 10, 0}, + {"long", SQL_BIGINT, 19, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, SQL_BIGINT, + 0, 10, 0}, + {"half_float", SQL_REAL, 7, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, + SQL_REAL, 0, 10, 0}, + {"float", SQL_REAL, 7, "", "", "", 2, 0, 3, 0, 0, 0, "", 0, 0, SQL_REAL, 0, + 10, 0}, + {"date", SQL_TYPE_TIMESTAMP, 24, "", "", "", 2, 0, 3, 1, 0, 0, "", 0, 0, + SQL_TYPE_TIMESTAMP, 0, 10, 0}}; + +const std::vector< sample_data_getTypeInfo_struct > + sample_data_single_type_multiple_row{ + {"keyword", SQL_WVARCHAR, 256, "\"", "\"", "", 2, 1, 3, 1, 0, 0, "", 0, + 0, SQL_WVARCHAR, 0, 10, 0}, + {"text", SQL_WVARCHAR, 2147483647, "\"", "\"", "", 2, 1, 3, 1, 0, 0, "", + 0, 0, SQL_WVARCHAR, 0, 10, 0}, + {"nested", SQL_WVARCHAR, 0, "\"", "\"", "", 2, 0, 3, 1, 0, 0, "", 0, 0, + SQL_WVARCHAR, 0, 10, 0}, + {"object", SQL_WVARCHAR, 0, "\"", "\"", "", 2, 0, 3, 1, 0, 0, "", 0, 0, + SQL_WVARCHAR, 0, 10, 0}}; + +const std::vector< sample_data_getTypeInfo_struct > + sample_data_single_type_info{{"boolean", SQL_BIT, 1, "", "", "", 2, 0, 3, 1, + 0, 0, "", 0, 0, SQL_BIT, 0, 10, 0}}; + +const std::vector< sample_data_getTypeInfo_struct > sample_data_empty{}; + +void CheckGetTypeInfoData( + SQLHSTMT m_hstmt, + const std::vector< sample_data_getTypeInfo_struct >& sample_data) { + std::vector< bind_info > binds; + binds.push_back(bind_info(1, SQL_C_CHAR)); + binds.push_back(bind_info(2, SQL_C_SHORT)); + binds.push_back(bind_info(3, SQL_C_LONG)); + binds.push_back(bind_info(4, SQL_C_CHAR)); + binds.push_back(bind_info(5, SQL_C_CHAR)); + binds.push_back(bind_info(6, SQL_C_CHAR)); + binds.push_back(bind_info(7, SQL_C_SHORT)); + binds.push_back(bind_info(8, SQL_C_SHORT)); + binds.push_back(bind_info(9, SQL_C_SHORT)); + binds.push_back(bind_info(10, SQL_C_SHORT)); + binds.push_back(bind_info(11, SQL_C_SHORT)); + binds.push_back(bind_info(12, SQL_C_SHORT)); + binds.push_back(bind_info(13, 
SQL_C_CHAR)); + binds.push_back(bind_info(14, SQL_C_SHORT)); + binds.push_back(bind_info(15, SQL_C_SHORT)); + binds.push_back(bind_info(16, SQL_C_SHORT)); + binds.push_back(bind_info(17, SQL_C_SHORT)); + binds.push_back(bind_info(18, SQL_C_LONG)); + binds.push_back(bind_info(19, SQL_C_SHORT)); + + for (auto& it : binds) + SQLBindCol(m_hstmt, it.ordinal, it.target_type, it.target, + it.buffer_len, &it.out_len); + + SQLRETURN ret = SQL_ERROR; + if (sample_data.empty()) { + size_t result_count = 0; + while ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) + result_count++; + EXPECT_TRUE(result_count != 0); + } else { + size_t result_count = 0; + for (; ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) + && (result_count < sample_data.size()); + result_count++) { + auto it = + std::find_if(sample_data.begin(), sample_data.end(), + [&](const sample_data_getTypeInfo_struct& d) { + return d.TYPE_NAME == binds[0].AsString(); + }); + ASSERT_NE(it, sample_data.end()); + EXPECT_EQ(binds[0].AsString(), it->TYPE_NAME); + EXPECT_EQ(binds[1].AsString(), std::to_string(it->DATA_TYPE)); + EXPECT_EQ(binds[2].AsString(), std::to_string(it->COLUMN_SIZE)); + EXPECT_EQ(binds[3].AsString(), it->LITERAL_PREFIX); + EXPECT_EQ(binds[4].AsString(), it->LITERAL_SUFFIX); + EXPECT_EQ(binds[5].AsString(), it->CREATE_PARAMS); + EXPECT_EQ(binds[6].AsString(), std::to_string(it->NULLABLE)); + EXPECT_EQ(binds[7].AsString(), std::to_string(it->CASE_SENSITIVE)); + EXPECT_EQ(binds[8].AsString(), std::to_string(it->SEARCHABLE)); + EXPECT_EQ(binds[9].AsString(), + std::to_string(it->UNSIGNED_ATTRIBUTE)); + EXPECT_EQ(binds[10].AsString(), + std::to_string(it->FIXED_PREC_SCALE)); + EXPECT_EQ(binds[11].AsString(), + std::to_string(it->AUTO_UNIQUE_VALUE)); + EXPECT_EQ(binds[12].AsString(), it->LOCAL_TYPE_NAME); + EXPECT_EQ(binds[13].AsString(), std::to_string(it->MINIMUM_SCALE)); + EXPECT_EQ(binds[14].AsString(), std::to_string(it->MAXIMUM_SCALE)); + EXPECT_EQ(binds[15].AsString(), std::to_string(it->SQL_DATA_TYPE)); + 
EXPECT_EQ(binds[16].AsString(), + std::to_string(it->SQL_DATETIME_SUB)); + EXPECT_EQ(binds[17].AsString(), std::to_string(it->NUM_PREC_RADIX)); + EXPECT_EQ(binds[18].AsString(), + std::to_string(it->INTERVAL_PRECISION)); + } + EXPECT_EQ(result_count, sample_data.size()); + } + EXPECT_EQ(ret, SQL_NO_DATA); +} + +class TestSQLGetTypeInfo : public testing::Test { + public: + TestSQLGetTypeInfo() { + } + void SetUp() { + AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + } + void TearDown() { + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + ~TestSQLGetTypeInfo() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +#define TEST_SQL_GET_TYPE_INFO(test_name, data_type, empty, exp_out) \ + TEST_F(TestSQLGetTypeInfo, test_name) { \ + EXPECT_TRUE(SQL_SUCCEEDED(SQLGetTypeInfo(m_hstmt, data_type))); \ + if (empty) { \ + size_t result_count = 0; \ + SQLRETURN ret; \ + while ((ret = SQLFetch(m_hstmt)) == SQL_SUCCESS) \ + result_count++; \ + EXPECT_EQ(ret, SQL_NO_DATA); \ + EXPECT_EQ(result_count, static_cast< size_t >(0)); \ + } else \ + CheckGetTypeInfoData(m_hstmt, exp_out); \ + } + +TEST_SQL_GET_TYPE_INFO(AllTypes, SQL_ALL_TYPES, 0, sample_data_all_types_info) + +TEST_SQL_GET_TYPE_INFO(SingleTypeMultipleRows, SQL_WVARCHAR, 0, + sample_data_single_type_multiple_row) + +TEST_SQL_GET_TYPE_INFO(SingleType, SQL_BIT, 0, sample_data_single_type_info) + +TEST_SQL_GET_TYPE_INFO(UnsupportedType, SQL_DECIMAL, 1, sample_data_empty) + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. 
+ system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_catalog > leaks_itodbc_catalog"); +#endif + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCConnection/CMakeLists.txt new file mode 100644 index 0000000000..9094b2ceef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(itodbc_connection) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_connection.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_connection ${SOURCE_FILES}) + +# Library dependencies +target_code_coverage(itodbc_connection PUBLIC AUTO ALL) +target_link_libraries(itodbc_connection odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_connection PUBLIC _UNICODE UNICODE) + diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/packages.config b/sql-odbc/src/IntegrationTests/ITODBCConnection/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCConnection/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. 
+// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/pch.h b/sql-odbc/src/IntegrationTests/ITODBCConnection/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/test_dsn.reg b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_dsn.reg new file mode 100644 index 0000000000..59b0f91c2b --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_dsn.reg @@ -0,0 +1,21 @@ +Windows Registry Editor Version 5.00 + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBCINST.INI\ODBC Drivers] +"ElasticsearchODBC"="Installed" + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBCINST.INI\ElasticsearchODBC] +"Driver"="\\bin64\\Release\\odfesqlodbc.dll" +"Setup"="\\bin64\\Release\\odfesqlodbc.dll" + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources] +"test_dsn"="ElasticsearchODBC" + +[HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\ODBC.INI\test_dsn] +"host"="localhost" +"port"="9200" +"user"="admin" +"password"="admin" +"auth"="BASIC" +"useSSL"="0" +"responseTimeout"="10" + diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbc.ini b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbc.ini new 
file mode 100644 index 0000000000..f1b38ef215 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbc.ini @@ -0,0 +1,11 @@ +[ODBC Data Sources] +test_dsn = ElasticsearchODBC + +[test_dsn] +host = localhost +port = 9200 +user = admin +password = admin +auth = BASIC +useSSL = 0 +responseTimeout = 10 \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbc_connection.cpp b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbc_connection.cpp new file mode 100644 index 0000000000..1779835f33 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbc_connection.cpp @@ -0,0 +1,523 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" + +#ifdef WIN32 +#include +#endif +#include +#include +#include +#include +// clang-format on + +#define IT_SIZEOF(x) (NULL == (x) ? 
0 : (sizeof((x)) / sizeof((x)[0]))) + +// SQLConnect constants +const SQLCHAR invalid_dsn_charwidth[]{"test_dsn"}; +std::wstring wdsn_name = L"test_dsn"; +std::wstring user = L"admin"; +std::wstring pass = L"admin"; + +// SQLDriverConnect constants +std::wstring dsn_conn_string = L"DSN=test_dsn"; + +class TestSQLConnect : public testing::Test { + public: + TestSQLConnect() { + } + + void SetUp() { + AllocConnection(&m_env, &m_conn, true, true); + } + + void TearDown() { + if (SQL_NULL_HDBC != m_conn) { + SQLFreeHandle(SQL_HANDLE_DBC, m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLConnect() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; +}; + +TEST_F(TestSQLConnect, SqlSuccess) { + SQLRETURN ret = SQLConnect( + m_conn, (SQLTCHAR*)wdsn_name.c_str(), SQL_NTS, (SQLTCHAR*)user.c_str(), + static_cast< SQLSMALLINT >(user.length()), (SQLTCHAR*)pass.c_str(), + static_cast< SQLSMALLINT >(pass.length())); + + LogAnyDiagnostics(SQL_HANDLE_DBC, m_conn, ret); + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLConnect, SqlError) { + SQLRETURN ret = SQLConnect( + m_conn, (SQLTCHAR*)invalid_dsn_charwidth, SQL_NTS, + (SQLTCHAR*)user.c_str(), static_cast< SQLSMALLINT >(user.length()), + (SQLTCHAR*)pass.c_str(), static_cast< SQLSMALLINT >(pass.length())); + + LogAnyDiagnostics(SQL_HANDLE_DBC, m_conn, ret); + EXPECT_EQ(SQL_ERROR, ret); +} + +class TestSQLDriverConnect : public testing::Test { + public: + TestSQLDriverConnect() { + } + + void SetUp() { + AllocConnection(&m_env, &m_conn, true, true); + } + + void TearDown() { + if (SQL_NULL_HDBC != m_conn) { + SQLFreeHandle(SQL_HANDLE_DBC, m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLDriverConnect() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLTCHAR m_out_conn_string[1024]; + SQLSMALLINT m_out_conn_string_length; +}; 
+ +TEST_F(TestSQLDriverConnect, DSNConnectionString) { + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)dsn_conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, SqlDriverPrompt) { + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_PROMPT); + + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, SqlDriverComplete) { + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, SqlDriverCompleteRequired) { + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE_REQUIRED); + + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, SqlDriverNoprompt) { + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_NOPROMPT); + + EXPECT_EQ(SQL_SUCCESS, ret); +} + +// TODO #41 - Revisit when parser code +// This should return SQL_SUCCESS_WITH_INFO +TEST_F(TestSQLDriverConnect, InvalidDriver) { + std::wstring invalid_driver_conn_string = + use_ssl ? 
L"Driver=xxxx;" + L"host=https://localhost;port=5432;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver=xxxx;" + L"host=localhost;port=5432;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)invalid_driver_conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + EXPECT_EQ(SQL_ERROR, ret); + } + +TEST_F(TestSQLDriverConnect, InvalidHost) { + std::wstring invalid_host_conn_string = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"host=https://8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=1;" + : L"Driver={Elasticsearch ODBC};" + L"host=8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=1;"; + + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)invalid_host_conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + EXPECT_EQ(SQL_ERROR, ret); +} + +TEST_F(TestSQLDriverConnect, InvalidPort) { + std::wstring invalid_port_conn_string = + use_ssl ? 
L"Driver={Elasticsearch ODBC};" + L"host=https://localhost;port=5432;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver={Elasticsearch ODBC};" + L"host=localhost;port=5432;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)invalid_port_conn_string.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + EXPECT_EQ(SQL_ERROR, ret); +} + +// TODO #41 - Revisit when parser code +// This should return SQL_SUCCESS_WITH_INFO (SQLSTATE 01S00 - Invalid connection +// string attribute) +TEST_F(TestSQLDriverConnect, UnsupportedKeyword) { + std::wstring unsupported_keyword_conn_string = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"host=https://localhost;port=5432;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;extra=1" + : L"Driver={Elasticsearch ODBC};" + L"host=localhost;port=5432;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;extra=1"; + + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)unsupported_keyword_conn_string.c_str(), + SQL_NTS, m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + EXPECT_EQ(SQL_ERROR, ret); +} + +TEST_F(TestSQLDriverConnect, ConnStringAbbrevsUID) { + std::wstring abbrev_str = + use_ssl ? 
L"Driver={Elasticsearch ODBC};" + L"host=https://localhost;port=9200;" + L"UID=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver={Elasticsearch ODBC};" + L"host=localhost;port=9200;" + L"UID=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)abbrev_str.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_NOPROMPT); + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, ConnStringAbbrevsPWD) { + std::wstring abbrev_str = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"host=https://localhost;port=9200;" + L"user=admin;PWD=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver={Elasticsearch ODBC};" + L"host=localhost;port=9200;" + L"user=admin;PWD=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)abbrev_str.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_NOPROMPT); + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, ConnStringAbbrevsUIDPWD) { + std::wstring abbrev_str = + use_ssl ? 
L"Driver={Elasticsearch ODBC};" + L"host=https://localhost;port=9200;" + L"UID=admin;PWD=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver={Elasticsearch ODBC};" + L"host=localhost;port=9200;" + L"UID=admin;PWD=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)abbrev_str.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_NOPROMPT); + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, ConnStringAbbrevsServer) { + std::wstring abbrev_str = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"server=https://localhost;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver={Elasticsearch ODBC};" + L"server=localhost;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)abbrev_str.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_NOPROMPT); + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, ConnStringAbbrevsServerUIDPWD) { + std::wstring abbrev_str = + use_ssl ? 
L"Driver={Elasticsearch ODBC};" + L"server=https://localhost;port=9200;" + L"UID=admin;PWD=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver={Elasticsearch ODBC};" + L"server=localhost;port=9200;" + L"UID=admin;PWD=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + + SQLRETURN ret = + SQLDriverConnect(m_conn, NULL, (SQLTCHAR*)abbrev_str.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_NOPROMPT); + EXPECT_EQ(SQL_SUCCESS, ret); +} + +TEST_F(TestSQLDriverConnect, Timeout1Second) { + std::wstring one_second_timeout = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"host=https://8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=1;" + : L"Driver={Elasticsearch ODBC};" + L"host=8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=1;"; + + auto start = std::chrono::steady_clock::now(); + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)one_second_timeout.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + auto end = std::chrono::steady_clock::now(); + auto time = + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count(); + std::cout << "TIME: " << time << std::endl; + EXPECT_EQ(SQL_ERROR, ret); +#ifdef WIN32 + // Windows rounds up to nearest 4s with timeout, another user reported this + // issue: + // https://social.msdn.microsoft.com/Forums/vstudio/en-US/42ae1b2f-b120-4b46-9417-e594c3d02a5f/does-winhttpsettimeouts-support-small-timeouts?forum=vcgeneral + EXPECT_GT(time, 3400); + EXPECT_LT(time, 4500); +#else + EXPECT_GT(time, 500); + EXPECT_LT(time, 1500); +#endif +} + 
+TEST_F(TestSQLDriverConnect, Timeout3Second) { + std::wstring one_second_timeout = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"host=https://8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=3;" + : L"Driver={Elasticsearch ODBC};" + L"host=8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=3;"; + + auto start = std::chrono::steady_clock::now(); + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)one_second_timeout.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + auto end = std::chrono::steady_clock::now(); + auto time = + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count(); + std::cout << "TIME: " << time << std::endl; + EXPECT_EQ(SQL_ERROR, ret); +#ifdef WIN32 + // Windows rounds up to nearest 4s with timeout, another user reported this + // issue: + // https://social.msdn.microsoft.com/Forums/vstudio/en-US/42ae1b2f-b120-4b46-9417-e594c3d02a5f/does-winhttpsettimeouts-support-small-timeouts?forum=vcgeneral + EXPECT_GT(time, 3500); + EXPECT_LT(time, 4500); +#else + EXPECT_GT(time, 2500); + EXPECT_LT(time, 3500); +#endif +} + +TEST_F(TestSQLDriverConnect, Timeout7Second) { + std::wstring seven_second_timeout = + use_ssl ? 
L"Driver={Elasticsearch ODBC};" + L"host=https://8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=7;" + : L"Driver={Elasticsearch ODBC};" + L"host=8.8.8.8;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=7;"; + + auto start = std::chrono::steady_clock::now(); + SQLRETURN ret = SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)seven_second_timeout.c_str(), SQL_NTS, + m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_COMPLETE); + auto end = std::chrono::steady_clock::now(); + auto time = + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count(); + std::cout << "TIME: " << time << std::endl; + EXPECT_EQ(SQL_ERROR, ret); +#ifdef WIN32 + // Windows rounds up to nearest 4s with timeout, another user reported this + // issue: + // https://social.msdn.microsoft.com/Forums/vstudio/en-US/42ae1b2f-b120-4b46-9417-e594c3d02a5f/does-winhttpsettimeouts-support-small-timeouts?forum=vcgeneral + EXPECT_GT(time, 7500); + EXPECT_LT(time, 8500); +#else + EXPECT_GT(time, 6500); + EXPECT_LT(time, 7500); +#endif +} + +class TestSQLDisconnect : public testing::Test { + public: + TestSQLDisconnect() { + } + + void SetUp() { + } + + void TearDown() { + if (m_conn != SQL_NULL_HDBC) { + SQLFreeHandle(SQL_HANDLE_DBC, m_conn); + } + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLDisconnect() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; +}; + +TEST_F(TestSQLDisconnect, TestSuccess) { + ASSERT_NO_THROW(ITDriverConnect((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, true, true)); + EXPECT_EQ(SQL_SUCCESS, SQLDisconnect(m_conn)); +} + +TEST_F(TestSQLDisconnect, TestReconnectOnce) { + for (int i = 0; i <= 1; i++) { + 
ASSERT_NO_THROW((ITDriverConnect((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, true, true))); + EXPECT_EQ(SQL_SUCCESS, SQLDisconnect(m_conn)); + } +} + +TEST_F(TestSQLDisconnect, TestReconnectMultipleTimes) { + for (int i = 0; i <= 10; i++) { + ASSERT_NO_THROW((ITDriverConnect((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, true, true))); + EXPECT_EQ(SQL_SUCCESS, SQLDisconnect(m_conn)); + } +} + +TEST_F(TestSQLDisconnect, TestDisconnectWithoutConnect) { + ASSERT_NO_THROW(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &m_env)); + ASSERT_NO_THROW(SQLAllocHandle(SQL_HANDLE_DBC, m_env, &m_conn)); + EXPECT_EQ(SQL_ERROR, SQLDisconnect(m_conn)); +} + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. + system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." 
: "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_connection > leaks_itodbc_connection"); +#endif + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbcinst.ini b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbcinst.ini new file mode 100644 index 0000000000..283dd72937 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCConnection/test_odbcinst.ini @@ -0,0 +1,6 @@ +[ODBC Drivers] +ElasticsearchODBC = Installed + +[ElasticsearchODBC] +Driver = /lib64/libodfesqlodbc.dylib +Setup = /lib64/libodfesqlodbc.dylib \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCDescriptors/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/CMakeLists.txt new file mode 100644 index 0000000000..602d7493a5 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/CMakeLists.txt @@ -0,0 +1,31 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(itodbc_descriptors) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_descriptors.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_descriptors ${SOURCE_FILES}) + +# Library dependencies +target_code_coverage(itodbc_descriptors PUBLIC AUTO ALL) +target_link_libraries(itodbc_descriptors odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_descriptors PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCDescriptors/packages.config b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCDescriptors/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. 
+// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCDescriptors/pch.h b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCDescriptors/test_odbc_descriptors.cpp b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/test_odbc_descriptors.cpp new file mode 100644 index 0000000000..3929558337 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCDescriptors/test_odbc_descriptors.cpp @@ -0,0 +1,621 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +// clang-format on + +const std::wstring data_set = L"kibana_sample_data_flights"; +const std::wstring single_col = L"Origin"; +const std::wstring single_row = L"1"; +const uint64_t multi_row_cnt = 25; +const uint64_t multi_col_cnt = 25; +const uint64_t single_col_cnt = 1; +const std::wstring multi_col = L"*"; + +inline void ExecuteQuery(const std::wstring& column, + const std::wstring& dataset, const std::wstring& count, + SQLHSTMT* hstmt) { + std::wstring statement = QueryBuilder(column, dataset, count); + SQLRETURN ret = SQLExecDirect(*hstmt, (SQLTCHAR*)statement.c_str(), + (SQLINTEGER)statement.length()); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); +} + +class TestSQLCopyDesc : public testing::Test { + public: + TestSQLCopyDesc() { + } + + void SetUp() { + AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + SQLAllocHandle(SQL_HANDLE_STMT, m_conn, &m_hstmt_copy); + } + + void TearDown() { + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLCopyDesc() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; + SQLHSTMT m_hstmt_copy = SQL_NULL_HSTMT; + SQLHDESC m_ard_hdesc = SQL_NULL_HDESC; + SQLHDESC m_ard_hdesc_copy = SQL_NULL_HDESC; + SQLHDESC m_ird_hdesc_copy = SQL_NULL_HDESC; +}; + +TEST_F(TestSQLCopyDesc, TestCopyArdToArd) { + ExecuteQuery(multi_col, data_set, std::to_wstring(multi_row_cnt), &m_hstmt); + + SQLGetStmtAttr(m_hstmt, SQL_ATTR_APP_ROW_DESC, &m_ard_hdesc, 0, NULL); + SQLGetStmtAttr(m_hstmt_copy, SQL_ATTR_APP_ROW_DESC, &m_ard_hdesc_copy, 0, + NULL); + + EXPECT_EQ(SQL_SUCCESS, SQLCopyDesc(m_ard_hdesc, m_ard_hdesc_copy)); +} + +TEST_F(TestSQLCopyDesc, TestNotCopyArdToIrd) { + 
ExecuteQuery(multi_col, data_set, std::to_wstring(multi_row_cnt), &m_hstmt); + + SQLGetStmtAttr(m_hstmt, SQL_ATTR_APP_ROW_DESC, &m_ard_hdesc, 0, NULL); + SQLGetStmtAttr(m_hstmt_copy, SQL_ATTR_IMP_ROW_DESC, &m_ird_hdesc_copy, 0, + NULL); + + EXPECT_EQ(SQL_ERROR, SQLCopyDesc(m_ard_hdesc, m_ird_hdesc_copy)); + EXPECT_TRUE(CheckSQLSTATE(SQL_HANDLE_DESC, m_ird_hdesc_copy, + SQLSTATE_GENERAL_ERROR, true)); +} + +class TestSQLSetDescField : public testing::Test { + public: + TestSQLSetDescField() { + } + + void SetUp() { + AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + SQLGetStmtAttr(m_hstmt, SQL_ATTR_APP_ROW_DESC, &m_ard_hdesc, 0, NULL); + SQLGetStmtAttr(m_hstmt, SQL_ATTR_IMP_ROW_DESC, &m_ird_hdesc, 0, NULL); + } + + void TearDown() { + if (m_ard_hdesc != SQL_NULL_HDESC) { + SQLFreeHandle(SQL_HANDLE_DESC, m_ard_hdesc); + } + if (m_ird_hdesc != SQL_NULL_HDESC) { + SQLFreeHandle(SQL_HANDLE_DESC, m_ird_hdesc); + } + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLSetDescField() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; + SQLHDESC m_ard_hdesc = SQL_NULL_HDESC; + SQLHDESC m_ird_hdesc = SQL_NULL_HDESC; + SQLSMALLINT m_rec_number = 0; + SQLSMALLINT m_field_identifier; + SQLINTEGER m_buffer_length = SQL_NTS; +}; + +// Template for tests of SQLSetDescField +#define TEST_SQL_SET_DESC_FIELD(test_name, identifier, buffer_length, rec_num, \ + value_ptr_assignment, expected_val, hdesc, \ + check_state) \ + TEST_F(TestSQLSetDescField, test_name) { \ + ExecuteQuery(multi_col, data_set, std::to_wstring(multi_row_cnt), \ + &m_hstmt); \ + m_field_identifier = identifier; \ + m_buffer_length = buffer_length; \ + m_rec_number = rec_num; \ + value_ptr_assignment; \ + EXPECT_EQ(expected_val, \ + SQLSetDescField(hdesc, m_rec_number, m_field_identifier, 
\ + (SQLPOINTER)m_value_ptr, m_buffer_length)); \ + if (check_state) \ + EXPECT_TRUE( \ + CheckSQLSTATE(SQL_HANDLE_DESC, hdesc, \ + SQLSTATE_INVALID_DESCRIPTOR_FIELD_IDENTIFIER)); \ + } +#ifdef WIN32 +#pragma warning(push) +// This warning detects an attempt to assign a 32-bit value to a 64-bit pointer +// type +#pragma warning(disable : 4312) +#elif defined(__APPLE__) +// This warning detects casts from integer types to void*. +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wint-to-void-pointer-cast" +#endif // WIN32 + +// Descriptor Header Fields Tests + +TEST_SQL_SET_DESC_FIELD(Test_SQL_DESC_ALLOC_TYPE, SQL_DESC_ALLOC_TYPE, + SQL_IS_SMALLINT, 0, + SQLSMALLINT m_value_ptr = SQL_DESC_ALLOC_USER; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(Test_SQL_DESC_ARRAY_SIZE, SQL_DESC_ARRAY_SIZE, SQL_NTS, + 0, SQLULEN m_value_ptr = single_col_cnt; + , SQL_SUCCESS, m_ard_hdesc, 0); + +TEST_SQL_SET_DESC_FIELD(Test_SQL_DESC_ARRAY_STATUS_PTR, + SQL_DESC_ARRAY_STATUS_PTR, SQL_NTS, 0, SQLUSMALLINT foo; + SQLUSMALLINT* m_value_ptr = &foo; + , SQL_SUCCESS, m_ard_hdesc, 0); + +TEST_SQL_SET_DESC_FIELD(Test_SQL_DESC_BIND_OFFSET_PTR, SQL_DESC_BIND_OFFSET_PTR, + SQL_NTS, 0, SQLLEN foo; + SQLLEN* m_value_ptr = &foo; + , SQL_SUCCESS, m_ard_hdesc, 0); + +TEST_SQL_SET_DESC_FIELD(Test_SQL_DESC_BIND_TYPE, SQL_DESC_BIND_TYPE, SQL_NTS, 0, + SQLINTEGER m_value_ptr = SQL_BIND_BY_COLUMN; + , SQL_SUCCESS, m_ard_hdesc, 0); + +TEST_SQL_SET_DESC_FIELD(Test_SQL_DESC_COUNT, SQL_DESC_COUNT, SQL_IS_SMALLINT, 0, + SQLSMALLINT m_value_ptr = 25; + , SQL_SUCCESS, m_ard_hdesc, 0); + +TEST_SQL_SET_DESC_FIELD(Test_SQL_DESC_ROWS_PROCESSED_PTR, + SQL_DESC_ROWS_PROCESSED_PTR, SQL_NTS, 0, SQLULEN foo; + SQLULEN* m_value_ptr = &foo; + , SQL_SUCCESS, m_ird_hdesc, 0); + +// Descriptor Record Fields Tests + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_AUTO_UNIQUE_VALUE, + SQL_DESC_AUTO_UNIQUE_VALUE, SQL_NTS, 1, + SQLINTEGER m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + 
+TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_BASE_COLUMN_NAME, + SQL_DESC_BASE_COLUMN_NAME, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = "Origin"; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_BASE_TABLE_NAME, + SQL_DESC_BASE_TABLE_NAME, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = "kibana_sample_data_flights"; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_CASE_SENSITIVE, + SQL_DESC_CASE_SENSITIVE, SQL_NTS, 1, + SQLINTEGER m_value_ptr = 1; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_CATALOG_NAME, + SQL_DESC_CATALOG_NAME, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = ""; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_CONCISE_TYPE, + SQL_DESC_CONCISE_TYPE, SQL_IS_INTEGER, 1, + SQLSMALLINT m_value_ptr = SQL_WLONGVARCHAR; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_DATA_PTR, SQL_DESC_DATA_PTR, + SQL_IS_POINTER, 1, SQLPOINTER m_value_ptr = NULL; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_DATETIME_INTERVAL_CODE, + SQL_DESC_DATETIME_INTERVAL_CODE, SQL_IS_SMALLINT, 1, + SQLSMALLINT m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_DATETIME_INTERVAL_PRECISION, + SQL_DESC_DATETIME_INTERVAL_PRECISION, SQL_IS_INTEGER, 1, + SQLINTEGER m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_DISPLAY_SIZE, + SQL_DESC_DISPLAY_SIZE, SQL_IS_POINTER, 1, + SQLLEN m_value_ptr = 32766; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_FIXED_PREC_SCALE, + SQL_DESC_FIXED_PREC_SCALE, SQL_IS_INTEGER, 1, + SQLSMALLINT m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_INDICATOR_PTR, + SQL_DESC_INDICATOR_PTR, SQL_IS_INTEGER, 1, + SQLLEN m_value_ptr = 0; + , 
SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_LABEL, SQL_DESC_LABEL, + SQL_NTS, 1, SQLCHAR m_value_ptr[255] = "Origin"; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_LENGTH, SQL_DESC_LENGTH, + SQL_IS_INTEGER, 1, SQLULEN m_value_ptr = 32766; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_LITERAL_PREFIX, + SQL_DESC_LITERAL_PREFIX, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = ""; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_LITERAL_SUFFIX, + SQL_DESC_LITERAL_SUFFIX, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = ""; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_LOCAL_TYPE_NAME, + SQL_DESC_LOCAL_TYPE_NAME, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = "varchar"; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_NAME, SQL_DESC_NAME, + SQL_NTS, 1, SQLCHAR m_value_ptr[255] = "Origin"; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_NULLABLE, SQL_DESC_NULLABLE, + SQL_IS_SMALLINT, 1, SQLSMALLINT m_value_ptr = 1; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_NUM_PREC_RADIX, + SQL_DESC_NUM_PREC_RADIX, SQL_IS_INTEGER, 1, + SQLINTEGER m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_OCTET_LENGTH, + SQL_DESC_OCTET_LENGTH, SQL_NTS, 1, + SQLLEN m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_OCTET_LENGTH_PTR, + SQL_DESC_OCTET_LENGTH_PTR, SQL_IS_INTEGER, 1, + SQLLEN m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_PARAMETER_TYPE, + SQL_DESC_PARAMETER_TYPE, SQL_IS_SMALLINT, 1, + SQLSMALLINT m_value_ptr = 1; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_PRECISION, + 
SQL_DESC_PRECISION, SQL_IS_SMALLINT, 1, + SQLSMALLINT m_value_ptr = 30585; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_ROWVER, SQL_DESC_ROWVER, + SQL_NTS, 1, SQLSMALLINT m_value_ptr = 1; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_SCALE, SQL_DESC_SCALE, + SQL_IS_SMALLINT, 1, SQLSMALLINT m_value_ptr = 0; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_SCHEMA_NAME, + SQL_DESC_SCHEMA_NAME, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = ""; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_SEARCHABLE, + SQL_DESC_SEARCHABLE, SQL_IS_SMALLINT, 1, + SQLSMALLINT m_value_ptr = SQL_PRED_SEARCHABLE; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_TABLE_NAME, + SQL_DESC_TABLE_NAME, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = "kibana_sample_data_flights"; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_TYPE, SQL_DESC_TYPE, + SQL_IS_SMALLINT, 1, + SQLSMALLINT m_value_ptr = SQL_WLONGVARCHAR; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_TYPE_NAME, + SQL_DESC_TYPE_NAME, SQL_NTS, 1, + SQLCHAR m_value_ptr[255] = "varchar"; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_UNNAMED, SQL_DESC_UNNAMED, + SQL_IS_SMALLINT, 1, SQLSMALLINT m_value_ptr = SQL_NAMED; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_UNSIGNED, SQL_DESC_UNSIGNED, + SQL_IS_SMALLINT, 1, SQLSMALLINT m_value_ptr = SQL_TRUE; + , SQL_ERROR, m_ird_hdesc, 1); + +TEST_SQL_SET_DESC_FIELD(TestUndefinedError_SQL_DESC_UPDATABLE, + SQL_DESC_UPDATABLE, SQL_IS_SMALLINT, 1, + SQLSMALLINT m_value_ptr = SQL_ATTR_READONLY; + , SQL_ERROR, m_ird_hdesc, 1); + +class TestSQLGetDescField : public testing::Test { + public: + TestSQLGetDescField() { + } + + void SetUp() { + 
AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + SQLGetStmtAttr(m_hstmt, SQL_ATTR_APP_ROW_DESC, &m_ard_hdesc, 0, NULL); + SQLGetStmtAttr(m_hstmt, SQL_ATTR_IMP_ROW_DESC, &m_ird_hdesc, 0, NULL); + } + + void TearDown() { + if (m_ard_hdesc != SQL_NULL_HDESC) { + SQLFreeHandle(SQL_HANDLE_DESC, m_ard_hdesc); + } + if (m_ird_hdesc != SQL_NULL_HDESC) { + SQLFreeHandle(SQL_HANDLE_DESC, m_ird_hdesc); + } + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLGetDescField() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; + SQLHDESC m_ard_hdesc = SQL_NULL_HDESC; + SQLHDESC m_ird_hdesc = SQL_NULL_HDESC; + SQLSMALLINT m_rec_number = 0; + SQLSMALLINT m_field_identifier; + SQLINTEGER m_buffer_length = 0; + SQLINTEGER m_string_length_ptr = 0; +}; + +// Template for tests of SQLGetDescField +#define TEST_SQL_GET_DESC_FIELD(test_name, identifier, buffer_length, rec_num, \ + value_ptr_assignment, expected_val, hdesc, \ + check_state, check_data, check_data_value) \ + TEST_F(TestSQLGetDescField, test_name) { \ + ExecuteQuery(multi_col, data_set, std::to_wstring(multi_row_cnt), \ + &m_hstmt); \ + m_field_identifier = identifier; \ + m_buffer_length = buffer_length; \ + m_rec_number = rec_num; \ + value_ptr_assignment; \ + EXPECT_EQ(expected_val, \ + SQLGetDescField(hdesc, m_rec_number, m_field_identifier, \ + &m_value_ptr, m_buffer_length, \ + &m_string_length_ptr)); \ + if (check_state) \ + EXPECT_TRUE( \ + CheckSQLSTATE(SQL_HANDLE_DESC, hdesc, \ + SQLSTATE_INVALID_DESCRIPTOR_FIELD_IDENTIFIER)); \ + if (check_data) \ + EXPECT_EQ((uint64_t)check_data_value, (uint64_t)m_value_ptr); \ + } + +// Descriptor Header Fields Tests + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_ALLOC_TYPE, SQL_DESC_ALLOC_TYPE, 0, 0, + SQLSMALLINT m_value_ptr = 0; + , SQL_SUCCESS, 
m_ird_hdesc, 0, 1, SQL_DESC_ALLOC_AUTO); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_ARRAY_SIZE, SQL_DESC_ARRAY_SIZE, 0, 0, + SQLULEN m_value_ptr = 0; + , SQL_SUCCESS, m_ard_hdesc, 0, 1, single_col_cnt); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_ARRAY_STATUS_PTR, + SQL_DESC_ARRAY_STATUS_PTR, 0, 0, + SQLUSMALLINT* m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_BIND_OFFSET_PTR, SQL_DESC_BIND_OFFSET_PTR, + 0, 0, SQLLEN* m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_BIND_TYPE, SQL_DESC_BIND_TYPE, 0, 0, + SQLINTEGER m_value_ptr = 0; + , SQL_SUCCESS, m_ard_hdesc, 0, 1, SQL_BIND_BY_COLUMN); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_COUNT, SQL_DESC_COUNT, 0, 0, + SQLSMALLINT m_value_ptr = 0; + , SQL_SUCCESS, m_ard_hdesc, 0, 1, multi_col_cnt); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_ROWS_PROCESSED_PTR, + SQL_DESC_ROWS_PROCESSED_PTR, 0, 0, SQLULEN* m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +// Descriptor Record Fields Tests + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_AUTO_UNIQUE_VALUE, + SQL_DESC_AUTO_UNIQUE_VALUE, 0, 1, + SQLINTEGER m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_BASE_COLUMN_NAME, + SQL_DESC_BASE_COLUMN_NAME, 255, 1, + SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_BASE_TABLE_NAME, SQL_DESC_BASE_TABLE_NAME, + 255, 1, SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_CASE_SENSITIVE, SQL_DESC_CASE_SENSITIVE, + 0, 1, SQLINTEGER m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_CATALOG_NAME, SQL_DESC_CATALOG_NAME, 255, + 1, SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_CONCISE_TYPE, SQL_DESC_CONCISE_TYPE, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + 
+TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_DATA_PTR, SQL_DESC_DATA_PTR, 0, 1, + SQLPOINTER m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_DATETIME_INTERVAL_CODE, + SQL_DESC_DATETIME_INTERVAL_CODE, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +// This field contains the interval leading precision if the SQL_DESC_TYPE field +// is SQL_INTERVAL. As SQL_INTERVAL support is disabled because some +// applications are unhappy with it, this test should return SQL_ERROR as +// DESC_INVALID_DESCRIPTOR_IDENTIFIER +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_DATETIME_INTERVAL_PRECISION, + SQL_DESC_DATETIME_INTERVAL_PRECISION, 0, 1, + SQLINTEGER m_value_ptr = 0; + , SQL_ERROR, m_ard_hdesc, 1, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_DISPLAY_SIZE, SQL_DESC_DISPLAY_SIZE, 0, 1, + SQLLEN m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_FIXED_PREC_SCALE, + SQL_DESC_FIXED_PREC_SCALE, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_INDICATOR_PTR, SQL_DESC_INDICATOR_PTR, 0, + 1, SQLLEN m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_LABEL, SQL_DESC_LABEL, 255, 1, + SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_LENGTH, SQL_DESC_LENGTH, 0, 1, + SQLULEN m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_LITERAL_PREFIX, SQL_DESC_LITERAL_PREFIX, + 255, 1, SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_LITERAL_SUFFIX, SQL_DESC_LITERAL_SUFFIX, + 255, 1, SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_LOCAL_TYPE_NAME, SQL_DESC_LOCAL_TYPE_NAME, + 255, 1, SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + 
+TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_NAME, SQL_DESC_NAME, 255, 1, + SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_NULLABLE, SQL_DESC_NULLABLE, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_NUM_PREC_RADIX, SQL_DESC_NUM_PREC_RADIX, + 0, 1, SQLINTEGER m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_OCTET_LENGTH, SQL_DESC_OCTET_LENGTH, 0, 1, + SQLLEN m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_OCTET_LENGTH_PTR, + SQL_DESC_OCTET_LENGTH_PTR, 0, 1, SQLLEN m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_PRECISION, SQL_DESC_PRECISION, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ard_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_ROWVER, SQL_DESC_ROWVER, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_SCALE, SQL_DESC_SCALE, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_SCHEMA_NAME, SQL_DESC_SCHEMA_NAME, 255, 1, + SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_SEARCHABLE, SQL_DESC_SEARCHABLE, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_TABLE_NAME, SQL_DESC_TABLE_NAME, 255, 1, + SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_TYPE, SQL_DESC_TYPE, 255, 1, + SQLSMALLINT m_value_ptr = 0; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_TYPE_NAME, SQL_DESC_TYPE_NAME, 255, 1, + SQLCHAR m_value_ptr[255]; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_UNNAMED, SQL_DESC_UNNAMED, 0, 1, + SQLSMALLINT m_value_ptr; + , 
SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_UNSIGNED, SQL_DESC_UNSIGNED, 0, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); + +TEST_SQL_GET_DESC_FIELD(Test_SQL_DESC_UPDATABLE, SQL_DESC_UPDATABLE, 255, 1, + SQLSMALLINT m_value_ptr; + , SQL_SUCCESS, m_ird_hdesc, 0, 0, 0); +#ifdef WIN32 +#pragma warning(pop) +#elif __APPLE__ +#pragma clang diagnostic pop +#endif + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. + system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_descriptors > leaks_itodbc_descriptors"); +#endif + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCExecution/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCExecution/CMakeLists.txt new file mode 100644 index 0000000000..65d75dce18 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCExecution/CMakeLists.txt @@ -0,0 +1,30 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. 
See the License for the specific language governing +# permissions and limitations under the License. +# + +project(itodbc_execution) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_execution.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_execution ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(itodbc_execution odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_execution PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCExecution/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCExecution/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCExecution/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCExecution/pch.h b/sql-odbc/src/IntegrationTests/ITODBCExecution/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCExecution/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCExecution/test_odbc_execution.cpp b/sql-odbc/src/IntegrationTests/ITODBCExecution/test_odbc_execution.cpp new file mode 100644 index 0000000000..b3d1463337 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCExecution/test_odbc_execution.cpp @@ -0,0 +1,404 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" + +#ifdef WIN32 +#include +#else +#endif +#include +#include +#include +#include +#include +// clang-format on + +class TestSQLExecute : public testing::Test { + public: + TestSQLExecute() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + CloseCursor(&m_hstmt, true, true); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLExecute() { + // cleanup any pending stuff, but no exceptions allowed + } + + std::wstring m_query = + L"SELECT Origin FROM kibana_sample_data_flights LIMIT 5"; + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLPrepare : public testing::Test { + public: + TestSQLPrepare() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + CloseCursor(&m_hstmt, true, true); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLPrepare() { + // cleanup any pending stuff, but no exceptions allowed + } + + std::wstring m_query = + L"SELECT Origin FROM kibana_sample_data_flights LIMIT 5"; + std::wstring m_1_col = + L"SELECT Origin FROM kibana_sample_data_flights LIMIT 5"; + std::wstring m_2_col = + L"SELECT Origin, AvgTicketPrice FROM kibana_sample_data_flights LIMIT " + L"5"; + std::wstring m_all_col = + L"SELECT * FROM kibana_sample_data_flights LIMIT 5"; + const SQLSMALLINT m_1_col_cnt = 1; + const SQLSMALLINT m_2_col_cnt = 2; + const SQLSMALLINT m_all_col_cnt = 25; + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLExecDirect : public testing::Test { + 
public: + TestSQLExecDirect() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + CloseCursor(&m_hstmt, true, true); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLExecDirect() { + // cleanup any pending stuff, but no exceptions allowed + } + + std::wstring m_query = + L"SELECT Origin FROM kibana_sample_data_flights LIMIT 5"; + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLSetCursorName : public testing::Test { + public: + TestSQLSetCursorName() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + CloseCursor(&m_hstmt, true, true); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLSetCursorName() { + // cleanup any pending stuff, but no exceptions allowed + } + + std::wstring m_cursor_name = L"test_cursor"; + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLGetCursorName : public testing::Test { + public: + TestSQLGetCursorName() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + ASSERT_EQ(SQLSetCursorName(m_hstmt, (SQLTCHAR*)m_cursor_name.c_str(), + SQL_NTS), + SQL_SUCCESS); + } + + void TearDown() { + CloseCursor(&m_hstmt, true, true); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLGetCursorName() { + // cleanup any pending stuff, but no exceptions allowed + } + + std::wstring m_cursor_name = L"test_cursor"; + SQLSMALLINT m_wrong_buffer_length = 1; + SQLTCHAR m_cursor_name_buf[20]; + SQLSMALLINT 
m_cursor_name_length; + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLCancel : public testing::Test { + public: + TestSQLCancel() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + CloseCursor(&m_hstmt, true, true); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLCancel() { + // cleanup any pending stuff, but no exceptions allowed + } + + typedef struct SQLCancelInfo { + SQLHDBC hstmt; + SQLRETURN ret_code; + } SQLCancelInfo; + + const long long m_min_time_diff = 20; + std::wstring m_query = + L"SELECT * FROM kibana_sample_data_flights AS f WHERE " + L"f.Origin=f.Origin"; + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +TEST_F(TestSQLExecute, NoPrepareCallError) { + SQLRETURN ret = SQLExecute(m_hstmt); + EXPECT_EQ(SQL_ERROR, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLExecute, Success) { + SQLRETURN ret = SQLPrepare(m_hstmt, (SQLTCHAR*)m_query.c_str(), SQL_NTS); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_EQ(SQL_SUCCESS, ret); + ret = SQLExecute(m_hstmt); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLExecute, ResetPrepareError) { + SQLRETURN ret = SQLPrepare(m_hstmt, (SQLTCHAR*)m_query.c_str(), SQL_NTS); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_EQ(SQL_SUCCESS, ret); + ret = SQLPrepare(m_hstmt, NULL, SQL_NTS); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_EQ(SQL_ERROR, ret); + ret = SQLExecute(m_hstmt); + EXPECT_EQ(SQL_ERROR, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLPrepare, Success) { + SQLRETURN ret = SQLPrepare(m_hstmt, 
(SQLTCHAR*)m_query.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLPrepare, PrepareMetadata) { + SQLRETURN ret = SQLPrepare(m_hstmt, (SQLTCHAR*)m_all_col.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + SQLSMALLINT column_count = 0; + EXPECT_TRUE(SQL_SUCCEEDED(SQLNumResultCols(m_hstmt, &column_count))); + EXPECT_EQ(column_count, m_all_col_cnt); + EXPECT_TRUE(SQL_SUCCEEDED(SQLFreeStmt(m_hstmt, SQL_CLOSE))); + + ret = SQLPrepare(m_hstmt, (SQLTCHAR*)m_2_col.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(SQLNumResultCols(m_hstmt, &column_count))); + EXPECT_EQ(column_count, m_2_col_cnt); + EXPECT_TRUE(SQL_SUCCEEDED(SQLFreeStmt(m_hstmt, SQL_CLOSE))); + + ret = SQLPrepare(m_hstmt, (SQLTCHAR*)m_all_col.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(SQLNumResultCols(m_hstmt, &column_count))); + EXPECT_EQ(column_count, m_all_col_cnt); + EXPECT_TRUE(SQL_SUCCEEDED(SQLFreeStmt(m_hstmt, SQL_CLOSE))); + + ret = SQLPrepare(m_hstmt, (SQLTCHAR*)m_1_col.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(SQLNumResultCols(m_hstmt, &column_count))); + EXPECT_EQ(column_count, m_1_col_cnt); + EXPECT_TRUE(SQL_SUCCEEDED(SQLFreeStmt(m_hstmt, SQL_CLOSE))); +} + +TEST_F(TestSQLPrepare, NullQueryError) { + SQLRETURN ret = SQLPrepare(m_hstmt, NULL, SQL_NTS); + EXPECT_EQ(SQL_ERROR, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLExecDirect, Success) { + SQLRETURN ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLExecDirect, NullQueryError) { + SQLRETURN ret = SQLExecDirect(m_hstmt, 
NULL, SQL_NTS); + EXPECT_EQ(SQL_ERROR, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLSetCursorName, Success) { + SQLRETURN ret = + SQLSetCursorName(m_hstmt, (SQLTCHAR*)m_cursor_name.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLGetCursorName, Success) { + SQLRETURN ret = + SQLGetCursorName(m_hstmt, m_cursor_name_buf, + IT_SIZEOF(m_cursor_name_buf), &m_cursor_name_length); + EXPECT_EQ(SQL_SUCCESS, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLGetCursorName, WrongLengthForCursorName) { + SQLRETURN ret = + SQLGetCursorName(m_hstmt, m_cursor_name_buf, m_wrong_buffer_length, + &m_cursor_name_length); + EXPECT_EQ(SQL_SUCCESS_WITH_INFO, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +TEST_F(TestSQLCancel, NULLHandle) { + SQLRETURN ret_exec = SQLCancel(NULL); + EXPECT_EQ(ret_exec, SQL_INVALID_HANDLE); +} + +// This test will fail because we are not cancelling in flight queries at this time. 
+#if 0 +TEST_F(TestSQLCancel, QueryInProgress) { + // Create lambda thread + auto f = [](SQLCancelInfo* info) { + Sleep(10); + info->ret_code = SQLCancel(info->hstmt); + }; + + // Launch cancel thread + SQLCancelInfo cancel_info; + cancel_info.hstmt = m_hstmt; + cancel_info.ret_code = SQL_ERROR; + std::thread thread_object(f, &cancel_info); + + // Time ExecDirect execution + auto start = std::chrono::steady_clock::now(); + SQLRETURN ret_exec = + SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query.c_str(), SQL_NTS); + auto end = std::chrono::steady_clock::now(); + auto time = + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count(); + + // Join thread + thread_object.join(); + + // Check return codes and time diff + ASSERT_LE(m_min_time_diff, time); + EXPECT_EQ(ret_exec, SQL_ERROR); + EXPECT_EQ(cancel_info.ret_code, SQL_SUCCESS); +} +#endif + +TEST_F(TestSQLCancel, QueryNotSent) { + SQLRETURN ret_exec = SQLCancel(m_hstmt); + EXPECT_EQ(ret_exec, SQL_SUCCESS); +} + +TEST_F(TestSQLCancel, QueryFinished) { + SQLRETURN ret_exec = + SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query.c_str(), SQL_NTS); + ASSERT_EQ(ret_exec, SQL_SUCCESS); + + ret_exec = SQLCancel(m_hstmt); + EXPECT_EQ(ret_exec, SQL_SUCCESS); +} + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. + system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." 
: "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_execution > leaks_itodbc_execution"); +#endif + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCHelper/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCHelper/CMakeLists.txt new file mode 100644 index 0000000000..02c003030c --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCHelper/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(itodbc_helper) + +# Source, headers, and include dirs +set(SOURCE_FILES it_odbc_helper.cpp) +set(HEADER_FILES it_odbc_helper.h) +include_directories( + ${UT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} + ) + +# Generate dll (SHARED) +add_library(itodbc_helper SHARED ${SOURCE_FILES} ${HEADER_FILES}) + +# Library dependencies +target_link_libraries(itodbc_helper odfesqlodbc ut_helper gtest_main) +target_compile_definitions(itodbc_helper PUBLIC _UNICODE UNICODE) \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCHelper/it_odbc_helper.cpp b/sql-odbc/src/IntegrationTests/ITODBCHelper/it_odbc_helper.cpp new file mode 100644 index 0000000000..203acff2be --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCHelper/it_odbc_helper.cpp @@ -0,0 +1,204 @@ +/* + * Copyright <2019> Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "it_odbc_helper.h" + +#include +#include + +#define EXECUTION_HANDLER(throw_on_error, log_diag, handle_type, handle, \ + ret_code, statement, error_msg) \ + do { \ + (ret_code) = (statement); \ + if ((log_diag)) \ + LogAnyDiagnostics((handle_type), (handle), (ret_code)); \ + if ((throw_on_error) && !SQL_SUCCEEDED((ret_code))) \ + throw std::runtime_error((error_msg)); \ + } while (0); + +void AllocConnection(SQLHENV* db_environment, SQLHDBC* db_connection, + bool throw_on_error, bool log_diag) { + SQLRETURN ret_code; + EXECUTION_HANDLER( + throw_on_error, log_diag, SQL_HANDLE_ENV, *db_environment, ret_code, + SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, db_environment), + "Failed to allocate handle for environment."); + EXECUTION_HANDLER(throw_on_error, log_diag, SQL_ATTR_ODBC_VERSION, + *db_environment, ret_code, + SQLSetEnvAttr(*db_environment, SQL_ATTR_ODBC_VERSION, + (void*)SQL_OV_ODBC3, 0), + "Failed to set attributes for environment."); + EXECUTION_HANDLER( + throw_on_error, log_diag, SQL_HANDLE_DBC, *db_connection, ret_code, + SQLAllocHandle(SQL_HANDLE_DBC, *db_environment, db_connection), + "Failed to allocate handle for db connection."); +} + +void ITDriverConnect(SQLTCHAR* connection_string, SQLHENV* db_environment, + SQLHDBC* db_connection, bool throw_on_error, + bool log_diag) { + SQLRETURN ret_code; + EXECUTION_HANDLER( + throw_on_error, log_diag, SQL_HANDLE_ENV, *db_environment, 
ret_code, + SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, db_environment), + "Failed to allocate handle for environment."); + EXECUTION_HANDLER(throw_on_error, log_diag, SQL_ATTR_ODBC_VERSION, + *db_environment, ret_code, + SQLSetEnvAttr(*db_environment, SQL_ATTR_ODBC_VERSION, + (void*)SQL_OV_ODBC3, 0), + "Failed to set attributes for environment."); + EXECUTION_HANDLER( + throw_on_error, log_diag, SQL_HANDLE_DBC, *db_connection, ret_code, + SQLAllocHandle(SQL_HANDLE_DBC, *db_environment, db_connection), + "Failed to allocate handle for db connection."); + + SQLTCHAR out_conn_string[1024]; + SQLSMALLINT out_conn_string_length; + + EXECUTION_HANDLER( + throw_on_error, log_diag, SQL_HANDLE_DBC, *db_connection, ret_code, + SQLDriverConnect(*db_connection, NULL, connection_string, SQL_NTS, + out_conn_string, IT_SIZEOF(out_conn_string), + &out_conn_string_length, SQL_DRIVER_COMPLETE), + "Failed to connect to driver."); +} + +void AllocStatement(SQLTCHAR* connection_string, SQLHENV* db_environment, + SQLHDBC* db_connection, SQLHSTMT* h_statement, + bool throw_on_error, bool log_diag) { + SQLRETURN ret_code; + ITDriverConnect(connection_string, db_environment, db_connection, + throw_on_error, log_diag); + EXECUTION_HANDLER( + throw_on_error, log_diag, SQL_HANDLE_STMT, h_statement, ret_code, + SQLAllocHandle(SQL_HANDLE_STMT, *db_connection, h_statement), + "Failed to allocate handle for statement."); +} + +void LogAnyDiagnostics(SQLSMALLINT handle_type, SQLHANDLE handle, SQLRETURN ret, + SQLTCHAR* msg_return, const SQLSMALLINT sz) { + if (handle == NULL) { + printf("Failed to log diagnostics, handle is NULL\n"); + return; + } + + // Only log diagnostics when there's something to log. 
+ switch (ret) { + case SQL_SUCCESS_WITH_INFO: + printf("SQL_SUCCESS_WITH_INFO: "); + break; + case SQL_ERROR: + printf("SQL_ERROR: "); + break; + default: + return; + } + + SQLRETURN diag_ret; + SQLTCHAR sqlstate[6]; + SQLINTEGER native_error_code; + SQLTCHAR diag_message[SQL_MAX_MESSAGE_LENGTH]; + SQLSMALLINT message_length; + + SQLSMALLINT rec_number = 0; + do { + rec_number++; + diag_ret = SQLGetDiagRec( + handle_type, handle, rec_number, sqlstate, &native_error_code, + msg_return == NULL ? diag_message : msg_return, + msg_return == NULL ? IT_SIZEOF(diag_message) : sz, &message_length); + if (diag_ret == SQL_INVALID_HANDLE) + printf("Invalid handle\n"); + else if (SQL_SUCCEEDED(diag_ret)) + printf("SQLState: %S: %S\n", sqlstate, + (msg_return == NULL) ? diag_message : msg_return); + } while (diag_ret == SQL_SUCCESS); + + if (diag_ret == SQL_NO_DATA && rec_number == 1) + printf("No error information\n"); +} + +bool CheckSQLSTATE(SQLSMALLINT handle_type, SQLHANDLE handle, + SQLWCHAR* expected_sqlstate, bool log_message) { + (void)log_message; + + SQLWCHAR sqlstate[6] = {0}; + SQLINTEGER native_diag_code; + SQLWCHAR diag_message[SQL_MAX_MESSAGE_LENGTH] = {0}; + SQLSMALLINT message_length; + + SQLSMALLINT record_number = 0; + SQLRETURN diag_ret; + do { + record_number++; + diag_ret = SQLGetDiagRec(handle_type, handle, record_number, sqlstate, + &native_diag_code, diag_message, + IT_SIZEOF(diag_message), &message_length); + + if (1) { + std::wcout << "SQLState " << sqlstate << ": " << diag_message + << std::endl; + } + // Only return if this SQLSTATE is the expected state, otherwise keep + // checking + if (std::wstring(sqlstate) == std::wstring(expected_sqlstate)) { + return true; + } + } while (diag_ret == SQL_SUCCESS); + + // Could not find expected SQLSTATE in available diagnostic records + return false; +} + +bool CheckSQLSTATE(SQLSMALLINT handle_type, SQLHANDLE handle, + SQLWCHAR* expected_sqlstate) { + return CheckSQLSTATE(handle_type, handle, 
expected_sqlstate, false); +} + +std::wstring QueryBuilder(const std::wstring& column, + const std::wstring& dataset, + const std::wstring& count) { + return L"SELECT " + column + L" FROM " + dataset + L" LIMIT " + count; +} + +std::wstring QueryBuilder(const std::wstring& column, + const std::wstring& dataset) { + return L"SELECT " + column + L" FROM " + dataset; +} + +void CloseCursor(SQLHSTMT* h_statement, bool throw_on_error, bool log_diag) { + SQLRETURN ret_code; + EXECUTION_HANDLER(throw_on_error, log_diag, SQL_HANDLE_STMT, *h_statement, + ret_code, SQLCloseCursor(*h_statement), + "Failed to set allocate handle for statement."); +} + +// std::wstring SQLTCHAR_to_string(SQLTCHAR* src) { +// // std::wstring _src = (char16_t*)src; +// return wstring_to_string(_src); +// } + +std::string u16string_to_string(const std::u16string& src) { + return std::wstring_convert< std::codecvt_utf8_utf16< char16_t >, + char16_t >{} + .to_bytes(src); +} + +std::u16string string_to_u16string(const std::string& src) { + return std::wstring_convert< std::codecvt_utf8_utf16< char16_t >, + char16_t >{} + .from_bytes(src); +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCHelper/it_odbc_helper.h b/sql-odbc/src/IntegrationTests/ITODBCHelper/it_odbc_helper.h new file mode 100644 index 0000000000..f9835be378 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCHelper/it_odbc_helper.h @@ -0,0 +1,83 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef IT_ODBC_HELPER_H +#define IT_ODBC_HELPER_H + +#ifdef WIN32 +#include +#endif +#include +#include + +#include +#include + +#include "unit_test_helper.h" + +// SQLSTATEs +#define SQLSTATE_STRING_DATA_RIGHT_TRUNCATED (SQLWCHAR*)L"01004" +#define SQLSTATE_INVALID_DESCRIPTOR_INDEX (SQLWCHAR*)L"07009" +#define SQLSTATE_GENERAL_ERROR (SQLWCHAR*)L"HY000" +#define SQLSTATE_INVALID_DESCRIPTOR_FIELD_IDENTIFIER (SQLWCHAR*)L"HY091" + +#define IT_SIZEOF(x) (NULL == (x) ? 0 : (sizeof((x)) / sizeof((x)[0]))) + +std::vector< std::pair< std::wstring, std::wstring > > conn_str_pair = { + {L"Driver", L"{Elasticsearch ODBC}"}, + {L"host", (use_ssl ? L"https://localhost" : L"localhost")}, + {L"port", L"9200"}, + {L"user", L"admin"}, + {L"password", L"admin"}, + {L"auth", L"BASIC"}, + {L"useSSL", (use_ssl ? L"1" : L"0")}, + {L"hostnameVerification", L"0"}, + {L"logLevel", L"0"}, + {L"logOutput", L"C:\\"}, + {L"responseTimeout", L"10"}, + {L"fetchSize", L"0"}}; + +std::wstring conn_string = []() { + std::wstring temp; + for (auto it : conn_str_pair) + temp += it.first + L"=" + it.second + L";"; + return temp; +}(); + +void AllocConnection(SQLHENV* db_environment, SQLHDBC* db_connection, + bool throw_on_error, bool log_diag); +void ITDriverConnect(SQLTCHAR* connection_string, SQLHENV* db_environment, + SQLHDBC* db_connection, bool throw_on_error, + bool log_diag); +void AllocStatement(SQLTCHAR* connection_string, SQLHENV* db_environment, + SQLHDBC* db_connection, SQLHSTMT* h_statement, + bool throw_on_error, bool log_diag); +void LogAnyDiagnostics(SQLSMALLINT handle_type, SQLHANDLE handle, SQLRETURN ret, + SQLTCHAR* msg_return = NULL, const SQLSMALLINT sz = 0); +bool CheckSQLSTATE(SQLSMALLINT handle_type, SQLHANDLE handle, + SQLWCHAR* expected_sqlstate, bool log_message); +bool CheckSQLSTATE(SQLSMALLINT handle_type, SQLHANDLE handle, + SQLWCHAR* expected_sqlstate); 
+std::wstring QueryBuilder(const std::wstring& column, + const std::wstring& dataset, + const std::wstring& count); +std::wstring QueryBuilder(const std::wstring& column, + const std::wstring& dataset); +void CloseCursor(SQLHSTMT* h_statement, bool throw_on_error, bool log_diag); +std::string u16string_to_string(const std::u16string& src); +std::u16string string_to_u16string(const std::string& src); + +#endif diff --git a/sql-odbc/src/IntegrationTests/ITODBCInfo/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCInfo/CMakeLists.txt new file mode 100644 index 0000000000..d5876507eb --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCInfo/CMakeLists.txt @@ -0,0 +1,31 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(itodbc_info) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_info.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_info ${SOURCE_FILES}) + +# Library dependencies +target_code_coverage(itodbc_info PUBLIC AUTO ALL) +target_link_libraries(itodbc_info odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_info PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCInfo/packages.config b/sql-odbc/src/IntegrationTests/ITODBCInfo/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCInfo/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCInfo/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCInfo/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCInfo/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. 
+// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCInfo/pch.h b/sql-odbc/src/IntegrationTests/ITODBCInfo/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCInfo/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCInfo/test_odbc_info.cpp b/sql-odbc/src/IntegrationTests/ITODBCInfo/test_odbc_info.cpp new file mode 100644 index 0000000000..c194d2a24c --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCInfo/test_odbc_info.cpp @@ -0,0 +1,307 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +#include "version.h" +#include +#ifndef WIN32 +#include +#endif +// clang-format on + +class TestSQLGetInfo : public testing::Test { + public: + TestSQLGetInfo() { + } + + void SetUp() { + ITDriverConnect((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, true, + true); + } + + void TearDown() { + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLGetInfo() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; +}; + +// 1 for ver1 >= ver2, 0 for ver1 < ver2, -1 for error +int Ver1GEVer2(std::wstring ver_1_str, std::wstring ver_2_str) { + auto VersionSplit = [&](std::vector< unsigned long >& output, + std::wstring& input, std::wstring delim) { + try { + size_t start = 0; + size_t end = input.find(delim); + while (end != std::string::npos) { + output.push_back(std::stoul(input.substr(start, end - start))); + start = end + delim.length(); + end = input.find(delim, start); + } + output.push_back(std::stoul(input.substr(start, end))); + } catch (...) { + output.clear(); + } + }; + + std::vector< unsigned long > ver_1; + std::vector< unsigned long > ver_2; + VersionSplit(ver_1, ver_1_str, L"."); + VersionSplit(ver_2, ver_2_str, L"."); + if ((ver_1.size() == 0) || (ver_2.size() == 0)) + return -1; + + size_t cnt = ((ver_1.size() < ver_2.size()) ? ver_1.size() : ver_2.size()); + for (size_t i = 0; i < cnt; i++) { + if (ver_1[i] != ver_2[i]) + return (ver_1[i] >= ver_2[i]) ? 1 : 0; + } + if (ver_1.size() != ver_2.size()) + return (ver_1.size() > ver_2.size()) ? 
1 : 0; + + // They are identical + return 1; +} + +// Test template for SQLGetInfo +#define TEST_SQL_GET_INFO_STRING(test_name, info_type, expected_value) \ + TEST_F(TestSQLGetInfo, test_name) { \ + SQLTCHAR info_value_ptr[1024]; \ + SQLSMALLINT string_length_ptr; \ + SQLRETURN ret = \ + SQLGetInfo(m_conn, info_type, info_value_ptr, \ + IT_SIZEOF(info_value_ptr), &string_length_ptr); \ + LogAnyDiagnostics(SQL_HANDLE_DBC, m_conn, ret); \ + EXPECT_EQ(std::wstring(info_value_ptr), std::wstring(expected_value)); \ + } + +// Test template for SQLGetInfo +#define TEST_SQL_GET_INFO_VERSION_GE(test_name, info_type, expected_value) \ + TEST_F(TestSQLGetInfo, test_name) { \ + SQLTCHAR info_value_ptr[1024]; \ + SQLSMALLINT string_length_ptr; \ + SQLRETURN ret = \ + SQLGetInfo(m_conn, info_type, info_value_ptr, \ + IT_SIZEOF(info_value_ptr), &string_length_ptr); \ + LogAnyDiagnostics(SQL_HANDLE_DBC, m_conn, ret); \ + EXPECT_EQ(Ver1GEVer2(info_value_ptr, expected_value), 1); \ + } + +// Test template for SQLGetInfo +#define TEST_SQL_GET_INFO_UINT_MASK(test_name, info_type, expected_value) \ + TEST_F(TestSQLGetInfo, test_name) { \ + SQLUINTEGER info_value_ptr; \ + SQLSMALLINT string_length_ptr; \ + SQLRETURN ret = \ + SQLGetInfo(m_conn, info_type, &info_value_ptr, \ + sizeof(info_value_ptr), &string_length_ptr); \ + LogAnyDiagnostics(SQL_HANDLE_DBC, m_conn, ret); \ + EXPECT_EQ((size_t)info_value_ptr, (size_t)expected_value); \ + } + +// Test template for SQLGetInfo +#define TEST_SQL_GET_INFO_UINT16(test_name, info_type, expected_value) \ + TEST_F(TestSQLGetInfo, test_name) { \ + SQLUSMALLINT info_value_ptr; \ + SQLSMALLINT string_length_ptr; \ + SQLRETURN ret = \ + SQLGetInfo(m_conn, info_type, &info_value_ptr, \ + sizeof(info_value_ptr), &string_length_ptr); \ + LogAnyDiagnostics(SQL_HANDLE_DBC, m_conn, ret); \ + EXPECT_EQ(info_value_ptr, expected_value); \ + } + +///////////////// +// Driver Info // +///////////////// + +TEST_SQL_GET_INFO_STRING(SQLDriverName, SQL_DRIVER_NAME, 
L"odfesqlodbc.dll"); +TEST_SQL_GET_INFO_STRING(SQLDriverODBCVer, SQL_DRIVER_ODBC_VER, L"03.51"); + +std::wstring version = std::wstring_convert< std::codecvt_utf8_utf16< wchar_t >, wchar_t >{} + .from_bytes(ELASTICSEARCHDRIVERVERSION); +TEST_SQL_GET_INFO_STRING(SQLDriverVer, SQL_DRIVER_VER, version); + +TEST_SQL_GET_INFO_UINT16(SQLGetDataExtensions, SQL_GETDATA_EXTENSIONS, + (SQL_GD_ANY_COLUMN | SQL_GD_ANY_ORDER | SQL_GD_BOUND + | SQL_GD_BLOCK)); +TEST_SQL_GET_INFO_STRING(SQLSearchPatternEscape, SQL_SEARCH_PATTERN_ESCAPE, L""); + +////////////////////// +// Data Source Info // +////////////////////// + +TEST_SQL_GET_INFO_UINT16(SQLCursorCommitBehavior, SQL_CURSOR_COMMIT_BEHAVIOR, + SQL_CB_CLOSE); +TEST_SQL_GET_INFO_UINT16(SQLTxnCapable, SQL_TXN_CAPABLE, SQL_TC_NONE); +TEST_SQL_GET_INFO_UINT16(SQLConcatNullBehavior, SQL_CONCAT_NULL_BEHAVIOR, + SQL_CB_NULL); +TEST_SQL_GET_INFO_STRING(SQLSchemaTerm, SQL_SCHEMA_TERM, L"schema"); +TEST_SQL_GET_INFO_STRING(SQLCatalogTerm, SQL_CATALOG_TERM, L"catalog"); + +/////////////// +// DBMS Info // +/////////////// + +TEST_SQL_GET_INFO_STRING(SQLDBMSName, SQL_DBMS_NAME, L"Elasticsearch"); +TEST_SQL_GET_INFO_VERSION_GE(SQLDBMSVer, SQL_DBMS_VER, L"7.1.1"); + +/////////////////// +// Supported SQL // +/////////////////// + +TEST_SQL_GET_INFO_STRING(SQLColumnAlias, SQL_COLUMN_ALIAS, L"Y"); +TEST_SQL_GET_INFO_UINT16(SQLGroupBy, SQL_GROUP_BY, SQL_GB_GROUP_BY_EQUALS_SELECT); +TEST_SQL_GET_INFO_STRING(SQLIdentifierQuoteChar, SQL_IDENTIFIER_QUOTE_CHAR, + L"`"); +TEST_SQL_GET_INFO_UINT_MASK(SQLOJCapabilities, SQL_OJ_CAPABILITIES, + SQL_OJ_LEFT | SQL_OJ_RIGHT | SQL_OJ_NOT_ORDERED + | SQL_OJ_ALL_COMPARISON_OPS); +TEST_SQL_GET_INFO_UINT_MASK(SQLSchemaUsage, SQL_SCHEMA_USAGE, 0); +TEST_SQL_GET_INFO_UINT16(SQLQuotedIdentifierCase, SQL_QUOTED_IDENTIFIER_CASE, + SQL_IC_SENSITIVE); +TEST_SQL_GET_INFO_STRING(SQLSpecialCharacters, SQL_SPECIAL_CHARACTERS, L"_"); +TEST_SQL_GET_INFO_UINT_MASK(SQLODBCInterfaceConformance, + SQL_ODBC_INTERFACE_CONFORMANCE, 
SQL_OIC_CORE); +TEST_SQL_GET_INFO_UINT_MASK(SQLSQLConformance, SQL_SQL_CONFORMANCE, + SQL_SC_SQL92_ENTRY); +TEST_SQL_GET_INFO_UINT_MASK(SQLCatalogUsage, SQL_CATALOG_USAGE, + SQL_CU_DML_STATEMENTS); +TEST_SQL_GET_INFO_UINT16(SQLCatalogLocation, SQL_CATALOG_LOCATION, SQL_QL_START); +TEST_SQL_GET_INFO_STRING(SQLCatalogNameSeparator, SQL_CATALOG_NAME_SEPARATOR, + L"."); +TEST_SQL_GET_INFO_UINT_MASK(SQLSQL92Predicates, SQL_SQL92_PREDICATES, + SQL_SP_BETWEEN | SQL_SP_COMPARISON | SQL_SP_IN + | SQL_SP_ISNULL | SQL_SP_LIKE); +TEST_SQL_GET_INFO_UINT_MASK(SQLSQL92RelationalJoinOperators, + SQL_SQL92_RELATIONAL_JOIN_OPERATORS, + SQL_SRJO_CROSS_JOIN | SQL_SRJO_INNER_JOIN + | SQL_SRJO_LEFT_OUTER_JOIN + | SQL_SRJO_RIGHT_OUTER_JOIN); +TEST_SQL_GET_INFO_UINT_MASK(SQLSQL92ValueExpressions, + SQL_SQL92_VALUE_EXPRESSIONS, + SQL_SVE_CASE | SQL_SVE_CAST); +TEST_SQL_GET_INFO_UINT_MASK(SQLDatetimeLiterals, SQL_DATETIME_LITERALS, 0); +TEST_SQL_GET_INFO_STRING(SQLOrderByColumnsInSelect, SQL_ORDER_BY_COLUMNS_IN_SELECT, L"Y"); +TEST_SQL_GET_INFO_STRING(SQLCatalogName, SQL_CATALOG_NAME, L"N"); + +//////////////// +// Conversion // +//////////////// + +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertInteger, SQL_CONVERT_INTEGER, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertSmallint, SQL_CONVERT_SMALLINT, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertTinyint, SQL_CONVERT_TINYINT, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertBit, SQL_CONVERT_BIT, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertVarchar, SQL_CONVERT_VARCHAR, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertBigint, SQL_CONVERT_BIGINT, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertDecimal, SQL_CONVERT_DECIMAL, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertDouble, SQL_CONVERT_DOUBLE, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertFloat, SQL_CONVERT_FLOAT, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertNumeric, SQL_CONVERT_NUMERIC, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertReal, SQL_CONVERT_REAL, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertDate, SQL_CONVERT_DATE, 0); 
+TEST_SQL_GET_INFO_UINT_MASK(SQLConvertTime, SQL_CONVERT_TIME, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertTimestamp, SQL_CONVERT_TIMESTAMP, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertBinary, SQL_CONVERT_BINARY, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertLongvarbinary, SQL_CONVERT_LONGVARBINARY, + 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertVarbinary, SQL_CONVERT_VARBINARY, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertChar, SQL_CONVERT_CHAR, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertLongVarchar, SQL_CONVERT_LONGVARCHAR, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertWChar, SQL_CONVERT_WCHAR, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertWLongVarchar, SQL_CONVERT_WLONGVARCHAR, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertWVarchar, SQL_CONVERT_WVARCHAR, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertGuid, SQL_CONVERT_GUID, 0); + + +////////////////////// +// Scalar Functions // +////////////////////// + +TEST_SQL_GET_INFO_UINT_MASK(SQLConvertFunctions, SQL_CONVERT_FUNCTIONS, + SQL_FN_CVT_CAST); +TEST_SQL_GET_INFO_UINT_MASK(SQLNumericFunctions, SQL_NUMERIC_FUNCTIONS, + SQL_FN_NUM_ABS | SQL_FN_NUM_ATAN | SQL_FN_NUM_ATAN2 + | SQL_FN_NUM_COS | SQL_FN_NUM_COT + | SQL_FN_NUM_DEGREES | SQL_FN_NUM_FLOOR + | SQL_FN_NUM_LOG | SQL_FN_NUM_LOG10 + | SQL_FN_NUM_PI | SQL_FN_NUM_POWER + | SQL_FN_NUM_RADIANS | SQL_FN_NUM_ROUND + | SQL_FN_NUM_SIGN | SQL_FN_NUM_SIN + | SQL_FN_NUM_SQRT | SQL_FN_NUM_TAN); +TEST_SQL_GET_INFO_UINT_MASK(SQLStringFunctions, SQL_STRING_FUNCTIONS, + SQL_FN_STR_ASCII | SQL_FN_STR_LENGTH + | SQL_FN_STR_LTRIM | SQL_FN_STR_REPLACE + | SQL_FN_STR_RTRIM | SQL_FN_STR_SUBSTRING); +TEST_SQL_GET_INFO_UINT_MASK(SQLSystemFunctions, SQL_SYSTEM_FUNCTIONS, + SQL_FN_SYS_IFNULL); +TEST_SQL_GET_INFO_UINT_MASK(SQLTimedateAddIntervals, SQL_TIMEDATE_ADD_INTERVALS, + 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLTimedateDiffIntervals, + SQL_TIMEDATE_DIFF_INTERVALS, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLTimedateFunctions, SQL_TIMEDATE_FUNCTIONS, + SQL_FN_TD_CURDATE | SQL_FN_TD_DAYOFMONTH + | SQL_FN_TD_MONTH | 
SQL_FN_TD_MONTHNAME + | SQL_FN_TD_NOW | SQL_FN_TD_YEAR); +TEST_SQL_GET_INFO_UINT_MASK(SQLSQL92DatetimeFunctions, + SQL_SQL92_DATETIME_FUNCTIONS, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLSQL92NumericValueFunctions, + SQL_SQL92_NUMERIC_VALUE_FUNCTIONS, 0); +TEST_SQL_GET_INFO_UINT_MASK(SQLSQL92StringFunctions, SQL_SQL92_STRING_FUNCTIONS, + SQL_SSF_LOWER | SQL_SSF_UPPER); + +//////////// +// Limits // +//////////// + +TEST_SQL_GET_INFO_UINT16(SQLMaxIdentifierLen, SQL_MAX_IDENTIFIER_LEN, SHRT_MAX); +TEST_SQL_GET_INFO_UINT16(SQLMaxColumnsInGroupBy, SQL_MAX_COLUMNS_IN_GROUP_BY, 0); +TEST_SQL_GET_INFO_UINT16(SQLMaxColumnsInOrderBy, SQL_MAX_COLUMNS_IN_ORDER_BY, 0); +TEST_SQL_GET_INFO_UINT16(SQLMaxColumnsInSelect, SQL_MAX_COLUMNS_IN_SELECT, 0); + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. + system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_info > leaks_itodbc_info"); +#endif + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCPagination/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCPagination/CMakeLists.txt new file mode 100644 index 0000000000..d648f867df --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCPagination/CMakeLists.txt @@ -0,0 +1,31 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(itodbc_pagination) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_pagination.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_pagination ${SOURCE_FILES}) + +# Library dependencies +target_code_coverage(itodbc_pagination PUBLIC AUTO ALL) +target_link_libraries(itodbc_pagination odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_pagination PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCPagination/packages.config b/sql-odbc/src/IntegrationTests/ITODBCPagination/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCPagination/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCPagination/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCPagination/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCPagination/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCPagination/pch.h b/sql-odbc/src/IntegrationTests/ITODBCPagination/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCPagination/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCPagination/test_odbc_pagination.cpp b/sql-odbc/src/IntegrationTests/ITODBCPagination/test_odbc_pagination.cpp new file mode 100644 index 0000000000..5acbb02c1e --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCPagination/test_odbc_pagination.cpp @@ -0,0 +1,156 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +// clang-format on + +#define BIND_SIZE 255 +#define SINGLE_ROW 1 +typedef struct Col { + SQLLEN data_len; + SQLCHAR data_dat[BIND_SIZE]; +} Col; + +class TestPagination : public testing::Test { + public: + TestPagination() { + } + + void SetUp() { + AllocConnection(&m_env, &m_conn, true, true); + } + + void TearDown() { + if (SQL_NULL_HDBC != m_conn) { + SQLFreeHandle(SQL_HANDLE_DBC, m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + int GetTotalRowsAfterQueryExecution() { + SQLAllocHandle(SQL_HANDLE_STMT, m_conn, &m_hstmt); + SQLRETURN ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query.c_str(), SQL_NTS); + EXPECT_EQ(SQL_SUCCESS, ret); + + // Get column count + SQLSMALLINT total_columns = -1; + SQLNumResultCols(m_hstmt, &total_columns); + std::vector< std::vector< Col > > cols(total_columns); + for (size_t i = 0; i < cols.size(); i++) { + cols[i].resize(SINGLE_ROW); + } + + // Bind and fetch + for (size_t i = 0; i < cols.size(); i++) { + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)i + 1, SQL_C_CHAR, + (SQLPOINTER)&cols[i][0].data_dat[i], 255, + &cols[i][0].data_len); + } + + // Get total number of rows + int row_count = 0; + while (SQLFetch(m_hstmt) == SQL_SUCCESS) { + row_count++; + } + return row_count; + } + + ~TestPagination() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; + SQLTCHAR m_out_conn_string[1024]; + SQLSMALLINT m_out_conn_string_length; + std::wstring m_query = + L"SELECT Origin FROM kibana_sample_data_flights"; +}; + +TEST_F(TestPagination, EnablePagination) { + // Default fetch size is -1 
for driver. + // Server default page size for all cursor requests is 1000. + + //Total number of rows in kibana_sample_data_flights table + int total_rows = 13059; + std::wstring fetch_size_15_conn_string = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"host=https://localhost;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;" + : L"Driver={Elasticsearch ODBC};" + L"host=localhost;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;"; + ASSERT_EQ(SQL_SUCCESS, + SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)fetch_size_15_conn_string.c_str(), + SQL_NTS, m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_PROMPT)); + EXPECT_EQ(total_rows, GetTotalRowsAfterQueryExecution()); +} + +TEST_F(TestPagination, DisablePagination) { + // Fetch size 0 implies no pagination + int total_rows = 200; + std::wstring fetch_size_15_conn_string = + use_ssl ? L"Driver={Elasticsearch ODBC};" + L"host=https://localhost;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"1;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;fetchSize=0;" + : L"Driver={Elasticsearch ODBC};" + L"host=localhost;port=9200;" + L"user=admin;password=admin;auth=BASIC;useSSL=" + L"0;hostnameVerification=0;logLevel=0;logOutput=C:\\;" + L"responseTimeout=10;fetchSize=0;"; + ASSERT_EQ(SQL_SUCCESS, + SQLDriverConnect( + m_conn, NULL, (SQLTCHAR*)fetch_size_15_conn_string.c_str(), + SQL_NTS, m_out_conn_string, IT_SIZEOF(m_out_conn_string), + &m_out_conn_string_length, SQL_DRIVER_PROMPT)); + EXPECT_EQ(total_rows, GetTotalRowsAfterQueryExecution()); +} + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. 
+ system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_pagination > leaks_itodbc_pagination"); +#endif + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCResults/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCResults/CMakeLists.txt new file mode 100644 index 0000000000..564baa65a3 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCResults/CMakeLists.txt @@ -0,0 +1,31 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(itodbc_results) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_results.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_results ${SOURCE_FILES}) + +# Library dependencies +target_code_coverage(itodbc_results PUBLIC AUTO ALL) +target_link_libraries(itodbc_results odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_results PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCResults/packages.config b/sql-odbc/src/IntegrationTests/ITODBCResults/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCResults/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCResults/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCResults/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCResults/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. 
+// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCResults/pch.h b/sql-odbc/src/IntegrationTests/ITODBCResults/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCResults/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCResults/test_odbc_results.cpp b/sql-odbc/src/IntegrationTests/ITODBCResults/test_odbc_results.cpp new file mode 100644 index 0000000000..f46e160343 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCResults/test_odbc_results.cpp @@ -0,0 +1,1019 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +#include +// clang-format on + +typedef struct DescribeColumnData { + std::wstring column_name; + SQLSMALLINT data_type; +} DescribeColumnData; + +const DescribeColumnData column_data[] = {{L"Origin", SQL_WVARCHAR}, + {L"FlightNum", SQL_WVARCHAR}, + {L"FlightDelay", SQL_BIT}, + {L"DistanceMiles", SQL_REAL}, + {L"FlightTimeMin", SQL_REAL}, + {L"OriginWeather", SQL_WVARCHAR}, + {L"dayOfWeek", SQL_INTEGER}, + {L"AvgTicketPrice", SQL_REAL}, + {L"Carrier", SQL_WVARCHAR}, + {L"FlightDelayMin", SQL_INTEGER}, + {L"OriginRegion", SQL_WVARCHAR}, + {L"DestAirportID", SQL_WVARCHAR}, + {L"FlightDelayType", SQL_WVARCHAR}, + {L"timestamp", SQL_TYPE_TIMESTAMP}, + {L"Dest", SQL_WVARCHAR}, + {L"FlightTimeHour", SQL_WVARCHAR}, + {L"Cancelled", SQL_BIT}, + {L"DistanceKilometers", SQL_REAL}, + {L"OriginCityName", SQL_WVARCHAR}, + {L"DestWeather", SQL_WVARCHAR}, + {L"OriginCountry", SQL_WVARCHAR}, + {L"DestCountry", SQL_WVARCHAR}, + {L"DestRegion", SQL_WVARCHAR}, + {L"OriginAirportID", SQL_WVARCHAR}, + {L"DestCityName", SQL_WVARCHAR}}; +const std::wstring flight_data_set = L"kibana_sample_data_flights"; +const std::wstring multi_type_data_set = L"kibana_sample_data_types"; +const std::wstring single_col = L"Origin"; +// TODO (#110): Improve sample data result checks +const std::wstring m_expected_origin_column_data_1 = + L"Frankfurt am Main Airport"; +const std::wstring m_expected_origin_column_data_2 = L"Olenya Air Base"; +const std::wstring single_float_col = L"DistanceMiles"; +const std::wstring single_integer_col = L"FlightDelayMin"; +const std::wstring single_timestamp_col = L"timestamp"; +const std::wstring single_bit_col = L"Cancelled"; +const std::wstring single_row_offset_3 = L"1 OFFSET 3"; +const uint32_t data_cnt = 2; +const std::wstring single_row_offsets[data_cnt] = {L"1", L"1 OFFSET 1"}; +const std::wstring type_boolean = L"type_boolean"; +const std::vector< bool 
> type_boolean_vals = {false, true, true}; +const std::wstring type_byte = L"type_byte"; +const std::vector< int16_t > type_byte_vals = {100, -120}; +const std::wstring type_short = L"type_short"; +const std::vector< int16_t > type_short_vals = {1000, -2000}; +const std::wstring type_integer = L"type_integer"; +const std::vector< int32_t > type_integer_vals = {250000000, -350000000}; +const std::wstring type_long = L"type_long"; +const std::vector< int64_t > type_long_vals = {8000000000, -8010000000}; +const std::wstring type_half_float = L"type_half_float"; +const std::vector< float > type_half_float_vals = {1.115f, -2.115f}; +const std::wstring type_float = L"type_float"; +const std::vector< float > type_float_vals = {2.1512f, -3.1512f}; +const std::wstring type_double = L"type_double"; +const std::vector< double > type_double_vals = {25235.2215, -5335.2215}; +const std::wstring type_scaled_float = L"type_scaled_float"; +const std::vector< double > type_scaled_float_vals = {100, -100.1}; +const std::wstring type_keyword = L"type_keyword"; +const std::vector< std::wstring > type_keyword_vals = {L"hello", L"goodbye"}; +const std::wstring type_text = L"type_text"; +const std::vector< std::wstring > type_text_vals = {L"world", L"planet"}; +const std::wstring type_date = L"type_date"; +const std::vector< TIMESTAMP_STRUCT > type_date_vals = { + {2016, 02, 21, 12, 23, 52, 803000000}, + {2018, 07, 22, 12, 23, 52, 803000000}}; +const std::wstring type_object = L"type_object"; +const std::wstring type_nested = L"type_nested"; +// TODO (#110): Improve sample data result checks +const float distance_miles_1 = 1738.98f; +const float distance_miles_2 = 10247.90f; +const int delay_offset_3_1 = 0; +const int delay_offset_3_2 = 180; +const SQLSMALLINT single_col_name_length = 6; +const SQLSMALLINT single_col_data_type = SQL_WVARCHAR; +const SQLULEN single_col_column_size = 25; +const SQLSMALLINT single_col_decimal_digit = 0; +const SQLSMALLINT single_col_nullable = 2; +const 
std::wstring single_row = L"1"; +const size_t multi_row_cnt = 25; +const size_t single_row_cnt = 1; +const size_t multi_col_cnt = 25; +const size_t single_col_cnt = 1; +const size_t single_row_rd_cnt = 1; +const size_t multi_row_rd_cnt_aligned = 5; +const size_t multi_row_rd_cnt_misaligned = 3; +const std::wstring multi_col = L"*"; +const std::wstring multi_row = std::to_wstring(multi_row_cnt); +typedef struct Col { + SQLLEN data_len; + SQLCHAR data_dat[255]; +} Col; + +template < class T > +void CheckData(const std::wstring& type_name, const std::wstring& data_set, + const std::wstring row, SQLHSTMT* hstmt, + const SQLUSMALLINT ordinal_pos, const SQLUSMALLINT type, + const std::vector< T >& expected_val, const SQLLEN data_size); +template < class T > +inline bool FuzzyEquals(T a, T b, T epsil); +void BindColumns(std::vector< std::vector< Col > >& cols, SQLHSTMT* hstmt); +void ExecuteQuery(const std::wstring& column, const std::wstring& dataset, + const std::wstring& count, SQLHSTMT* hstmt); +void ExtendedFetch(const size_t exp_row_cnt, const size_t exp_read_cnt, + const bool aligned, const size_t total_row_cnt, + SQLHSTMT* hstmt); +void Fetch(size_t exp_row_cnt, SQLHSTMT* hstmt); +void QueryBind(const size_t row_cnt, const size_t col_cnt, + const size_t row_fetch_cnt, const std::wstring& column_name, + std::vector< std::vector< Col > >& cols, SQLHSTMT* hstmt); +void QueryBindFetch(const size_t row_cnt, const size_t col_cnt, + const std::wstring& column_name, SQLHSTMT* hstmt); +void QueryFetch(const std::wstring& column, const std::wstring& dataset, + const std::wstring& count, SQLHSTMT* hstmt); + +template < class T > +inline bool FuzzyEquals(T a, T b, T epsil) { + return std::abs(a - b) <= epsil; +} + +inline void BindColumns(std::vector< std::vector< Col > >& cols, + SQLHSTMT* hstmt) { + SQLRETURN ret; + for (size_t i = 0; i < cols.size(); i++) { + ret = SQLBindCol(*hstmt, (SQLUSMALLINT)i + 1, SQL_C_CHAR, + (SQLPOINTER)&cols[i][0].data_dat[i], 255, + 
&cols[i][0].data_len); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + } +} + +inline void ExecuteQuery(const std::wstring& column, + const std::wstring& dataset, const std::wstring& count, + SQLHSTMT* hstmt) { + std::wstring statement = QueryBuilder(column, dataset, count); + SQLRETURN ret = SQLExecDirect(*hstmt, (SQLTCHAR*)statement.c_str(), + (SQLINTEGER)statement.length()); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); +} + +inline void ExtendedFetch(const size_t exp_row_cnt, const size_t exp_read_cnt, + const bool aligned, const size_t total_row_cnt, + SQLHSTMT* hstmt) { + SQLULEN row_cnt = 0; + SQLUSMALLINT row_stat[10]; + size_t read_cnt = 0; + SQLRETURN ret; + while ( + (ret = SQLExtendedFetch(*hstmt, SQL_FETCH_NEXT, 0, &row_cnt, row_stat)) + == SQL_SUCCESS) { + read_cnt++; + if (aligned) { + EXPECT_EQ(row_cnt, exp_row_cnt); + } else { + size_t adj_exp_row_cnt = ((read_cnt * exp_row_cnt) > total_row_cnt) + ? 
(total_row_cnt % exp_row_cnt) + : exp_row_cnt; + EXPECT_EQ(row_cnt, adj_exp_row_cnt); + } + } + + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + EXPECT_EQ(exp_read_cnt, read_cnt); +} + +inline void Fetch(size_t exp_row_cnt, SQLHSTMT* hstmt) { + SQLRETURN ret; + size_t read_cnt = 0; + while ((ret = SQLFetch(*hstmt)) == SQL_SUCCESS) { + read_cnt++; + } + + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + EXPECT_EQ(exp_row_cnt, read_cnt); +} + +inline void QueryBind(const size_t row_cnt, const size_t col_cnt, + const size_t row_fetch_cnt, + const std::wstring& column_name, + std::vector< std::vector< Col > >& cols, + SQLHSTMT* hstmt) { + (void)col_cnt; + SQLRETURN ret = + SQLSetStmtAttr(*hstmt, SQL_ROWSET_SIZE, (void*)row_fetch_cnt, 0); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_EQ(ret, SQL_SUCCESS); + + std::wstring row_str = std::to_wstring(row_cnt); + ExecuteQuery(column_name, flight_data_set, row_str, hstmt); + + for (size_t i = 0; i < cols.size(); i++) { + cols[i].resize(row_fetch_cnt); + } + BindColumns(cols, hstmt); +} + +inline void QueryBindExtendedFetch(const size_t row_cnt, const size_t col_cnt, + const size_t row_fetch_cnt, + const std::wstring& column_name, + SQLHSTMT* hstmt) { + std::vector< std::vector< Col > > cols(col_cnt); + QueryBind(row_cnt, col_cnt, row_fetch_cnt, column_name, cols, hstmt); + + // Fetch data + size_t misaligned = ((row_cnt % row_fetch_cnt) != 0); + size_t exp_read_cnt = (row_cnt / row_fetch_cnt) + misaligned; + ExtendedFetch(row_fetch_cnt, exp_read_cnt, (bool)!misaligned, row_cnt, + hstmt); + CloseCursor(hstmt, true, true); +} + +inline void QueryBindFetch(const size_t row_cnt, const size_t col_cnt, + const std::wstring& column_name, SQLHSTMT* hstmt) { + std::vector< std::vector< Col > > cols(col_cnt); + QueryBind(row_cnt, col_cnt, 1, column_name, cols, hstmt); + + // Fetch data + Fetch(row_cnt, hstmt); + CloseCursor(hstmt, true, true); +} + +void QueryFetch(const std::wstring& column, const std::wstring& 
dataset, + const std::wstring& count, SQLHSTMT* hstmt) { + ExecuteQuery(column, dataset, count, hstmt); + SQLRETURN ret = SQLFetch(*hstmt); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); +} + +template < class T > +void CheckData(const std::wstring& type_name, const std::wstring& data_set, + const std::wstring row, SQLHSTMT* hstmt, + const SQLUSMALLINT ordinal_pos, const SQLUSMALLINT type, + const std::vector< T >& expected_val, const SQLLEN data_size) { + QueryFetch(type_name, data_set, row, hstmt); + T val; + SQLLEN out_size; + const SQLRETURN ret = + SQLGetData(*hstmt, ordinal_pos, type, &val, data_size, &out_size); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + bool valid = false; + for (size_t i = 0; i < expected_val.size(); i++) { + valid |= (val == expected_val[i]); + if (valid) + break; + } + EXPECT_TRUE(valid); +} + +template <> +void CheckData< float >(const std::wstring& type_name, + const std::wstring& data_set, const std::wstring row, + SQLHSTMT* hstmt, const SQLUSMALLINT ordinal_pos, + const SQLUSMALLINT type, + const std::vector< float >& expected_val, + const SQLLEN data_size) { + QueryFetch(type_name, data_set, row, hstmt); + float val; + SQLLEN out_size; + const SQLRETURN ret = + SQLGetData(*hstmt, ordinal_pos, type, &val, data_size, &out_size); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + bool valid = false; + for (auto& it : expected_val) { + valid |= FuzzyEquals(val, it, 0.1f); + if (valid) + break; + } +} + +template <> +void CheckData< double >(const std::wstring& type_name, + const std::wstring& data_set, const std::wstring row, + SQLHSTMT* hstmt, const SQLUSMALLINT ordinal_pos, + const SQLUSMALLINT type, + const std::vector< double >& expected_val, + const SQLLEN data_size) { + QueryFetch(type_name, data_set, row, hstmt); + double val; + SQLLEN out_size; + const SQLRETURN ret = + SQLGetData(*hstmt, ordinal_pos, type, 
&val, data_size, &out_size); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + bool valid = false; + for (auto& it : expected_val) { + valid |= FuzzyEquals(val, it, 0.1); + if (valid) + break; + } + EXPECT_TRUE(valid); +} + +template <> +void CheckData< std::wstring >(const std::wstring& type_name, + const std::wstring& data_set, + const std::wstring row, SQLHSTMT* hstmt, + const SQLUSMALLINT ordinal_pos, + const SQLUSMALLINT type, + const std::vector< std::wstring >& expected_val, + const SQLLEN data_size) { + QueryFetch(type_name, data_set, row, hstmt); + std::vector< SQLTCHAR > val(data_size); + bool valid = false; + for (auto& it : expected_val) { + std::wstring str; + SQLLEN out_size; + while (SQLGetData(*hstmt, ordinal_pos, type, val.data(), + data_size * sizeof(SQLTCHAR), &out_size) + == SQL_SUCCESS_WITH_INFO) { + str += val.data(); + } + valid |= (str == it); + if (valid) + break; + } +} + +template <> +void CheckData< TIMESTAMP_STRUCT >( + const std::wstring& type_name, const std::wstring& data_set, + const std::wstring row, SQLHSTMT* hstmt, const SQLUSMALLINT ordinal_pos, + const SQLUSMALLINT type, + const std::vector< TIMESTAMP_STRUCT >& expected_val, + const SQLLEN data_size) { + auto compare_ts_struct = [](const TIMESTAMP_STRUCT& x, + const TIMESTAMP_STRUCT& y) { + return ((x.year == y.year) && (x.month == y.month) && (x.day == y.day) + && (x.hour == y.hour) && (x.minute == y.minute) + && (x.second == y.second) && (x.fraction == y.fraction)); + }; + QueryFetch(type_name, data_set, row, hstmt); + TIMESTAMP_STRUCT val; + SQLLEN out_size; + const SQLRETURN ret = + SQLGetData(*hstmt, ordinal_pos, type, &val, data_size, &out_size); + LogAnyDiagnostics(SQL_HANDLE_STMT, *hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + bool valid = false; + for (size_t i = 0; i < expected_val.size(); i++) { + valid |= compare_ts_struct(val, expected_val[i]); + if (valid) + break; + } + EXPECT_TRUE(valid); +} + +#define 
GET_DATA_TEST(type, name, c_type, val_array, sz) \ + TEST_F(TestSQLGetData, type) { \ + for (uint32_t i = 0; i < data_cnt; i++) { \ + CheckData(name, multi_type_data_set, single_row_offsets[i], \ + &m_hstmt, m_single_column_ordinal_position, \ + (SQLUSMALLINT)c_type, val_array, sz); \ + if (i != (data_cnt - 1)) \ + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); \ + } \ + } + +class TestSQLBindCol : public testing::Test { + public: + TestSQLBindCol() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + + ~TestSQLBindCol() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLFetch : public testing::Test { + public: + TestSQLFetch() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLFetch() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLExtendedFetch : public testing::Test { + public: + TestSQLExtendedFetch() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + 
SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLExtendedFetch() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLGetData : public testing::Test { + public: + TestSQLGetData() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLGetData() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; + + static const uint16_t m_origin_column_buffer_length = 1024; + SQLUSMALLINT m_single_column_ordinal_position = 1; + + SQLTCHAR m_origin_column_data[m_origin_column_buffer_length]; + SQLLEN m_origin_indicator; +}; + +class TestSQLNumResultCols : public testing::Test { + public: + TestSQLNumResultCols() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLNumResultCols() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; + SQLSMALLINT m_column_count; +}; + +class TestSQLMoreResults : public testing::Test { + public: + TestSQLMoreResults() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + 
&m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLMoreResults() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +class TestSQLDescribeCol : public testing::Test { + public: + TestSQLDescribeCol() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLDescribeCol() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; + SQLSMALLINT m_column_number; + SQLTCHAR m_column_name[50]; + SQLSMALLINT m_column_name_length; + SQLSMALLINT m_data_type; + SQLULEN m_column_size; + SQLSMALLINT m_decimal_digits; + SQLSMALLINT m_nullable; +}; + +class TestSQLRowCount : public testing::Test { + public: + TestSQLRowCount() { + } + + void SetUp() { + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, + &m_conn, &m_hstmt, true, true)); + } + + void TearDown() { + if (m_hstmt != SQL_NULL_HSTMT) { + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + SQLFreeHandle(SQL_HANDLE_STMT, m_hstmt); + SQLDisconnect(m_conn); + SQLFreeHandle(SQL_HANDLE_ENV, m_env); + } + } + + ~TestSQLRowCount() { + // cleanup any pending stuff, but no exceptions allowed + } + + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +TEST_F(TestSQLBindCol, 
SingleColumnSingleBind) { + std::vector< std::vector< Col > > cols(single_col_cnt); + QueryBind(single_row_cnt, single_col_cnt, 1, single_col, cols, &m_hstmt); +} + +TEST_F(TestSQLBindCol, MultiColumnMultiBind) { + std::vector< std::vector< Col > > cols(multi_col_cnt); + QueryBind(single_row_cnt, multi_col_cnt, 1, multi_col, cols, &m_hstmt); +} + +// Looked at SQLBindCol - if < requested column are allocated, it will +// reallocate additional space for that column +TEST_F(TestSQLBindCol, InvalidColIndex0) { + std::vector< std::vector< Col > > cols(single_col_cnt); + SQLRETURN ret = SQLSetStmtAttr(m_hstmt, SQL_ROWSET_SIZE, (void*)1, 0); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_EQ(ret, SQL_SUCCESS); + + std::wstring row_str = std::to_wstring(single_row_cnt); + ExecuteQuery(single_col, flight_data_set, row_str, &m_hstmt); + + for (size_t i = 0; i < cols.size(); i++) { + cols[i].resize(1); + } + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)1, SQL_C_CHAR, + (SQLPOINTER)&cols[0][0].data_dat[0], 255, + &cols[0][0].data_len); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)0, SQL_C_CHAR, + (SQLPOINTER)&cols[0][0].data_dat[0], 255, + &cols[0][0].data_len); + EXPECT_FALSE(SQL_SUCCEEDED(ret)); +} + +TEST_F(TestSQLBindCol, InsufficientSpace) { + SQLRETURN ret = SQLSetStmtAttr(m_hstmt, SQL_ROWSET_SIZE, (void*)1, 0); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_EQ(ret, SQL_SUCCESS); + + std::wstring row_str = std::to_wstring(single_row_cnt); + ExecuteQuery(single_col, flight_data_set, row_str, &m_hstmt); + + SQLLEN length = 0; + std::vector< SQLTCHAR > data_buffer(2); + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)1, SQL_C_CHAR, + (SQLPOINTER)data_buffer.data(), 2, &length); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + + SQLULEN row_cnt = 0; + SQLUSMALLINT row_stat = 0; + std::vector< SQLTCHAR > msg_buffer(512); + ret = 
SQLExtendedFetch(m_hstmt, SQL_FETCH_NEXT, 0, &row_cnt, &row_stat); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret, msg_buffer.data(), 512); + EXPECT_EQ(ret, SQL_SUCCESS_WITH_INFO); + EXPECT_STREQ(msg_buffer.data(), L"Fetched item was truncated."); + // TODO (#110): Improve sample data result checks + const wchar_t* data = + reinterpret_cast< const wchar_t* >(data_buffer.data()); + bool found_expected_data = + wcscmp(data, m_expected_origin_column_data_1.substr(0, 1).c_str()) + || wcscmp(data, m_expected_origin_column_data_2.substr(0, 1).c_str()); + EXPECT_TRUE(found_expected_data); +} + +TEST_F(TestSQLFetch, SingleCol_SingleRow) { + EXPECT_NO_THROW( + QueryBindFetch(single_row_cnt, single_col_cnt, single_col, &m_hstmt)); +} + +TEST_F(TestSQLFetch, SingleCol_MultiRow) { + EXPECT_NO_THROW( + QueryBindFetch(multi_row_cnt, single_col_cnt, single_col, &m_hstmt)); +} + +TEST_F(TestSQLFetch, MultiCol_SingleRow) { + EXPECT_NO_THROW( + QueryBindFetch(single_row_cnt, multi_col_cnt, multi_col, &m_hstmt)); +} + +TEST_F(TestSQLFetch, MultiCol_MultiRow) { + EXPECT_NO_THROW( + QueryBindFetch(multi_row_cnt, multi_col_cnt, multi_col, &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, SingleCol_SingleRow) { + EXPECT_NO_THROW(QueryBindExtendedFetch(single_row_cnt, single_col_cnt, + single_row_rd_cnt, single_col, + &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, SingleCol_MultiRow_SingleFetch) { + EXPECT_NO_THROW(QueryBindExtendedFetch(multi_row_cnt, single_col_cnt, + single_row_rd_cnt, single_col, + &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, SingleCol_MultiRow_MultiFetch_Aligned) { + EXPECT_NO_THROW(QueryBindExtendedFetch(multi_row_cnt, single_col_cnt, + multi_row_rd_cnt_aligned, single_col, + &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, SingleCol_MultiRow_MultiFetch_Misaligned) { + EXPECT_NO_THROW(QueryBindExtendedFetch(multi_row_cnt, single_col_cnt, + multi_row_rd_cnt_misaligned, + single_col, &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, MultiCol_SingleRow) { + 
EXPECT_NO_THROW(QueryBindExtendedFetch( + single_row_cnt, multi_col_cnt, single_row_rd_cnt, multi_col, &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, MultiCol_MultiRow_SingleFetch) { + EXPECT_NO_THROW(QueryBindExtendedFetch( + multi_row_cnt, multi_col_cnt, single_row_rd_cnt, multi_col, &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, MultiCol_MultiRow_MultiFetch_Aligned) { + EXPECT_NO_THROW(QueryBindExtendedFetch(multi_row_cnt, multi_col_cnt, + multi_row_rd_cnt_aligned, multi_col, + &m_hstmt)); +} + +TEST_F(TestSQLExtendedFetch, MultiCol_MultiRow_MultiFetch_Misaligned) { + EXPECT_NO_THROW(QueryBindExtendedFetch(multi_row_cnt, multi_col_cnt, + multi_row_rd_cnt_misaligned, + multi_col, &m_hstmt)); +} + +TEST_F(TestSQLGetData, GetWVARCHARData) { + QueryFetch(single_col, flight_data_set, single_row, &m_hstmt); + + SQLRETURN ret = + SQLGetData(m_hstmt, m_single_column_ordinal_position, SQL_C_WCHAR, + m_origin_column_data, m_origin_column_buffer_length, + &m_origin_indicator); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(ret)); + // TODO (#110): Improve sample data result checks + bool found_expected_data = + wcscmp(m_origin_column_data, m_expected_origin_column_data_1.c_str()) + || wcscmp(m_origin_column_data, + m_expected_origin_column_data_2.c_str()); + EXPECT_TRUE(found_expected_data); +} + +TEST_F(TestSQLGetData, GetFloatData) { + QueryFetch(single_float_col, flight_data_set, single_row, &m_hstmt); + + float data = 0.0f; + SQLRETURN ret = SQLGetData(m_hstmt, m_single_column_ordinal_position, + SQL_C_FLOAT, &data, 4, &m_origin_indicator); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(ret)); + // TODO (#110): Improve sample data result checks + printf("%f\n", data); + bool found_expected_data = + (data == distance_miles_1 || data == distance_miles_2); + EXPECT_TRUE(found_expected_data); +} + +TEST_F(TestSQLGetData, GetIntegerData) { + QueryFetch(single_integer_col, flight_data_set, single_row_offset_3, 
+ &m_hstmt); + + int data = 0; + SQLRETURN ret = + SQLGetData(m_hstmt, m_single_column_ordinal_position, SQL_C_LONG, &data, + sizeof(int), &m_origin_indicator); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(ret)); + // TODO (#110): Improve sample data result checks + bool found_expected_data = + (data == delay_offset_3_1 || data == delay_offset_3_2); + EXPECT_TRUE(found_expected_data); +} + +TEST_F(TestSQLGetData, GetBitData) { + QueryFetch(single_bit_col, flight_data_set, single_row, &m_hstmt); + + bool data_false; + SQLRETURN ret = SQLGetData(m_hstmt, m_single_column_ordinal_position, + SQL_C_BIT, &data_false, 1, &m_origin_indicator); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(ret)); + // TODO (#110): Improve sample data result checks + // Since it can return either true or false, will disable check for now + // EXPECT_FALSE(data_false); + + // Send another query + ASSERT_NO_THROW(CloseCursor(&m_hstmt, true, true)); + QueryFetch(single_bit_col, flight_data_set, single_row_offset_3, &m_hstmt); + + ret = SQLGetData(m_hstmt, m_single_column_ordinal_position, SQL_C_BIT, + &data_false, 1, &m_origin_indicator); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_TRUE(SQL_SUCCEEDED(ret)); + // TODO (#110): Improve sample data result checks + // Since it can return either true or false, will disable check for now + // EXPECT_FALSE(data_false); +} + +GET_DATA_TEST(TypeDataSet_GetBoolData, type_boolean, SQL_C_BIT, + type_boolean_vals, 1) + +GET_DATA_TEST(TypeDataSet_GetByteData, type_byte, SQL_C_SHORT, type_byte_vals, + 2) + +GET_DATA_TEST(TypeDataSet_GetDateData, type_date, SQL_C_TIMESTAMP, + type_date_vals, sizeof(TIMESTAMP_STRUCT)) + +GET_DATA_TEST(TypeDataSet_GetShortData, type_short, SQL_C_SHORT, + type_short_vals, 2) + +GET_DATA_TEST(TypeDataSet_GetIntegerData, type_integer, SQL_C_LONG, + type_integer_vals, 4) + +GET_DATA_TEST(TypeDataSet_GetLongData, type_long, SQL_C_SBIGINT, 
type_long_vals, + 8) + +GET_DATA_TEST(TypeDataSet_GetHalfFloatData, type_half_float, SQL_C_FLOAT, + type_half_float_vals, 4) + +GET_DATA_TEST(TypeDataSet_GetFloatData, type_float, SQL_C_FLOAT, + type_float_vals, 4) + +GET_DATA_TEST(TypeDataSet_GetDoubleData, type_double, SQL_C_DOUBLE, + type_double_vals, 8) + +GET_DATA_TEST(TypeDataSet_GetScaledFloatData, type_scaled_float, SQL_C_DOUBLE, + type_scaled_float_vals, 8) + +GET_DATA_TEST(TypeDataSet_GetKeywordData, type_keyword, SQL_C_WCHAR, + type_keyword_vals, 512) +GET_DATA_TEST(TypeDataSet_GetKeywordDataMultiReadSingleByte, type_keyword, + SQL_C_WCHAR, type_keyword_vals, 2) +GET_DATA_TEST(TypeDataSet_GetKeywordDataMultiReadMultiByte, type_keyword, + SQL_C_WCHAR, type_keyword_vals, 4) + +GET_DATA_TEST(TypeDataSet_GetTextData, type_text, SQL_C_WCHAR, type_text_vals, + 512) +GET_DATA_TEST(TypeDataSet_GetTextDataMultiReadSingleByte, type_text, + SQL_C_WCHAR, type_text_vals, 2) +GET_DATA_TEST(TypeDataSet_GetTextDataMultiReadMultiByte, type_text, SQL_C_WCHAR, + type_text_vals, 4) + +TEST_F(TestSQLGetData, SQLSTATE_01004_StringDataRightTruncated) { + SQLRETURN ret; + SQLLEN insufficient_buffer_length = 1; + + ExecuteQuery(single_col, flight_data_set, single_row, &m_hstmt); + + ret = SQLFetch(m_hstmt); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + + ret = SQLGetData(m_hstmt, m_single_column_ordinal_position, SQL_C_WCHAR, + m_origin_column_data, insufficient_buffer_length, + &m_origin_indicator); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_EQ(ret, SQL_SUCCESS_WITH_INFO); + EXPECT_TRUE(CheckSQLSTATE(SQL_HANDLE_STMT, m_hstmt, + SQLSTATE_STRING_DATA_RIGHT_TRUNCATED)); +} + +TEST_F(TestSQLGetData, SQLSTATE_07009_InvalidDescriptorIndex) { + SQLRETURN ret; + SQLUSMALLINT invalid_column_ordinal_position = 2; + + ExecuteQuery(single_col, flight_data_set, single_row, &m_hstmt); + + ret = SQLFetch(m_hstmt); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + 
ASSERT_TRUE(SQL_SUCCEEDED(ret)); + + ret = SQLGetData(m_hstmt, invalid_column_ordinal_position, SQL_C_WCHAR, + m_origin_column_data, m_origin_column_buffer_length, + &m_origin_indicator); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + EXPECT_EQ(ret, SQL_ERROR); + EXPECT_TRUE(CheckSQLSTATE(SQL_HANDLE_STMT, m_hstmt, + SQLSTATE_INVALID_DESCRIPTOR_INDEX)); +} + +TEST_F(TestSQLNumResultCols, SingleColumn) { + std::wstring row_str = std::to_wstring(single_row_cnt); + ExecuteQuery(single_col, flight_data_set, row_str, &m_hstmt); + + EXPECT_EQ(SQL_SUCCESS, SQLNumResultCols(m_hstmt, &m_column_count)); + EXPECT_EQ(single_col_cnt, (size_t)m_column_count); +} + +TEST_F(TestSQLNumResultCols, MultiColumn) { + std::wstring row_str = std::to_wstring(multi_row_cnt); + ExecuteQuery(multi_col, flight_data_set, row_str, &m_hstmt); + + EXPECT_EQ(SQL_SUCCESS, SQLNumResultCols(m_hstmt, &m_column_count)); + EXPECT_EQ(multi_col_cnt, (size_t)m_column_count); +} + +TEST_F(TestSQLDescribeCol, SingleColumnMetadata) { + ExecuteQuery(single_col, flight_data_set, single_row, &m_hstmt); + + EXPECT_EQ( + SQL_SUCCESS, + SQLDescribeCol(m_hstmt, 1, m_column_name, IT_SIZEOF(m_column_name), + &m_column_name_length, &m_data_type, &m_column_size, + &m_decimal_digits, &m_nullable)); + EXPECT_EQ(single_col, m_column_name); + EXPECT_EQ(single_col_name_length, m_column_name_length); + EXPECT_EQ(single_col_data_type, m_data_type); + EXPECT_EQ(single_col_column_size, m_column_size); + EXPECT_EQ(single_col_decimal_digit, m_decimal_digits); + EXPECT_EQ(single_col_nullable, m_nullable); +} + +TEST_F(TestSQLDescribeCol, MultiColumnMetadata) { + ExecuteQuery(multi_col, flight_data_set, multi_row, &m_hstmt); + + for (SQLUSMALLINT i = 1; i <= multi_col_cnt; i++) { + EXPECT_EQ( + SQL_SUCCESS, + SQLDescribeCol(m_hstmt, i, m_column_name, IT_SIZEOF(m_column_name), + &m_column_name_length, &m_data_type, &m_column_size, + &m_decimal_digits, &m_nullable)); + } +} + +TEST_F(TestSQLDescribeCol, 
MultiColumnNameLengthType) { + ExecuteQuery(multi_col, flight_data_set, multi_row, &m_hstmt); + + for (SQLUSMALLINT i = 1; i <= multi_col_cnt; i++) { + EXPECT_EQ( + SQL_SUCCESS, + SQLDescribeCol(m_hstmt, i, m_column_name, IT_SIZEOF(m_column_name), + &m_column_name_length, &m_data_type, &m_column_size, + &m_decimal_digits, &m_nullable)); + EXPECT_EQ(column_data[i - 1].column_name, std::wstring(m_column_name)); + EXPECT_EQ(column_data[i - 1].data_type, m_data_type); + } +} + +TEST_F(TestSQLDescribeCol, InvalidColumnMetadata) { + ExecuteQuery(multi_col, flight_data_set, multi_row, &m_hstmt); + + EXPECT_EQ(SQL_ERROR, + SQLDescribeCol(m_hstmt, multi_col_cnt + 1, m_column_name, + IT_SIZEOF(m_column_name), &m_column_name_length, + &m_data_type, &m_column_size, &m_decimal_digits, + &m_nullable)); + EXPECT_TRUE(CheckSQLSTATE(SQL_HANDLE_STMT, m_hstmt, + SQLSTATE_INVALID_DESCRIPTOR_INDEX)); +} + +TEST_F(TestSQLMoreResults, NoData) { + SQLRETURN ret = SQLMoreResults(m_hstmt); + EXPECT_EQ(SQL_NO_DATA, ret); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); +} + +// Row count is not supported for the driver, so this should return -1, +// as defined in the ODBC API. +TEST_F(TestSQLRowCount, RowCountNotAvailable) { + SQLLEN row_count; + SQLRETURN ret = SQLRowCount(m_hstmt, &row_count); + EXPECT_EQ(SQL_SUCCESS, ret); + EXPECT_EQ(row_count, -1L); +} + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. + system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." 
: "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_results > leaks_itodbc_results"); +#endif + return failures; +} diff --git a/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/CMakeLists.txt b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/CMakeLists.txt new file mode 100644 index 0000000000..1203c17661 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/CMakeLists.txt @@ -0,0 +1,31 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(itodbc_tableau_queries) + +# Source, headers, and include dirs +set(SOURCE_FILES test_odbc_tableau_queries.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(itodbc_tableau_queries ${SOURCE_FILES}) + +# Library dependencies +target_code_coverage(itodbc_tableau_queries PUBLIC AUTO ALL) +target_link_libraries(itodbc_tableau_queries odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(itodbc_tableau_queries PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/packages.config b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/pch.cpp b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. 
+// + +#include "pch.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/pch.h b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/queries_all.txt b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/queries_all.txt new file mode 100644 index 0000000000..8fb3223732 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/queries_all.txt @@ -0,0 +1,216 @@ +SELECT SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024) AS `OriginWeather` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT SUM(`kibana_sample_data_flights`.`FlightDelayMin`) AS `sum_Offset_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`FlightDelay`) AS `sum_FlightDelay_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`DistanceMiles`) AS `sum_DistanceMiles_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT YEAR(`kibana_sample_data_flights`.`timestamp`) AS `yr_timestamp_ok` FROM `kibana_sample_data_flights` GROUP BY 
1 +SELECT SUM(ABS(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(ACOS(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252358221825_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(ASIN(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252358545410_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(ATAN(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252358811651_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(ATAN2(`kibana_sample_data_flights`.`dayOfWeek`,`kibana_sample_data_flights`.`FlightDelayMin`)) AS `sum_Calculation_160722252358811651_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`dayOfWeek`) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(COS(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(COT(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(DEGREES(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM((`kibana_sample_data_flights`.`dayOfWeek` DIV `kibana_sample_data_flights`.`FlightDelayMin`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(EXP(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`dayOfWeek`) AS `sum_Calculation_160722252357632000_ok` 
FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM((((CASE WHEN (ABS((`kibana_sample_data_flights`.`FlightDelayMin`) - (ROUND( ( (`kibana_sample_data_flights`.`FlightDelayMin`) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((`kibana_sample_data_flights`.`dayOfWeek`) - (ROUND( ( (`kibana_sample_data_flights`.`dayOfWeek`) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN 1.5 ELSE 0.0 END) - (CASE WHEN ((`kibana_sample_data_flights`.`dayOfWeek`) - (ROUND( ( (`kibana_sample_data_flights`.`dayOfWeek`) / 3.0 ), 0 ) * 3.0) < 0.0) AND ((CASE WHEN (ABS((`kibana_sample_data_flights`.`FlightDelayMin`) - (ROUND( ( (`kibana_sample_data_flights`.`FlightDelayMin`) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((`kibana_sample_data_flights`.`dayOfWeek`) - (ROUND( ( (`kibana_sample_data_flights`.`dayOfWeek`) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN SQRT(3.0) / 2.0 ELSE 0.0 END) > 0.0) THEN 3.0 ELSE 0.0 END)) + (ROUND( ( (`kibana_sample_data_flights`.`dayOfWeek`) / 3.0 ), 0 ) * 3.0))) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(ROUND( (((CASE WHEN (ABS((`kibana_sample_data_flights`.`FlightDelayMin`) - (ROUND( ( (`kibana_sample_data_flights`.`FlightDelayMin`) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((`kibana_sample_data_flights`.`dayOfWeek`) - (ROUND( ( (`kibana_sample_data_flights`.`dayOfWeek`) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN SQRT(3.0) / 2.0 ELSE 0.0 END) - (CASE WHEN ((`kibana_sample_data_flights`.`FlightDelayMin`) - (ROUND( ( (`kibana_sample_data_flights`.`FlightDelayMin`) / SQRT(3.0) ), 0 ) * SQRT(3.0)) < 0.0) AND ((CASE WHEN (ABS((`kibana_sample_data_flights`.`FlightDelayMin`) - (ROUND( ( (`kibana_sample_data_flights`.`FlightDelayMin`) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((`kibana_sample_data_flights`.`dayOfWeek`) - (ROUND( ( (`kibana_sample_data_flights`.`dayOfWeek`) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN SQRT(3.0) / 2.0 ELSE 0.0 END) > 0.0) 
THEN SQRT(3.0) ELSE 0.0 END)) + (ROUND( ( (`kibana_sample_data_flights`.`FlightDelayMin`) / SQRT(3.0) ), 0 ) * SQRT(3.0))), 3)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(LOG(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM((LOG(`kibana_sample_data_flights`.`dayOfWeek`)/LOG(10))) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MAX(`kibana_sample_data_flights`.`dayOfWeek`) AS `usr_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MIN(`kibana_sample_data_flights`.`dayOfWeek`) AS `usr_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM((CASE WHEN `kibana_sample_data_flights`.`dayOfWeek` >= 0 OR FLOOR(2) = 2 THEN POWER(`kibana_sample_data_flights`.`dayOfWeek`,2) END)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(RADIANS(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(ROUND(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(SIGN(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(SIN(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(SQRT(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT 
SUM(POWER(`kibana_sample_data_flights`.`dayOfWeek`, 2)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(TAN(`kibana_sample_data_flights`.`dayOfWeek`)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`dayOfWeek`) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(ASCII(SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024))) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT `kibana_sample_data_flights`.`Dest` AS `Dest` FROM `kibana_sample_data_flights` WHERE ((`kibana_sample_data_flights`.`Dest` = 'caching_sha2_password') AND (LOCATE('in',LOWER(`kibana_sample_data_flights`.`Dest`)) > 0)) GROUP BY 1 +SELECT SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024) AS `OriginWeather` FROM `kibana_sample_data_flights` WHERE (RIGHT(RTRIM(LOWER(SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024))), LENGTH('.')) = '.') GROUP BY 1 +SELECT SUM(IF(ISNULL(1), NULL, LOCATE('Cape',`kibana_sample_data_flights`.`Origin`,GREATEST(1,FLOOR(1))))) AS `sum_Calculation_462181953493630977_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT (CASE WHEN 3 >= 0 THEN LEFT(`kibana_sample_data_flights`.`Origin`,3) ELSE NULL END) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT LENGTH(`kibana_sample_data_flights`.`Origin`) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT LOWER(`kibana_sample_data_flights`.`Origin`) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT LTRIM(`kibana_sample_data_flights`.`Origin`) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT 
MAX(`kibana_sample_data_flights`.`Origin`) AS `usr_Calculation_462181953493630977_nk` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT IF(ISNULL(0), NULL, SUBSTRING(`kibana_sample_data_flights`.`Origin`,GREATEST(1,FLOOR(0)),FLOOR(5))) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT MIN(`kibana_sample_data_flights`.`Origin`) AS `usr_Calculation_462181953493630977_nk` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT REPLACE(`kibana_sample_data_flights`.`Origin`,'Airport','') AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE WHEN 3 >= 0 THEN RIGHT(`kibana_sample_data_flights`.`Origin`,3) ELSE NULL END) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT RTRIM(`kibana_sample_data_flights`.`Origin`) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT IF(`kibana_sample_data_flights`.`AvgTicketPrice` >= 0, SPACE(FLOOR(`kibana_sample_data_flights`.`AvgTicketPrice`)), NULL) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT TRIM(LEADING '-' FROM TRIM(LEADING SUBSTRING_INDEX(`kibana_sample_data_flights`.`Origin`, '-', (2 - 1)) FROM SUBSTRING_INDEX(`kibana_sample_data_flights`.`Origin`, '-', 2))) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024) AS `OriginWeather` FROM `kibana_sample_data_flights` WHERE (LEFT(LTRIM(LOWER(SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024))), LENGTH('$')) = '$') GROUP BY 1 +SELECT TRIM(`kibana_sample_data_flights`.`Origin`) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT UPPER(`kibana_sample_data_flights`.`Origin`) AS `Calculation_462181953493630977` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT ADDDATE( DATE_FORMAT( 
DATE(`kibana_sample_data_flights`.`password_last_changed`), '%Y-01-01 00:00:00' ), INTERVAL 0 SECOND ) AS `tyr_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT YEAR(DATE(`kibana_sample_data_flights`.`timestamp`)) AS `yr_Calculation_462181953481519104_ok` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (YEAR(`kibana_sample_data_flights`.`timestamp`) - YEAR(DATE('1990-01-01'))) AS `Calculation_1706301351891775489` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT MONTHNAME(`kibana_sample_data_flights`.`timestamp`) AS `Calculation_1706301351891775489` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT YEAR(TIMESTAMP(STR_TO_DATE('5.April.2004', '%d.%i.%Y'))) AS `yr_Calculation_462181953481519104_ok` FROM `kibana_sample_data_flights` +SELECT YEAR(ADDDATE( CONCAT( DATE_FORMAT( `kibana_sample_data_flights`.`timestamp`, '%Y-' ), (3*(QUARTER(`kibana_sample_data_flights`.`timestamp`)-1)+1), '-01 00:00:00' ), INTERVAL 0 SECOND )) AS `yr_Calculation_1706301351891775489_ok` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT DAYOFMONTH(`kibana_sample_data_flights`.`timestamp`) AS `Calculation_462181953481519104` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT 2019 AS `yr_Calculation_462181953481519104_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT YEAR(ADDTIME(CAST(CAST(`kibana_sample_data_flights`.`timestamp` AS DATE) AS DATETIME), TIME(`kibana_sample_data_flights`.`timestamp`))) AS `yr_Calculation_1706301351891775489_ok` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT YEAR(MAKETIME(`kibana_sample_data_flights`.`dayOfWeek`, `kibana_sample_data_flights`.`dayOfWeek`, `kibana_sample_data_flights`.`dayOfWeek`)) AS `yr_Calculation_1706301351891775489_ok` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT MAX(`kibana_sample_data_flights`.`timestamp`) AS `max_timestamp_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MIN(`kibana_sample_data_flights`.`timestamp`) AS 
`min_timestamp_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MONTH(`kibana_sample_data_flights`.`timestamp`) AS `mn_timestamp_ok` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT YEAR(NOW()) AS `yr_Calculation_462181953481519104_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT YEAR(CURDATE()) AS `yr_Calculation_462181953481519104_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT YEAR(`kibana_sample_data_flights`.`timestamp`) AS `yr_timestamp_ok` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT ((`kibana_sample_data_flights`.`Origin` = 'Frankfurt am Main Airport') AND (`kibana_sample_data_flights`.`Dest` = 'Sydney Kingsford Smith International Airport')) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE `kibana_sample_data_flights`.`OriginWeather` WHEN 'Sunny' THEN '1' WHEN 'Rain' THEN '0' ELSE 'NA' END) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE WHEN (`kibana_sample_data_flights`.`FlightDelay` = 0) THEN 'No delay' ELSE CAST(NULL AS CHAR(1)) END) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE WHEN (`kibana_sample_data_flights`.`FlightDelay` = 0) THEN 'No delay' WHEN (`kibana_sample_data_flights`.`FlightDelay` = 1) THEN 'Delay' ELSE CAST(NULL AS CHAR(1)) END) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (RIGHT(RTRIM(`kibana_sample_data_flights`.`Origin`), LENGTH('Airport')) = 'Airport') AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE WHEN (`kibana_sample_data_flights`.`FlightDelay` = 0) THEN 'No delay' ELSE CAST(NULL AS CHAR(1)) END) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT IFNULL(`kibana_sample_data_flights`.`Cancelled`, `kibana_sample_data_flights`.`AvgTicketPrice`) AS `Calculation_462181953506873347` 
FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE WHEN (`kibana_sample_data_flights`.`AvgTicketPrice` > 500) THEN 'High' WHEN NOT (`kibana_sample_data_flights`.`AvgTicketPrice` > 500) THEN 'Low' ELSE NULL END) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (NOT ISNULL(DATE_FORMAT(`kibana_sample_data_flights`.`Origin`, '%Y'))) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT ISNULL(`kibana_sample_data_flights`.`FlightNum`) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT MAX(`kibana_sample_data_flights`.`dayOfWeek`) AS `max_max_questions_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MIN(`kibana_sample_data_flights`.`dayOfWeek`) AS `min_max_questions_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT (NOT ISNULL(DATE_FORMAT(`kibana_sample_data_flights`.`Origin`, '%Y'))) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT ((`kibana_sample_data_flights`.`Origin` = 'Frankfurt am Main Airport') OR (`kibana_sample_data_flights`.`Dest` = 'Sydney Kingsford Smith International Airport')) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE WHEN (`kibana_sample_data_flights`.`AvgTicketPrice` > 500) THEN 'High' WHEN NOT (`kibana_sample_data_flights`.`AvgTicketPrice` > 500) THEN 'Low' ELSE NULL END) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT (CASE WHEN (`kibana_sample_data_flights`.`AvgTicketPrice` > 500) THEN 'High' WHEN NOT (`kibana_sample_data_flights`.`AvgTicketPrice` > 500) THEN 'Low' ELSE NULL END) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT IFNULL(`kibana_sample_data_flights`.`FlightDelay`, 0) AS `Calculation_462181953506873347` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT MIN(`kibana_sample_data_flights`.`Origin`) AS 
`TEMP(Calculation_462181953504628738)(2376748618)(0)`, MAX(`kibana_sample_data_flights`.`Origin`) AS `TEMP(Calculation_462181953504628738)(2968235173)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT AVG(`kibana_sample_data_flights`.`FlightDelayMin`) AS `avg_max_user_connections_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(1) AS `cnt_max_user_connections_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MAX(`kibana_sample_data_flights`.`max_questions`) AS `max_max_questions_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MIN(`kibana_sample_data_flights`.`dayOfWeek`) AS `min_max_questions_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM((`kibana_sample_data_flights`.`AvgTicketPrice` * `kibana_sample_data_flights`.`AvgTicketPrice`)) AS `TEMP(Calculation_462181953506873347)(1705728846)(0)`, SUM(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(Calculation_462181953506873347)(2465277995)(0)`, COUNT(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(Calculation_462181953506873347)(2633997250)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT COUNT(`kibana_sample_data_flights`.`DistanceMiles`) AS `TEMP(Calculation_462181953506873347)(2070533874)(0)`, SUM(`kibana_sample_data_flights`.`DistanceMiles`) AS `TEMP(Calculation_462181953506873347)(3496560911)(0)`, SUM((`kibana_sample_data_flights`.`DistanceMiles` * `kibana_sample_data_flights`.`DistanceMiles`)) AS `TEMP(Calculation_462181953506873347)(3595387140)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`dayOfWeek`) AS `usr_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`dayOfWeek`) AS `TEMP(Calculation_462181953506873347)(105357904)(0)`, COUNT(`kibana_sample_data_flights`.`dayOfWeek`) AS 
`TEMP(Calculation_462181953506873347)(2584840543)(0)`, SUM(((`kibana_sample_data_flights`.`dayOfWeek` + 0.0) * (`kibana_sample_data_flights`.`dayOfWeek` + 0.0))) AS `TEMP(Calculation_462181953506873347)(3340567470)(0)` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT SUM(`kibana_sample_data_flights`.`DistanceKilometers`) AS `TEMP(Calculation_462181953506873347)(1474522238)(0)`, COUNT(`kibana_sample_data_flights`.`DistanceKilometers`) AS `TEMP(Calculation_462181953506873347)(2841334535)(0)`, SUM((`kibana_sample_data_flights`.`DistanceKilometers` * `kibana_sample_data_flights`.`DistanceKilometers`)) AS `TEMP(Calculation_462181953506873347)(461715975)(0)` FROM `kibana_sample_data_flights` GROUP BY 1 +SELECT SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024) AS `OriginWeather` FROM `kibana_sample_data_flights` WHERE (SUBSTRING(`kibana_sample_data_flights`.`OriginWeather`, 1, 1024) = 'ABC') GROUP BY 1 +SELECT SUM((CASE \tWHEN ISNULL(`kibana_sample_data_flights`.`dayOfWeek`) THEN NULL \tWHEN ISNULL(10) THEN NULL \tELSE GREATEST(`kibana_sample_data_flights`.`dayOfWeek`, 10) END)) AS `sum_Calculation_160722252357632000_ok` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT `kibana_sample_data_flights`.`AvgTicketPrice` AS `AvgTicketPrice`, `kibana_sample_data_flights`.`Cancelled` AS `Cancelled`, `kibana_sample_data_flights`.`Carrier` AS `Carrier`, `kibana_sample_data_flights`.`DestAirportID` AS `DestAirportID`, `kibana_sample_data_flights`.`DestCityName` AS `DestCityName`, `kibana_sample_data_flights`.`DestCountry` AS `DestCountry`, `kibana_sample_data_flights`.`DestLocation` AS `DestLocation`, `kibana_sample_data_flights`.`DestRegion` AS `Dest~~~<<>>~~~e`.`kibana_sample_data_flights` AS `kibana_sample_data_flights` FROM `kibana_sample_data_ecommerce` LEFT JOIN `kibana_sample_data_flights` ON (`kibana_sample_data_ecommerce`.`day_of_week_i` = `kibana_sample_data_flights`.`dayOfWeek`) LIMIT 1000 +SELECT 
`kibana_sample_data_flights`.`AvgTicketPrice` AS `AvgTicketPrice`, `kibana_sample_data_flights`.`Cancelled` AS `Cancelled`, `kibana_sample_data_flights`.`Carrier` AS `Carrier`, `kibana_sample_data_flights`.`DestAirportID` AS `DestAirportID`, `kibana_sample_data_flights`.`DestCityName` AS `DestCityName`, `kibana_sample_data_flights`.`DestCountry` AS `DestCountry`, `kibana_sample_data_flights`.`DestLocation` AS `DestLocation`, `kibana_sample_data_flights`.`DestRegion` AS `Dest~~~<<>>~~~`.`kibana_sample_data_flights` AS `kibana_sample_data_flights` FROM `kibana_sample_data_ecommerce` RIGHT JOIN `kibana_sample_data_flights` ON (`kibana_sample_data_ecommerce`.`day_of_week_i` = `kibana_sample_data_flights`.`dayOfWeek`) LIMIT 1000 +SELECT `kibana_sample_data_flights`.`OriginCityName` AS `OriginCityName` FROM `kibana_sample_data_flights` GROUP BY 1 ORDER BY `OriginCityName` ASC +SELECT `kibana_sample_data_flights`.`OriginCityName` AS `OriginCityName` FROM `kibana_sample_data_flights` GROUP BY 1 ORDER BY `OriginCityName` ASC +SELECT `kibana_sample_data_flights`.`DestCityName` AS `DestCityName`, SUM(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `$__alias__0` FROM `kibana_sample_data_flights` GROUP BY 1 ORDER BY `$__alias__0` DESC, `DestCityName` ASC LIMIT 10 +SELECT 'DESKTOP-7APIVOE\\\\Rupal' AS `Calculation_1122522251639717888` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT 0 AS `Calculation_1122522251639717888` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT 0 AS `Calculation_1122522251639717888` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000002 +SELECT 1 AS `Calculation_1122522251639717888` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT 'DESKTOP-7APIVOE' AS `Calculation_1122522251639717888` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT 'ABC' AS `Calculation_1122522251639717888` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT 
MAX(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(TC_)(3575797393)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`dayOfWeek`) AS `TEMP(TEMP(TC_)(4001152001)(0))(105357904)(0)`, COUNT(`kibana_sample_data_flights`.`dayOfWeek`) AS `TEMP(TEMP(TC_)(4001152001)(0))(2584840543)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MIN(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(TC_)(2076389572)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(TEMP(TC_)(4079199159)(0))(2465277995)(0)`, COUNT(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(TEMP(TC_)(4079199159)(0))(2633997250)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(TC_)(2465277995)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT COUNT(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(TC_)(2633997250)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MAX(`kibana_sample_data_flights`.`dayOfWeek`) AS `TEMP(TC_)(718966039)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT MIN(`kibana_sample_data_flights`.`dayOfWeek`) AS `TEMP(TC_)(2462140059)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT SUM(`kibana_sample_data_flights`.`dayOfWeek`) AS `TEMP(TC_)(105357904)(0)`, SUM(`kibana_sample_data_flights`.`AvgTicketPrice`) AS `TEMP(TC_)(2465277995)(0)` FROM `kibana_sample_data_flights` GROUP BY 1.1000000000000001 +SELECT 1 AS `empty` FROM `kibana_sample_data_flights` +SELECT substring(OriginWeather, 1, 2) AS OriginWeather FROM kibana_sample_data_flights +SELECT SUM(FlightDelayMin) AS sum_FlightDelayMin_ok FROM kibana_sample_data_flights +SELECT SUM(FlightDelay) AS sum_FlightDelay_ok FROM kibana_sample_data_flights +SELECT SUM(DistanceMiles) AS 
sum_DistanceMiles_ok FROM kibana_sample_data_flights +SELECT year(timestamp) AS yr_timestamp_ok FROM kibana_sample_data_flights +SELECT abs(AvgTicketPrice) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT acos(FlightDelayMin) AS sum_Calculation_160722252358221825_ok FROM kibana_sample_data_flights +SELECT asin(FlightDelayMin) AS sum_Calculation_160722252358545410_ok FROM kibana_sample_data_flights +SELECT atan(FlightDelayMin) AS sum_Calculation_160722252358811651_ok FROM kibana_sample_data_flights +SELECT atan2(FlightDelayMin,dayOfWeek) AS sum_Calculation_160722252358811651_ok FROM kibana_sample_data_flights +SELECT SUM(FlightDelayMin) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT cos(FlightDelayMin) AS sum_Calculation_160722252358221825_ok FROM kibana_sample_data_flights +SELECT cot(AvgTicketPrice) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT degrees(FlightDelayMin) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT FlightDelayMin div AvgTicketPrice AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT exp(FlightDelayMin) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT SUM(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT SUM((((CASE WHEN (ABS((AvgTicketPrice) - (ROUND( ( (AvgTicketPrice) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((FlightDelayMin) - (ROUND( ( (FlightDelayMin) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN 1.5 ELSE 0.0 END) - (CASE WHEN ((FlightDelayMin) - (ROUND( ( (FlightDelayMin) / 3.0 ), 0 ) * 3.0) < 0.0) AND ((CASE WHEN (ABS((AvgTicketPrice) - (ROUND( ( (AvgTicketPrice) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((FlightDelayMin) - (ROUND( ( (FlightDelayMin) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN SQRT(3.0) / 2.0 ELSE 0.0 END) > 0.0) THEN 3.0 ELSE 0.0 END)) + (ROUND( ( (FlightDelayMin) / 3.0 ), 0 ) 
* 3.0))) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT SUM(ROUND( (((CASE WHEN (ABS((AvgTicketPrice) - (ROUND( ( (AvgTicketPrice) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((FlightDelayMin) - (ROUND( ( (FlightDelayMin) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN SQRT(3.0) / 2.0 ELSE 0.0 END) - (CASE WHEN ((AvgTicketPrice) - (ROUND( ( (AvgTicketPrice) / SQRT(3.0) ), 0 ) * SQRT(3.0)) < 0.0) AND ((CASE WHEN (ABS((AvgTicketPrice) - (ROUND( ( (AvgTicketPrice) / SQRT(3.0) ), 0 ) * SQRT(3.0)))) + SQRT(3.0) * ((ABS((FlightDelayMin) - (ROUND( ( (FlightDelayMin) / 3.0 ), 0 ) * 3.0))) - 1.0) > 0.0 THEN SQRT(3.0) / 2.0 ELSE 0.0 END) > 0.0) THEN SQRT(3.0) ELSE 0.0 END)) + (ROUND( ( (AvgTicketPrice) / SQRT(3.0) ), 0 ) * SQRT(3.0))), 3)) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT log(FlightDelayMin) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT (log(FlightDelayMin)/log(10)) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT MAX(FlightDelayMin) AS usr_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT MIN(FlightDelayMin) AS usr_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT sum((case when dayOfWeek >= 0 or floor(2) = 2 then power(dayOfWeek,2) end)) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT radians(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT round(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT sign(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT sin(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT sqrt(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT power(dayOfWeek, 2) AS sum_Calculation_160722252357632000_ok FROM 
kibana_sample_data_flights +SELECT tan(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT SUM(dayOfWeek) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT ascii(substring(OriginWeather, 1, 5)) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT Dest, locate('air',Dest) FROM kibana_sample_data_flights +SELECT substring(OriginWeather, 1, 1024) AS OriginWeather FROM kibana_sample_data_flights WHERE (right(rtrim(lower(substring(OriginWeather, 1, 5))), length('.')) ='.') +SELECT sum(if(isnull(1), null, locate('Cape',Origin,greatest(1,floor(1))))) AS sum_Calculation_462181953493630977_ok FROM kibana_sample_data_flights +SELECT (case when 3 >= 0 then left(Origin,3) else null end) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT length(Origin) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT lower(Origin) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT ltrim(Origin) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT max(Origin) AS usr_Calculation_462181953493630977_nk FROM kibana_sample_data_flights +SELECT if(isnull(0), null, substring(Origin,greatest(1,floor(0)),floor(5))) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT min(Origin) AS usr_Calculation_462181953493630977_nk FROM kibana_sample_data_flights +SELECT replace(Origin,'Airport','') AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT (case when 3 >= 0 then right(Origin,3) else null end) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT rtrim(Origin) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT if(AvgTicketPrice >= 0, space(floor(AvgTicketPrice)), null) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT trim(leading '-' FROM trim(leading substring(Origin, '-', (2 - 1)) FROM substring_index(Origin, 
'-', 2))) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT substring(OriginWeather, 1, 5) AS OriginWeather FROM kibana_sample_data_flights where (left(ltrim(lower(substring(OriginWeather, 1, 5))), length('$')) = '$') +SELECT trim(Origin) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT upper(Origin) AS Calculation_462181953493630977 FROM kibana_sample_data_flights +SELECT adddate( date_format( date(timestamp), '%Y-01-01 00:00:00' ), interval 0 second ) AS tyr_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT year(date(timestamp)) AS yr_Calculation_462181953481519104_ok FROM kibana_sample_data_flights +SELECT year(timestamp(str_to_date('5.April.2004', '%d.%i.%Y'))) AS yr_Calculation_462181953481519104_ok FROM kibana_sample_data_flights +SELECT dayofmonth(timestamp) AS Calculation_462181953481519104 FROM kibana_sample_data_flights +SELECT 2019 AS yr_Calculation_462181953481519104_ok FROM kibana_sample_data_flights +SELECT max(timestamp) AS max_timestamp_ok FROM kibana_sample_data_flights +SELECT min(timestamp) AS max_timestamp_ok FROM kibana_sample_data_flights +SELECT month(timestamp) AS mn_timestamp_ok FROM kibana_sample_data_flights +SELECT year(now()) AS yr_Calculation_462181953481519104_ok FROM kibana_sample_data_flights +SELECT year(curdate()) AS yr_Calculation_462181953481519104_ok FROM kibana_sample_data_flights +SELECT year(timestamp) AS yr_timestamp_ok FROM kibana_sample_data_flights +SELECT ((Origin = 'Frankfurt am Main Airport') and (Dest = 'Sydney Kingsford Smith International Airport')) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (case OriginWeather when 'Sunny' then '1' when 'Rain' then '0' else 'NA' end) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (case when (FlightDelay = 0) then 'No delay' else cast(null as char(1)) end) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (case when (FlightDelay 
= 0) then 'No delay' when (FlightDelay = 1) then 'Delay' else cast(null as char(1)) end) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (right(rtrim(Origin), length('Airport')) = 'Airport') AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (case when (FlightDelay = 0) then 'No delay' else cast(null as char(1)) end) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT ifnull(Cancelled, AvgTicketPrice) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (case when (AvgTicketPrice > 500) THEN 'High' when not (AvgTicketPrice > 500) then 'Low' else null end) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (not isnull(date_format(Origin, '%Y'))) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT isnull(FlightNum) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT MAX(AvgTicketPrice) AS max_AvgTicketPrice_ok FROM kibana_sample_data_flights +SELECT MIN(AvgTicketPrice) AS min_AvgTicketPrice_ok FROM kibana_sample_data_flights +SELECT (not isnull(date_format(Origin, '%Y'))) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT ((Origin = 'Frankfurt am Main Airport') or (Dest = 'Sydney Kingsford Smith International Airport')) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (case when (AvgTicketPrice > 500) THEN 'High' when not (AvgTicketPrice > 500) then 'Low' else null end) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT (case when (AvgTicketPrice > 500) THEN 'High' when not (AvgTicketPrice > 500) then 'Low' else null end) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT ifnull(FlightDelay, 0) AS Calculation_462181953506873347 FROM kibana_sample_data_flights +SELECT min(Origin) AS temp(Calculation_462181953504628738)(2376748618)(0), max(Origin) AS temp(Calculation_462181953504628738)(2968235173)(0) FROM 
kibana_sample_data_flights +SELECT AVG(dayOfWeek) AS avg_dayOfWeek_ok FROM kibana_sample_data_flights +SELECT SUM(1) AS cnt_dayOfWeek_ok FROM kibana_sample_data_flights +SELECT COUNT(DISTINCT AvgTicketPrice) AS ctd_AvgTicketPrice_ok FROM kibana_sample_data_flights +SELECT MAX(AvgTicketPrice) AS max_AvgTicketPrice_ok FROM kibana_sample_data_flights +SELECT MIN(AvgTicketPrice) AS min_AvgTicketPrice_ok FROM kibana_sample_data_flights +SELECT sum((AvgTicketPrice * AvgTicketPrice)) AS temp(Calculation_462181953506873347)(1705728846)(0), sum(AvgTicketPrice) AS temp(Calculation_462181953506873347)(2465277995)(0), count(AvgTicketPrice) AS temp(Calculation_462181953506873347)(2633997250)(0) FROM kibana_sample_data_flights +SELECT count(DistanceMiles) AS temp(Calculation_462181953506873347)(2070533874)(0), sum(DistanceMiles) AS temp(Calculation_462181953506873347)(3496560911)(0), sum((DistanceMiles * DistanceMiles)) AS temp(Calculation_462181953506873347)(3595387140)(0) FROM kibana_sample_data_flights +SELECT SUM(dayOfWeek) AS usr_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT substring(OriginWeather, 1, 5) AS OriginWeather FROM kibana_sample_data_flights WHERE (substring(OriginWeather, 1, 5) = 'ABC') +SELECT sum((case when isnull(FlightDelayMin) then null when isnull(10) then null else greatest(FlightDelayMin, 10) end)) AS sum_Calculation_160722252357632000_ok FROM kibana_sample_data_flights +SELECT AvgTicketPrice AS AvgTicketPrice, Cancelled AS Cancelled, Carrier AS Carrier, DestAirportID AS DestAirportID, DestCityName AS DestCityName, DestCountry AS DestCountry, DestLocation AS DestLocation FROM kibana_sample_data_ecommerce INNER JOIN kibana_sample_data_flights ON (kibana_sample_data_ecommerce.day_of_week_i = dayOfWeek) LIMIT 1000 +SELECT AvgTicketPrice AS AvgTicketPrice, Cancelled AS Cancelled, Carrier AS Carrier, DestAirportID AS DestAirportID, DestCityName AS DestCityName, DestCountry AS DestCountry, DestLocation AS DestLocation FROM 
kibana_sample_data_ecommerce LEFT JOIN kibana_sample_data_flights ON (kibana_sample_data_ecommerce.day_of_week_i = dayOfWeek) LIMIT 1000 +SELECT AvgTicketPrice AS AvgTicketPrice, Cancelled AS Cancelled, Carrier AS Carrier, DestAirportID AS DestAirportID, DestCityName AS DestCityName, DestCountry AS DestCountry, DestLocation AS DestLocation FROM kibana_sample_data_ecommerce RIGHT JOIN kibana_sample_data_flights ON (kibana_sample_data_ecommerce.day_of_week_i = dayOfWeek) LIMIT 1000 +SELECT OriginCityName FROM kibana_sample_data_flights ORDER BY OriginCityName ASC +SELECT OriginCityName FROM kibana_sample_data_flights ORDER BY OriginCityName DESC +SELECT DestCityName, SUM(AvgTicketPrice) AS $__alias__0 FROM kibana_sample_data_flights ORDER BY $__alias__0 DESC, DestCityName ASC LIMIT 10 +SELECT AvgTicketPrice AS AvgTicketPrice, Cancelled AS Cancelled, Carrier AS Carrier FROM kibana_sample_data_ecommerce INNER JOIN kibana_sample_data_flights ON (kibana_sample_data_ecommerce.day_of_week_i = dayOfWeek) LIMIT 1000 +SELECT AvgTicketPrice AS AvgTicketPrice, Cancelled AS Cancelled, Carrier AS Carrier FROM kibana_sample_data_ecommerce LEFT JOIN kibana_sample_data_flights ON (kibana_sample_data_ecommerce.day_of_week_i = dayOfWeek) LIMIT 1000 +SELECT AvgTicketPrice AS AvgTicketPrice, Cancelled AS Cancelled, Carrier AS Carrier FROM kibana_sample_data_ecommerce RIGHT JOIN kibana_sample_data_flights ON (kibana_sample_data_ecommerce.day_of_week_i = dayOfWeek) LIMIT 1000 \ No newline at end of file diff --git a/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/test_odbc_tableau_queries.cpp b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/test_odbc_tableau_queries.cpp new file mode 100644 index 0000000000..1002ba3a29 --- /dev/null +++ b/sql-odbc/src/IntegrationTests/ITODBCTableauQueries/test_odbc_tableau_queries.cpp @@ -0,0 +1,97 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +#include +#include +#include +#include +#include +// clang-format on + +const std::string all_queries_file = "queries_all.txt"; +const std::string output_file = "odbc_result.txt"; + +inline void GetAllLinesInFile(const std::string file_name, + std::vector< std::string >& lines) { + std::ifstream file(file_name); + if (file.is_open()) { + std::string line; + while (getline(file, line)) { + lines.push_back(line); + } + file.close(); + } +} + +TEST(TableauQuery, IssueQueriesAll) { + // Get lines from file + std::vector< std::string > lines; + GetAllLinesInFile(all_queries_file, lines); + + // Connect to database + SQLHENV env = SQL_NULL_HENV; + SQLHDBC conn = SQL_NULL_HDBC; + SQLHSTMT stmt = SQL_NULL_HSTMT; + ASSERT_NO_THROW(AllocStatement((SQLTCHAR*)conn_string.c_str(), &env, &conn, + &stmt, true, false)); + + // Execute queries + size_t idx = 1; + std::wofstream output(output_file); + ASSERT_TRUE(output.is_open()); + + for (auto& query : lines) { + SQLRETURN ret = SQLExecDirect(stmt, (SQLTCHAR*)query.c_str(), SQL_NTS); + output << "\"" << idx++ << "\", \""; + output << (SQL_SUCCEEDED(ret) ? 
"PASS" : "FAIL") << "\", \"" + << query.c_str() << "\"" << std::endl; + if (SQL_SUCCEEDED(ret)) + SQLCloseCursor(stmt); + } + output.close(); + + SQLFreeHandle(SQL_HANDLE_STMT, stmt); + SQLDisconnect(conn); + SQLFreeHandle(SQL_HANDLE_ENV, env); +} + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. + system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks itodbc_tableau_queries > leaks_itodbc_tableau_queries"); +#endif + return failures; +} diff --git a/sql-odbc/src/PerformanceTests/CMakeLists.txt b/sql-odbc/src/PerformanceTests/CMakeLists.txt new file mode 100644 index 0000000000..5e1e2e9acc --- /dev/null +++ b/sql-odbc/src/PerformanceTests/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(performance_tests) + +set(RESULTS_PTESTS "${CMAKE_CURRENT_SOURCE_DIR}/PTODBCResults") +set(INFO_PTESTS "${CMAKE_CURRENT_SOURCE_DIR}/PTODBCInfo") +set(EXECUTION_PTESTS "${CMAKE_CURRENT_SOURCE_DIR}/PTODBCExecution") + +# Projects to build +add_subdirectory(${RESULTS_PTESTS}) +add_subdirectory(${INFO_PTESTS}) +add_subdirectory(${EXECUTION_PTESTS}) + diff --git a/sql-odbc/src/PerformanceTests/PTODBCExecution/CMakeLists.txt b/sql-odbc/src/PerformanceTests/PTODBCExecution/CMakeLists.txt new file mode 100644 index 0000000000..6a4a618011 --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCExecution/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(performance_execution) + +# Source, headers, and include dirs +set(SOURCE_FILES performance_odbc_execution.cpp) +include_directories(${ODFEODBC_SRC}) + +# Generate executable +add_executable(performance_execution ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(performance_execution odfesqlodbc) +target_compile_definitions(performance_execution PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/PerformanceTests/PTODBCExecution/performance_odbc_execution.cpp b/sql-odbc/src/PerformanceTests/PTODBCExecution/performance_odbc_execution.cpp new file mode 100644 index 0000000000..e1d7942366 --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCExecution/performance_odbc_execution.cpp @@ -0,0 +1,76 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "chrono" +#include +#include +#include +#include +// clang-format on +#define IT_SIZEOF(x) (NULL == (x) ? 
0 : (sizeof((x)) / sizeof((x)[0]))) +#define ITERATION_COUNT 12 +std::wstring dsn_name = L"DSN=test_dsn"; +const wchar_t* const query = L"SELECT * FROM kibana_sample_data_flights limit 10000"; + +int Setup(SQLHENV* env, SQLHDBC* conn, SQLHSTMT* hstmt) { + SQLTCHAR out_conn_string[1024]; + SQLSMALLINT out_conn_string_length; + if (SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, env)) + && SQL_SUCCEEDED(SQLSetEnvAttr(*env, SQL_ATTR_ODBC_VERSION, (void*)SQL_OV_ODBC3, 0)) + && SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_DBC, *env, conn)) + && SQL_SUCCEEDED(SQLDriverConnect(*conn, NULL, (SQLTCHAR*)dsn_name.c_str(), SQL_NTS, + out_conn_string, IT_SIZEOF(out_conn_string), + &out_conn_string_length, SQL_DRIVER_COMPLETE)) + && SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, *conn, hstmt))) { + return SQL_SUCCESS; + } + return SQL_ERROR; +} + +void Teardown(SQLHDBC* conn, SQLHENV* env) { + SQLDisconnect(*conn); + SQLFreeHandle(SQL_HANDLE_ENV, *env); +} + +int QueryExecutionTime() { + SQLRETURN ret = SQL_ERROR; + try { + SQLHENV env = SQL_NULL_HENV; + SQLHDBC conn = SQL_NULL_HDBC; + SQLHSTMT hstmt = SQL_NULL_HSTMT; + if (SQL_SUCCEEDED(Setup(&env, &conn, &hstmt))) { + std::cout << "Time(ms) for query execution:" << std::endl; + for (int i = 0; i < ITERATION_COUNT; i++) { + // Calculate time(ms) for query execution + auto start = std::chrono::steady_clock::now(); + ret = SQLExecDirect(hstmt, (SQLTCHAR*)query, SQL_NTS); + auto end = std::chrono::steady_clock::now(); + std::cout<< std::chrono::duration_cast< std::chrono::milliseconds >( + end - start).count()<< std::endl; + SQLCloseCursor(hstmt); + } + Teardown(&conn, &env); + } + } catch (...) 
{ + std::cout << "Exception occurred" << std::endl; + } + return ret; +} + +int main() { + return QueryExecutionTime(); +} diff --git a/sql-odbc/src/PerformanceTests/PTODBCInfo/CMakeLists.txt b/sql-odbc/src/PerformanceTests/PTODBCInfo/CMakeLists.txt new file mode 100644 index 0000000000..55f0919778 --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCInfo/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(performance_info) + +# Source, headers, and include dirs +set(SOURCE_FILES performance_odbc_info.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} + ${RABBIT_SRC} + ${RAPIDJSON_SRC}) + +# Generate executable +add_executable(performance_info ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(performance_info odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(performance_info PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/PerformanceTests/PTODBCInfo/packages.config b/sql-odbc/src/PerformanceTests/PTODBCInfo/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCInfo/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/PerformanceTests/PTODBCInfo/pch.cpp b/sql-odbc/src/PerformanceTests/PTODBCInfo/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ 
b/sql-odbc/src/PerformanceTests/PTODBCInfo/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/PerformanceTests/PTODBCInfo/pch.h b/sql-odbc/src/PerformanceTests/PTODBCInfo/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCInfo/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. 
+// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/PerformanceTests/PTODBCInfo/performance_odbc_info.cpp b/sql-odbc/src/PerformanceTests/PTODBCInfo/performance_odbc_info.cpp new file mode 100644 index 0000000000..b5755ea9cc --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCInfo/performance_odbc_info.cpp @@ -0,0 +1,147 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "pch.h" +#include "es_communication.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +#include "chrono" +#include +#include "rabbit.hpp" +#include +#include +// clang-format on + +const std::vector< std::string > base_items = {"name", "cluster_name", + "cluster_uuid"}; +const std::vector< std::string > version_items = { + "number", + "build_flavor", + "build_type", + "build_hash", + "build_date", + "build_snapshot", + "lucene_version", + "minimum_wire_compatibility_version", + "minimum_index_compatibility_version"}; +const std::string sync_start = "%%__PARSE__SYNC__START__%%"; +const std::string sync_sep = "%%__SEP__%%"; +const std::string sync_end = "%%__PARSE__SYNC__END__%%"; + +std::string wstring_to_string(const std::wstring& src) { + return std::wstring_convert< std::codecvt_utf8_utf16< wchar_t >, wchar_t >{} + .to_bytes(src); +} + +runtime_options rt_opts = []() { + runtime_options temp_opts; + for (auto it : conn_str_pair) { + std::wstring tmp = it.first; + 
std::transform(tmp.begin(), tmp.end(), tmp.begin(), towlower); + if (tmp == L"host") + temp_opts.conn.server = wstring_to_string(it.second); + else if (tmp == L"port") + temp_opts.conn.port = wstring_to_string(it.second); + else if (tmp == L"responsetimeout") + temp_opts.conn.timeout = wstring_to_string(it.second); + else if (tmp == L"auth") + temp_opts.auth.auth_type = wstring_to_string(it.second); + else if (tmp == L"user") + temp_opts.auth.username = wstring_to_string(it.second); + else if (tmp == L"password") + temp_opts.auth.password = wstring_to_string(it.second); + else if (tmp == L"region") + temp_opts.auth.region = wstring_to_string(it.second); + else if (tmp == L"usessl") + temp_opts.crypt.use_ssl = + (std::stoul(it.second, nullptr, 10) ? true : false); + else if (tmp == L"") + temp_opts.crypt.verify_server = + (std::stoul(it.second, nullptr, 10) ? true : false); + } + return temp_opts; +}(); + +void GetVersionInfoString(std::string& version_info) { + // Connect to DB + ESCommunication es_comm; + es_comm.ConnectionOptions(rt_opts, false, 0, 0); + ASSERT_TRUE(es_comm.ConnectDBStart()); + + // Issue request + std::string endpoint, content_type, query, fetch_size; + std::shared_ptr< Aws::Http::HttpResponse > response = + es_comm.IssueRequest(endpoint, Aws::Http::HttpMethod::HTTP_GET, + content_type, query, fetch_size); + + // Convert response to string + ASSERT_TRUE(response != nullptr); + es_comm.AwsHttpResponseToString(response, version_info); +} + +void ParseVersionInfoString( + const std::string& input_info, + std::vector< std::pair< std::string, std::string > >& output_info) { + // Parse input + rabbit::document doc; + doc.parse(input_info); + + // Populate output with info + for (auto& it : base_items) { + ASSERT_TRUE(doc.has(it)); + output_info.push_back( + std::pair< std::string, std::string >(it, doc[it].str())); + } + + ASSERT_TRUE(doc.has("version")); + for (auto& it : version_items) { + ASSERT_TRUE(doc["version"].has(it)); + 
output_info.push_back(std::pair< std::string, std::string >( + it, doc["version"][it].str())); + } +} + +TEST(InfoCollect, EndPoint) { + // Get version string from endpoint + std::string version_info; + GetVersionInfoString(version_info); + + // Parse into vector pair + std::vector< std::pair< std::string, std::string > > output_info; + ParseVersionInfoString(version_info, output_info); + + std::cout << sync_start << std::endl; + for (auto& it : output_info) { + std::cout << it.first << sync_sep << it.second << std::endl; + } + std::cout << sync_end << std::endl; +} + +int main(int argc, char** argv) { + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + + return failures; +} diff --git a/sql-odbc/src/PerformanceTests/PTODBCResults/CMakeLists.txt b/sql-odbc/src/PerformanceTests/PTODBCResults/CMakeLists.txt new file mode 100644 index 0000000000..77b3eb7048 --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCResults/CMakeLists.txt @@ -0,0 +1,30 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(performance_results) + +# Source, headers, and include dirs +set(SOURCE_FILES performance_odbc_results.cpp) +include_directories( ${UT_HELPER} + ${IT_HELPER} + ${ODFEODBC_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(performance_results ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(performance_results odfesqlodbc itodbc_helper ut_helper gtest_main) +target_compile_definitions(performance_results PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/PerformanceTests/PTODBCResults/packages.config b/sql-odbc/src/PerformanceTests/PTODBCResults/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCResults/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/PerformanceTests/PTODBCResults/pch.cpp b/sql-odbc/src/PerformanceTests/PTODBCResults/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCResults/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. 
+// + +#include "pch.h" diff --git a/sql-odbc/src/PerformanceTests/PTODBCResults/pch.h b/sql-odbc/src/PerformanceTests/PTODBCResults/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCResults/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/PerformanceTests/PTODBCResults/performance_odbc_results.cpp b/sql-odbc/src/PerformanceTests/PTODBCResults/performance_odbc_results.cpp new file mode 100644 index 0000000000..b435b5e3c5 --- /dev/null +++ b/sql-odbc/src/PerformanceTests/PTODBCResults/performance_odbc_results.cpp @@ -0,0 +1,306 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "it_odbc_helper.h" +#include "chrono" +#include +#include +// clang-format on + +#define BIND_SIZE 255 +#define ROWSET_SIZE_5 5 +#define ROWSET_SIZE_50 50 +#define SINGLE_ROW 1 +#define ITERATION_COUNT 10 + +#ifndef WIN32 +typedef SQLULEN SQLROWCOUNT; +typedef SQLULEN SQLROWSETSIZE; +typedef SQLULEN SQLTRANSID; +typedef SQLLEN SQLROWOFFSET; +#endif + +const wchar_t* const m_query = + L"SELECT * FROM kibana_sample_data_flights limit 10000"; + +typedef struct Col { + SQLLEN data_len; + SQLCHAR data_dat[BIND_SIZE]; +} Col; + +class TestPerformance : public testing::Test { + public: + TestPerformance() { + } + void SetUp() { + AllocStatement((SQLTCHAR*)conn_string.c_str(), &m_env, &m_conn, + &m_hstmt, true, true); + } + void TearDown() { + SQLDisconnect(m_conn); + } + + protected: + SQLHENV m_env = SQL_NULL_HENV; + SQLHDBC m_conn = SQL_NULL_HDBC; + SQLHSTMT m_hstmt = SQL_NULL_HSTMT; +}; + +const std::string sync_start = "%%__PARSE__SYNC__START__%%"; +const std::string sync_query = "%%__QUERY__%%"; +const std::string sync_case = "%%__CASE__%%"; +const std::string sync_min = "%%__MIN__%%"; +const std::string sync_max = "%%__MAX__%%"; +const std::string sync_mean = "%%__MEAN__%%"; +const std::string sync_median = "%%__MEDIAN__%%"; +const std::string sync_end = "%%__PARSE__SYNC__END__%%"; + +void ReportTime(const std::string test_case, std::vector< long long > data) { + size_t size = data.size(); + ASSERT_EQ(size, (size_t)ITERATION_COUNT); + + // Get max and min + long long time_max = *std::max_element(data.begin(), data.end()); + long long time_min = *std::min_element(data.begin(), data.end()); + + // Get median + long long time_mean = + std::accumulate(std::begin(data), std::end(data), 0ll) / data.size(); + + // Get median + std::sort(data.begin(), data.end()); + long long time_median = (size % 2) + ? 
data[size / 2] + : ((data[(size / 2) - 1] + data[size / 2]) / 2); + + // Output results + std::cout << sync_start << std::endl; + std::cout << sync_query; + std::wcout << std::wstring(m_query) << std::endl; + std::cout << sync_case << test_case << std::endl; + std::cout << sync_min << time_min << " ms" << std::endl; + std::cout << sync_max << time_max << " ms" << std::endl; + std::cout << sync_mean << time_mean << " ms" << std::endl; + std::cout << sync_median << time_median << " ms" << std::endl; + std::cout << sync_end << std::endl; + + std::cout << "Time dump: "; + for (size_t i = 0; i < data.size(); i++) { + std::cout << data[i] << " ms"; + if (i != (data.size() - 1)) + std::cout << ", "; + } + std::cout << std::endl; +} + +TEST_F(TestPerformance, Time_Execute) { + // Execute a query just to wake the server up in case it has been sleeping + // for a while + SQLRETURN ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query, SQL_NTS); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + ASSERT_TRUE(SQL_SUCCEEDED(SQLCloseCursor(m_hstmt))); + + std::vector< long long > times; + for (size_t iter = 0; iter < ITERATION_COUNT; iter++) { + auto start = std::chrono::steady_clock::now(); + ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query, SQL_NTS); + auto end = std::chrono::steady_clock::now(); + LogAnyDiagnostics(SQL_HANDLE_STMT, m_hstmt, ret); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + ASSERT_TRUE(SQL_SUCCEEDED(SQLCloseCursor(m_hstmt))); + times.push_back( + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count()); + } + ReportTime("Execute Query", times); +} + +TEST_F(TestPerformance, Time_BindColumn_FetchSingleRow) { + SQLSMALLINT total_columns = 0; + int row_count = 0; + + std::vector< long long > times; + for (size_t iter = 0; iter < ITERATION_COUNT; iter++) { + // Execute query + SQLRETURN ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query, SQL_NTS); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + + // Get column count + SQLNumResultCols(m_hstmt, &total_columns); + std::vector< 
std::vector< Col > > cols(total_columns); + for (size_t i = 0; i < cols.size(); i++) + cols[i].resize(SINGLE_ROW); + + // Bind and fetch + auto start = std::chrono::steady_clock::now(); + for (size_t i = 0; i < cols.size(); i++) + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)i + 1, SQL_C_CHAR, + (SQLPOINTER)&cols[i][0].data_dat[i], 255, + &cols[i][0].data_len); + while (SQLFetch(m_hstmt) == SQL_SUCCESS) + row_count++; + auto end = std::chrono::steady_clock::now(); + ASSERT_TRUE(SQL_SUCCEEDED(SQLCloseCursor(m_hstmt))); + times.push_back( + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count()); + } + ReportTime("Bind and (1 row) Fetch", times); +} + +TEST_F(TestPerformance, Time_BindColumn_Fetch5Rows) { + SQLROWSETSIZE row_count = 0; + SQLSMALLINT total_columns = 0; + SQLROWSETSIZE rows_fetched = 0; + SQLUSMALLINT row_status[ROWSET_SIZE_5]; + SQLSetStmtAttr(m_hstmt, SQL_ROWSET_SIZE, (void*)ROWSET_SIZE_5, 0); + + std::vector< long long > times; + for (size_t iter = 0; iter < ITERATION_COUNT; iter++) { + // Execute query + SQLRETURN ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query, SQL_NTS); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + + // Get column count + SQLNumResultCols(m_hstmt, &total_columns); + std::vector< std::vector< Col > > cols(total_columns); + for (size_t i = 0; i < cols.size(); i++) + cols[i].resize(ROWSET_SIZE_5); + + // Bind and fetch + auto start = std::chrono::steady_clock::now(); + for (size_t i = 0; i < cols.size(); i++) + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)i + 1, SQL_C_CHAR, + (SQLPOINTER)&cols[i][0].data_dat[i], BIND_SIZE, + &cols[i][0].data_len); + while (SQLExtendedFetch(m_hstmt, SQL_FETCH_NEXT, 0, &rows_fetched, + row_status) + == SQL_SUCCESS) { + row_count += rows_fetched; + if (rows_fetched < ROWSET_SIZE_5) + break; + } + auto end = std::chrono::steady_clock::now(); + ASSERT_TRUE(SQL_SUCCEEDED(SQLCloseCursor(m_hstmt))); + times.push_back( + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count()); 
+ } + ReportTime("Bind and (5 row) Fetch", times); +} + +TEST_F(TestPerformance, Time_BindColumn_Fetch50Rows) { + SQLROWSETSIZE row_count = 0; + SQLSMALLINT total_columns = 0; + SQLROWSETSIZE rows_fetched = 0; + SQLUSMALLINT row_status[ROWSET_SIZE_50]; + SQLSetStmtAttr(m_hstmt, SQL_ROWSET_SIZE, (void*)ROWSET_SIZE_50, 0); + + std::vector< long long > times; + for (size_t iter = 0; iter < ITERATION_COUNT; iter++) { + // Execute query + SQLRETURN ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query, SQL_NTS); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + + // Get column count + SQLNumResultCols(m_hstmt, &total_columns); + std::vector< std::vector< Col > > cols(total_columns); + for (size_t i = 0; i < cols.size(); i++) + cols[i].resize(ROWSET_SIZE_50); + + // Bind and fetch + auto start = std::chrono::steady_clock::now(); + for (size_t i = 0; i < cols.size(); i++) + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)i + 1, SQL_C_CHAR, + (SQLPOINTER)&cols[i][0].data_dat[i], BIND_SIZE, + &cols[i][0].data_len); + while (SQLExtendedFetch(m_hstmt, SQL_FETCH_NEXT, 0, &rows_fetched, + row_status) + == SQL_SUCCESS) { + row_count += rows_fetched; + if (rows_fetched < ROWSET_SIZE_50) + break; + } + + auto end = std::chrono::steady_clock::now(); + ASSERT_TRUE(SQL_SUCCEEDED(SQLCloseCursor(m_hstmt))); + times.push_back( + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count()); + } + ReportTime("Bind and (50 row) Fetch", times); +} + +TEST_F(TestPerformance, Time_Execute_FetchSingleRow) { + SQLSMALLINT total_columns = 0; + int row_count = 0; + + std::vector< long long > times; + for (size_t iter = 0; iter < ITERATION_COUNT; iter++) { + // Execute query + auto start = std::chrono::steady_clock::now(); + SQLRETURN ret = SQLExecDirect(m_hstmt, (SQLTCHAR*)m_query, SQL_NTS); + ASSERT_TRUE(SQL_SUCCEEDED(ret)); + + // Get column count + SQLNumResultCols(m_hstmt, &total_columns); + std::vector< std::vector< Col > > cols(total_columns); + for (size_t i = 0; i < cols.size(); i++) + 
cols[i].resize(SINGLE_ROW); + + // Bind and fetch + for (size_t i = 0; i < cols.size(); i++) + ret = SQLBindCol(m_hstmt, (SQLUSMALLINT)i + 1, SQL_C_CHAR, + (SQLPOINTER)&cols[i][0].data_dat[i], BIND_SIZE, + &cols[i][0].data_len); + while (SQLFetch(m_hstmt) == SQL_SUCCESS) + row_count++; + + auto end = std::chrono::steady_clock::now(); + ASSERT_TRUE(SQL_SUCCEEDED(SQLCloseCursor(m_hstmt))); + times.push_back( + std::chrono::duration_cast< std::chrono::milliseconds >(end - start) + .count()); + } + ReportTime("Execute Query, Bind and (1 row) Fetch", times); +} + +int main(int argc, char** argv) { +#ifdef __APPLE__ + // Enable malloc logging for detecting memory leaks. + system("export MallocStackLogging=1"); +#endif + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + +#ifdef __APPLE__ + // Disable malloc logging and report memory leaks + system("unset MallocStackLogging"); + system("leaks performance_results > leaks_performance_results"); +#endif + return failures; +} diff --git a/sql-odbc/src/TableauConnector/odfe_sql_odbc/README.md b/sql-odbc/src/TableauConnector/odfe_sql_odbc/README.md new file mode 100644 index 0000000000..6498f886a5 --- /dev/null +++ b/sql-odbc/src/TableauConnector/odfe_sql_odbc/README.md @@ -0,0 +1,8 @@ +## odfe_sql_odbc + +The connector files in `odfe_sql_odbc` can be used to connect to elasticsearch server in Tableau. + +* These connector files remove and modify a set of unsupported functions and operations. 
+* It includes set of capabilities to customize and tune connector behavior + +To use this connector with Tableau, [Package the connector](https://tableau.github.io/connector-plugin-sdk/docs/package-sign) and [Run Packaged Connector (TACO file)](https://tableau.github.io/connector-plugin-sdk/docs/run-taco) \ No newline at end of file diff --git a/sql-odbc/src/TableauConnector/odfe_sql_odbc/connection-dialog.tcd b/sql-odbc/src/TableauConnector/odfe_sql_odbc/connection-dialog.tcd new file mode 100644 index 0000000000..24c364c615 --- /dev/null +++ b/sql-odbc/src/TableauConnector/odfe_sql_odbc/connection-dialog.tcd @@ -0,0 +1,14 @@ + + + + + + + + + + + diff --git a/sql-odbc/src/TableauConnector/odfe_sql_odbc/connectionBuilder.js b/sql-odbc/src/TableauConnector/odfe_sql_odbc/connectionBuilder.js new file mode 100644 index 0000000000..b5a04d1777 --- /dev/null +++ b/sql-odbc/src/TableauConnector/odfe_sql_odbc/connectionBuilder.js @@ -0,0 +1,42 @@ +(function dsbuilder(attr){ + var params = {}; + + // Set host information in connection string + params["SERVER"] = attr[connectionHelper.attributeServer]; + params["PORT"] = attr[connectionHelper.attributePort]; + + // Set authentication values in connection string + var authAttrValue = attr[connectionHelper.attributeAuthentication]; + params["Auth"] = attr[connectionHelper.attributeAuthentication]; + if (authAttrValue == "AWS_SIGV4"){ + params["Region"] = attr[connectionHelper.attributeVendor1]; + } else if (authAttrValue == "BASIC"){ + params["UID"] = attr[connectionHelper.attributeUsername]; + params["PWD"] = attr[connectionHelper.attributePassword]; + } + + // Set SSL value in connection string + if (attr[connectionHelper.attributeSSLMode] == "require"){ + params["useSSL"] = "1"; + } else { + params["useSSL"] = "0"; + } + + // Parse additional options and add in connection string + var odbcConnectStringExtrasMap = {}; + const attributeODBCConnectStringExtras = "vendor2"; + if (attributeODBCConnectStringExtras in attr){ + 
odbcConnectStringExtrasMap = connectionHelper.ParseODBCConnectString(attr[attributeODBCConnectStringExtras]); + } + for (var key in odbcConnectStringExtrasMap){ + params[key] = odbcConnectStringExtrasMap[key]; + } + + // Format the attributes as 'key=value' + var formattedParams = []; + formattedParams.push(connectionHelper.formatKeyValuePair(driverLocator.keywordDriver, driverLocator.locateDriver(attr))); + for (var key in params){ + formattedParams.push(connectionHelper.formatKeyValuePair(key, params[key])); + } + return formattedParams; +}) diff --git a/sql-odbc/src/TableauConnector/odfe_sql_odbc/connectionResolver.tdr b/sql-odbc/src/TableauConnector/odfe_sql_odbc/connectionResolver.tdr new file mode 100644 index 0000000000..ecf00d88a3 --- /dev/null +++ b/sql-odbc/src/TableauConnector/odfe_sql_odbc/connectionResolver.tdr @@ -0,0 +1,27 @@ + + + + + + diff --git a/sql-odbc/src/TestRunner/test_exclude_list.txt b/sql-odbc/src/TestRunner/test_exclude_list.txt new file mode 100644 index 0000000000..32a0ca2634 --- /dev/null +++ b/sql-odbc/src/TestRunner/test_exclude_list.txt @@ -0,0 +1,2 @@ +ut_aws_sdk_cpp +itodbc_aws_auth \ No newline at end of file diff --git a/sql-odbc/src/TestRunner/test_runner.py b/sql-odbc/src/TestRunner/test_runner.py new file mode 100644 index 0000000000..09433637a4 --- /dev/null +++ b/sql-odbc/src/TestRunner/test_runner.py @@ -0,0 +1,319 @@ +""" + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+""" + +import os +import subprocess +import json +import re +import traceback +import sys +import getopt +import shutil +from mako.template import Template +from string import capwords + +UT_TYPE = "UT" +IT_TYPE = "IT" +PERFORMANCE_TYPE = "performance" +PERFORMANCE_INFO = "performance_info" +PERFORMANCE_RESULTS = "performance_results" +EXCLUDE_EXTENSION_LIST = ( + ".py", ".c", ".cmake", ".log", + ".pdb", ".dll", ".sln", ".vcxproj", ".user", + ".tlog", ".lastbuildstate", ".filters", + ".obj", ".exp", ".lib", ".h", ".cpp", ".ilk") +total_failures = 0 +SYNC_START = "%%__PARSE__SYNC__START__%%" +SYNC_SEP = "%%__SEP__%%" +SYNC_END = "%%__PARSE__SYNC__END__%%" +SYNC_QUERY = "%%__QUERY__%%"; +SYNC_CASE = "%%__CASE__%%"; +SYNC_MIN = "%%__MIN__%%"; +SYNC_MAX = "%%__MAX__%%"; +SYNC_MEAN = "%%__MEAN__%%"; +SYNC_MEDIAN = "%%__MEDIAN__%%"; + +def GetTestSuiteExes(test_type, test_suites, exclude_tests_list): + test_exes = [] + for root, dirs, files in os.walk(os.getcwd()): + for name in dirs: + if name.startswith("bin"): + dirs = name + for file_name in files: + if file_name.endswith(EXCLUDE_EXTENSION_LIST): + continue + if file_name.startswith(tuple(exclude_tests_list)): + continue + if test_suites is None and file_name.startswith(test_type.lower()): + print(f"Found {test_type} file: {file_name}") + test_exes.append(os.path.join(root, file_name)) + elif test_suites is not None and file_name.startswith(test_type.lower()) and (file_name in test_suites.split(sep=",")): + print(f"Found {test_type} file: {file_name}") + test_exes.append(os.path.join(root, file_name)) + return test_exes + +def RunTests(tests, test_type): + output = [] + global total_failures + for test in tests: + print("Running " + test) + output_path = test.replace(".exe", "") + ".log" + total_failures += subprocess.call([test, "-fout", output_path, "--gtest_color=no"]) + if test_type == UT_TYPE: + with open(output_path, "r+") as f: + output.append({"UnitTest" : test.split(os.path.sep)[-1].replace(".exe",""), 
"Log": f.read()}) + elif test_type == IT_TYPE: + with open(output_path, "r+") as f: + output.append({"IntegrationTest" : test.split(os.path.sep)[-1].replace(".exe",""), "Log": f.read()}) + print("Total Failures :", total_failures) + return output + +def FindBetween(s, f, l): + try: + start = s.index(f) + len(f) + end = s.index(l,start) + return s[start:end] + except ValueError: + return "" + +def GetAndTranslatePerformanceInfo(test): + global total_failures + output_path = test.replace(".exe", "") + ".log" + total_failures += subprocess.call([test, "-fout", output_path, "--gtest_color=no"]) + output = None + with open(output_path, "r+") as f: + log = f.readlines() + if log == None: + raise Exception("Failed to read in performance info test results") + reading = False + output = {} + for line in log: + if SYNC_START in line: + reading = True + continue + if SYNC_END in line: + reading = False + continue + if reading: + data = line.split(SYNC_SEP) + if len(data) != 2: + raise Exception(f"Unknown log line format: {line}") + if data[0].rstrip() == "number": + data[0] = "Version Number" + else: + data[0] = capwords(data[0].rstrip().replace("_", " ")) + data[0].replace("Uuid", "UUID") + output[data[0]] = data[1] + if "Not all tests passed" in line: + raise Exception("Performance info test failed") + if output == {}: + raise Exception("Failed to get any information out of performance info log") + return output + +def GetAndTranslatePerformanceResults(test): + global total_failures + output_path = test.replace(".exe", "") + ".log" + total_failures += subprocess.call([test, "-fout", output_path, "--gtest_color=no"]) + output = None + with open(output_path, "r+") as f: + log = f.readlines() + if log == None: + raise Exception("Failed to read in performance info test results") + reading = False + output = [] + single_case = {} + sync_items_line = [SYNC_QUERY, SYNC_CASE, SYNC_MIN, SYNC_MAX, SYNC_MEAN, SYNC_MEDIAN] + sync_items_readable = 
[item.replace("%%","").replace("__","").capitalize() for item in sync_items_line] + for line in log: + if SYNC_START in line: + single_case = {} + reading = True + continue + if SYNC_END in line: + if set(sync_items_readable) != set(single_case.keys()): + info = f'Missing data in test case: {single_case}. Items {sync_items_readable}. Keys {single_case.keys()}' + raise Exception(info) + output.append(single_case) + reading = False + continue + if reading: + for sync_item in sync_items_line: + if sync_item in line: + single_case[sync_item.replace("%%","").replace("__","").capitalize()] = line.replace(sync_item,"").rstrip() + return output + +def ParseUnitTestCase(log_lines, test_case): + start_tag = test_case + "." + test_case_info = { "TestCase" : test_case } + tests = [] + for log_line in log_lines: + if start_tag in log_line and "RUN" in log_line: + test = log_line.split(start_tag)[1] + tests.append(test) + if "[----------] " in log_line and (test_case + " ") in log_line and log_line.endswith(" ms total)"): + test_case_info["TotalTime"] = FindBetween(log_line, "(", ")").replace(" total","") + + test_infos = [] + for test in tests: + test_tag = start_tag + test + test_info = { "TestName" : test } + for log_line in log_lines: + if test_tag in log_line and log_line.endswith(")"): + test_info["TestTime"] = FindBetween(log_line, "(", ")") + test_info["TestResult"] = FindBetween(log_line, "[", "]").replace(" ", "") + + if test_info["TestResult"] != "OK": + start_error_grab = False + error_info = "" + for log_line in log_lines: + if test_tag in log_line and not log_line.endswith(")"): + start_error_grab = True + elif test_tag in log_line and log_line.endswith(")"): + break + elif start_error_grab: + if error_info != "": + error_info += os.linesep + log_line + else: + error_info += log_line + test_info["Error"] = error_info + test_infos.append(test_info) + test_case_info["TestCount"] = str(len(test_infos)) + test_case_info["TestResults"] = test_infos + pass_count = 0 + 
for test_info in test_infos: + if test_info["TestResult"] == "OK": + pass_count = pass_count + 1 + test_case_info["PassCount"] = str(pass_count) + return test_case_info + +def ParseUnitTestLog(unit_test, log): + log_json = { "UnitTest" : unit_test } + log_split = log.splitlines() + if len(log) < 8: + return {} + + tmp = "" + for log in log_split: + if log.startswith("[==========] Running"): + tmp = log.replace("[==========] Running ", "").replace(" test suites.", "").replace( + " test suite.", "").replace("tests from", "").replace("test from", "") + if tmp == "": + print('!!! FAILED TO FIND LOG WITH RUNNING !!!') + log_json["TotalTestCount"] = "0" + log_json["TotalTestCases"] = "0" + else: + log_json["TotalTestCount"] = tmp.split(" ")[0] + log_json["TotalTestCases"] = tmp.split(" ")[1] + log_json["TestCases"] = [] + test_cases = [] + for _line in log_split: + tag = { } + if re.match(r".*tests? from.*", _line) and "[----------]" in _line and "total" not in _line: + test_cases.append(re.split(" tests? 
from ", _line)[1]) + case_pass_count = 0 + test_pass_count = 0 + for test_case in test_cases: + log_json["TestCases"].append(ParseUnitTestCase(log_split, test_case)) + for test_case in log_json["TestCases"]: + if test_case["PassCount"] == test_case["TestCount"]: + case_pass_count += 1 + test_pass_count += int(test_case["PassCount"]) + log_json["CasePassCount"] = str(case_pass_count) + log_json["TestPassCount"] = str(test_pass_count) + return log_json + +def TranslateTestOutput(test_type, outputs): + log_jsons = [] + if test_type == UT_TYPE: + for output in outputs: + log_jsons.append(ParseUnitTestLog(output["UnitTest"], output["Log"])) + elif test_type == IT_TYPE: + for output in outputs: + log_jsons.append(ParseUnitTestLog(output["IntegrationTest"], output["Log"])) + return log_jsons + +def RunAllTests(test_types, test_suites, exclude_test_list): + final_output = {} + + for _type in test_types: + tests = GetTestSuiteExes(_type, test_suites, exclude_test_list) + print("!! Found tests:", *tests, sep="\n") + if PERFORMANCE_TYPE == _type: + final_output[PERFORMANCE_TYPE] = {} + for test in tests: + if test.replace(".exe", "").endswith(PERFORMANCE_INFO): + final_output[PERFORMANCE_TYPE]["Info"] = GetAndTranslatePerformanceInfo(test) + elif test.replace(".exe", "").endswith(PERFORMANCE_RESULTS): + final_output[PERFORMANCE_TYPE]["Results"] = GetAndTranslatePerformanceResults(test) + else: + test_outputs = RunTests(tests, _type) + final_output[_type] = TranslateTestOutput(_type, test_outputs) + return final_output + +def ParseCommandLineArguments(): + infile = None + outfile = None + suites = None + efile = None + opts, args = getopt.getopt(sys.argv[1:],"i:o:s:e:",["ifile=","ofile=","suites=","efile="]) + for opt,arg in opts: + if opt in ('-i', '--ifile'): + infile = arg + elif opt in ('-s', '--suites'): + suites = arg + elif opt in ('-o', '--ofile'): + outfile = arg + elif opt in ('-e', '--efile'): + efile = arg + return (infile, outfile, suites, efile) + +def main(): + 
try: + (infile, outfile, suites, efile) = ParseCommandLineArguments() + if infile is None or outfile is None: + print("Usage: -i -o [-s -e ]") + sys.exit(1) + exclude_test_list = [] + global total_failures + total_failures = 0 + if efile is not None: + with open(efile) as ef: + exclude_test_list = ef.readlines() + exclude_test_list = [l.strip() for l in exclude_test_list if l.strip() != ""] + if len(exclude_test_list) == 0: + print('== Exclude list empty. Running all available tests ==') + else: + print(f'== Excluding tests {exclude_test_list} ==') + else: + print('== No exclude list. Running all available tests ==') + print(f'== Using template file {infile} ==') + template = Template(filename=infile) + + if suites is not None: + print(f'== Using suites {suites} ==') + with open(os.path.join(os.getcwd(), outfile), 'w+') as results_file: + data = RunAllTests([UT_TYPE, IT_TYPE, PERFORMANCE_TYPE], suites, exclude_test_list) + os.chmod(outfile, 0o744) + results_file.write(template.render(data = data)) + + print(f"== Finished generating results file {outfile} ==") + os._exit(total_failures) + + except: + print(traceback.format_exc()) + os._exit(255) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/CMakeLists.txt b/sql-odbc/src/UnitTests/CMakeLists.txt new file mode 100644 index 0000000000..3d78103887 --- /dev/null +++ b/sql-odbc/src/UnitTests/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. 
See the License for the specific language governing +# permissions and limitations under the License. +# + +project(unit_tests) + +set(HELPER_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTHelper") +set(CONN_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTConn") +set(RABBIT_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTRabbit") +set(CRITICALSECTION_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTCriticalSection") +set(AWSSDKCPP_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTAwsSdkCpp") + +# Projects to build +add_subdirectory(${HELPER_UTEST}) +add_subdirectory(${CONN_UTEST}) +add_subdirectory(${RABBIT_UTEST}) +add_subdirectory(${CRITICALSECTION_UTEST}) +add_subdirectory(${AWSSDKCPP_UTEST}) \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/GoogleTest.LICENSE b/sql-odbc/src/UnitTests/GoogleTest.LICENSE new file mode 100644 index 0000000000..65c76c50ce --- /dev/null +++ b/sql-odbc/src/UnitTests/GoogleTest.LICENSE @@ -0,0 +1,28 @@ +Copyright 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/CMakeLists.txt b/sql-odbc/src/UnitTests/UTAwsSdkCpp/CMakeLists.txt new file mode 100644 index 0000000000..65184d80f0 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTAwsSdkCpp/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(ut_aws_sdk_cpp) + +# Source, headers, and include dirs +set(SOURCE_FILES test_aws_sdk_cpp.cpp) +include_directories( ${UT_HELPER} + ${AWSSDK_INCLUDE_DIR} + ${VLD_SRC}) + +# Generate executable +add_executable(ut_aws_sdk_cpp ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(ut_aws_sdk_cpp ut_helper gtest_main aws-cpp-sdk-core ${VLD}) +target_compile_definitions(ut_aws_sdk_cpp PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/packages.config b/sql-odbc/src/UnitTests/UTAwsSdkCpp/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTAwsSdkCpp/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.cpp b/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.h b/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/test_aws_sdk_cpp.cpp b/sql-odbc/src/UnitTests/UTAwsSdkCpp/test_aws_sdk_cpp.cpp new file mode 100644 index 0000000000..08a0d3696f --- /dev/null +++ b/sql-odbc/src/UnitTests/UTAwsSdkCpp/test_aws_sdk_cpp.cpp @@ -0,0 +1,77 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// clang-format off +#include "pch.h" +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif // __APPLE__ +#include +#include +#include +#include +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ +#include "unit_test_helper.h" + +using namespace Aws::Auth; +using namespace Aws::Client; +using namespace Aws::Http; + +static const char service_name[] = "es"; +static const char allocation_tag[] = "AWS_SIGV4_Test"; +static const char host[] = "https://search-bit-quill-cx3hpfoxvasohujxkllmgjwqde.us-west-2.es.amazonaws.com"; +static const char region[] = "us-west-2"; + +TEST(AWS_SIGV4, EnvironmentAWSCredentials) { + Aws::SDKOptions options; + EXPECT_NO_THROW(Aws::InitAPI(options)); + + auto request = CreateHttpRequest(Aws::String(host), HttpMethod::HTTP_GET, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod); + + std::shared_ptr credential_provider = Aws::MakeShared(allocation_tag); + + AWSAuthV4Signer signer(credential_provider, service_name, region); + ASSERT_TRUE(signer.SignRequest(*request)); + + auto http_client = CreateHttpClient(Aws::Client::ClientConfiguration()); + + auto response = http_client->MakeRequest(request); + ASSERT_NE(response, nullptr); + EXPECT_EQ(Aws::Http::HttpResponseCode::OK, response->GetResponseCode()); + + EXPECT_NO_THROW(Aws::ShutdownAPI(options)); +} + +TEST(SettingSDKOptions, TurnLoggingOn) { + Aws::SDKOptions options; + options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Info; + EXPECT_NO_THROW(Aws::InitAPI(options)); + EXPECT_NO_THROW(Aws::ShutdownAPI(options)); +} + +int main(int argc, char** argv) { + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + int failures = RUN_ALL_TESTS(); + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." 
: "All tests passed") << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + return failures; +} diff --git a/sql-odbc/src/UnitTests/UTConn/CMakeLists.txt b/sql-odbc/src/UnitTests/UTConn/CMakeLists.txt new file mode 100644 index 0000000000..0caf1b7d86 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTConn/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(ut_conn) + +# Source, headers, and include dirs +set(SOURCE_FILES test_conn.cpp test_query_execution.cpp) +include_directories( ${UT_HELPER} + ${ODFEODBC_SRC} + ${RAPIDJSON_SRC} + ${RABBIT_SRC} + ${LIBCURL_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(ut_conn ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(ut_conn odfesqlodbc ut_helper gtest_main) +target_compile_definitions(ut_conn PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTConn/packages.config b/sql-odbc/src/UnitTests/UTConn/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTConn/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTConn/pch.cpp b/sql-odbc/src/UnitTests/UTConn/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/UnitTests/UTConn/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTConn/pch.h b/sql-odbc/src/UnitTests/UTConn/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/UnitTests/UTConn/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTConn/test_conn.cpp b/sql-odbc/src/UnitTests/UTConn/test_conn.cpp new file mode 100644 index 0000000000..69ea55e612 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTConn/test_conn.cpp @@ -0,0 +1,144 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "es_communication.h" +// clang-format on + +const size_t valid_option_count = 4; +const size_t invalid_option_count = 4; +const size_t missing_option_count = 3; +const std::string valid_host = (use_ssl ? "https://localhost" : "localhost"); +const std::string valid_port = "9200"; +const std::string valid_user = "admin"; +const std::string valid_pw = "admin"; +const std::string valid_region = "us-west-3"; +const std::string invalid_host = "10.1.1.189"; +const std::string invalid_port = "920"; +const std::string invalid_user = "amin"; +const std::string invalid_pw = "amin"; +const std::string invalid_region = "bad-region"; +runtime_options valid_opt_val = {{valid_host, valid_port, "1", "0"}, + {"BASIC", valid_user, valid_pw, valid_region}, + {use_ssl, false, "", "", "", ""}}; +runtime_options invalid_opt_val = { + {invalid_host, invalid_port, "1", "0"}, + {"BASIC", invalid_user, invalid_pw, valid_region}, + {use_ssl, false, "", "", "", ""}}; +runtime_options missing_opt_val = {{"", "", "1", "0"}, + {"BASIC", "", invalid_pw, valid_region}, + {use_ssl, false, "", "", "", ""}}; + +TEST(TestESConnConnectionOptions, ValidParameters) { + ESCommunication conn; + EXPECT_EQ(true, + conn.ConnectionOptions(valid_opt_val, 1, 1, valid_option_count)); +} + +TEST(TestESConnConnectionOptions, MissingParameters) { + ESCommunication conn; + EXPECT_EQ(false, conn.ConnectionOptions(missing_opt_val, 1, 1, + missing_option_count)); +} + +class 
TestESConnConnectDBStart : public testing::Test { + public: + TestESConnConnectDBStart() { + } + + void SetUp() { + } + + void TearDown() { + m_conn.DropDBConnection(); + } + + ~TestESConnConnectDBStart() { + // cleanup any pending stuff, but no exceptions allowed + } + + ESCommunication m_conn; +}; + +TEST_F(TestESConnConnectDBStart, ValidParameters) { + ASSERT_NE(false, m_conn.ConnectionOptions(valid_opt_val, 1, 1, + valid_option_count)); + EXPECT_EQ(true, m_conn.ConnectDBStart()); + EXPECT_EQ(CONNECTION_OK, m_conn.GetConnectionStatus()); +} + +TEST_F(TestESConnConnectDBStart, InvalidParameters) { + ASSERT_TRUE( + m_conn.ConnectionOptions(invalid_opt_val, 1, 1, invalid_option_count)); + EXPECT_EQ(false, m_conn.ConnectDBStart()); + EXPECT_EQ(CONNECTION_BAD, m_conn.GetConnectionStatus()); +} + +TEST_F(TestESConnConnectDBStart, MissingParameters) { + ASSERT_NE(true, m_conn.ConnectionOptions(missing_opt_val, 1, 1, + missing_option_count)); + EXPECT_EQ(false, m_conn.ConnectDBStart()); + EXPECT_EQ(CONNECTION_BAD, m_conn.GetConnectionStatus()); +} + +TEST(TestESConnDropDBConnection, InvalidParameters) { + ESCommunication conn; + ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); + ASSERT_TRUE( + conn.ConnectionOptions(invalid_opt_val, 1, 1, invalid_option_count)); + ASSERT_NE(true, conn.ConnectDBStart()); + ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); + conn.DropDBConnection(); + EXPECT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); +} + +TEST(TestESConnDropDBConnection, MissingParameters) { + ESCommunication conn; + ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); + ASSERT_NE(true, conn.ConnectionOptions(missing_opt_val, 1, 1, + missing_option_count)); + ASSERT_NE(true, conn.ConnectDBStart()); + ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); + conn.DropDBConnection(); + EXPECT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); +} + +TEST(TestESConnDropDBConnection, ValidParameters) { + ESCommunication conn; + ASSERT_NE(false, + 
conn.ConnectionOptions(valid_opt_val, 1, 1, valid_option_count)); + ASSERT_NE(false, conn.ConnectDBStart()); + ASSERT_EQ(CONNECTION_OK, conn.GetConnectionStatus()); + conn.DropDBConnection(); + EXPECT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); +} + +int main(int argc, char** argv) { + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + + int failures = RUN_ALL_TESTS(); + + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + + return failures; +} diff --git a/sql-odbc/src/UnitTests/UTConn/test_query_execution.cpp b/sql-odbc/src/UnitTests/UTConn/test_query_execution.cpp new file mode 100644 index 0000000000..e2af4d40a0 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTConn/test_query_execution.cpp @@ -0,0 +1,130 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "pch.h" +#include "unit_test_helper.h" +#include "es_communication.h" +#include "es_helper.h" +// clang-format on + +const std::string valid_host = (use_ssl ? 
"https://localhost" : "localhost"); +const std::string valid_port = "9200"; +const std::string valid_user = "admin"; +const std::string valid_pw = "admin"; +const std::string valid_region = "us-west-3"; +const std::string query = + "SELECT Origin FROM kibana_sample_data_flights LIMIT 5"; +const std::string all_columns_flights_query = + "SELECT * FROM kibana_sample_data_flights LIMIT 5"; +const std::string some_columns_flights_query = + "SELECT Origin, OriginWeather FROM kibana_sample_data_flights LIMIT 5"; +const std::string invalid_query = "SELECT"; +const int EXECUTION_SUCCESS = 0; +const int EXECUTION_ERROR = -1; +const std::string fetch_size = "0"; +const int all_columns_flights_count = 25; +const int some_columns_flights_count = 2; +runtime_options valid_conn_opt_val = { + {valid_host, valid_port, "1", "0"}, + {"BASIC", valid_user, valid_pw, valid_region}, + {use_ssl, false, "", "", "", ""}}; + +TEST(TestESExecDirect, ValidQuery) { + ESCommunication conn; + ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); + ASSERT_TRUE(conn.ConnectDBStart()); + EXPECT_EQ(EXECUTION_SUCCESS, + ESExecDirect(&conn, some_columns_flights_query.c_str(), fetch_size.c_str())); +} + +TEST(TestESExecDirect, MissingQuery) { + ESCommunication conn; + ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); + ASSERT_TRUE(conn.ConnectDBStart()); + EXPECT_EQ(EXECUTION_ERROR, ESExecDirect(&conn, NULL, fetch_size.c_str())); +} + +TEST(TestESExecDirect, MissingConnection) { + EXPECT_EQ(EXECUTION_ERROR, + ESExecDirect(NULL, query.c_str(), fetch_size.c_str())); +} + +// Conn::ExecDirect + +TEST(TestConnExecDirect, ValidQueryAllColumns) { + ESCommunication conn; + ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); + ASSERT_TRUE(conn.ConnectDBStart()); + + conn.ExecDirect(all_columns_flights_query.c_str(), fetch_size.c_str()); + ESResult* result = conn.PopResult(); + EXPECT_EQ("SELECT", result->command_type); + 
EXPECT_FALSE(result->result_json.empty()); + EXPECT_EQ(all_columns_flights_count, result->num_fields); + EXPECT_EQ((size_t)all_columns_flights_count, result->column_info.size()); +} + +TEST(TestConnExecDirect, ValidQuerySomeColumns) { + ESCommunication conn; + ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); + ASSERT_TRUE(conn.ConnectDBStart()); + + conn.ExecDirect(some_columns_flights_query.c_str(), fetch_size.c_str()); + ESResult* result = conn.PopResult(); + EXPECT_EQ("SELECT", result->command_type); + EXPECT_FALSE(result->result_json.empty()); + EXPECT_EQ(some_columns_flights_count, result->num_fields); + EXPECT_EQ((size_t)some_columns_flights_count, result->column_info.size()); +} + +TEST(TestConnExecDirect, InvalidQuery) { + ESCommunication conn; + ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); + ASSERT_TRUE(conn.ConnectDBStart()); + + conn.ExecDirect(invalid_query.c_str(), fetch_size.c_str()); + ESResult* result = conn.PopResult(); + EXPECT_EQ(NULL, (void*)result); +} + +// Conn::PopResult + +TEST(TestConnPopResult, PopEmptyQueue) { + ESCommunication conn; + ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); + ASSERT_TRUE(conn.ConnectDBStart()); + + ESResult* result = conn.PopResult(); + EXPECT_EQ(NULL, (void*)result); +} + +TEST(TestConnPopResult, PopTwoQueryResults) { + ESCommunication conn; + ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); + ASSERT_TRUE(conn.ConnectDBStart()); + + conn.ExecDirect(some_columns_flights_query.c_str(), fetch_size.c_str()); + conn.ExecDirect(all_columns_flights_query.c_str(), fetch_size.c_str()); + + // Pop some_columns + ESResult* result = conn.PopResult(); + EXPECT_EQ(some_columns_flights_count, result->num_fields); + + // Pop all_columns + result = conn.PopResult(); + EXPECT_EQ(all_columns_flights_count, result->num_fields); +} diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/CMakeLists.txt 
b/sql-odbc/src/UnitTests/UTCriticalSection/CMakeLists.txt new file mode 100644 index 0000000000..d381ae4145 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTCriticalSection/CMakeLists.txt @@ -0,0 +1,31 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(ut_critical_section) + +# Source, headers, and include dirs +set(SOURCE_FILES test_critical_section.cpp) +include_directories( ${UT_HELPER} + ${ODFEODBC_SRC} + ${RAPIDJSON_SRC} + ${VLD_SRC} + ${RABBIT_SRC} ) + +# Generate executable +add_executable(ut_critical_section ${SOURCE_FILES}) + +# Library dependencies +target_link_libraries(ut_critical_section odfesqlodbc ut_helper gtest_main) +target_compile_definitions(ut_critical_section PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/packages.config b/sql-odbc/src/UnitTests/UTCriticalSection/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTCriticalSection/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/pch.cpp b/sql-odbc/src/UnitTests/UTCriticalSection/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/UnitTests/UTCriticalSection/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/pch.h b/sql-odbc/src/UnitTests/UTCriticalSection/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/UnitTests/UTCriticalSection/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/test_critical_section.cpp b/sql-odbc/src/UnitTests/UTCriticalSection/test_critical_section.cpp new file mode 100644 index 0000000000..5d729ffd83 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTCriticalSection/test_critical_section.cpp @@ -0,0 +1,151 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ +#include + +#include +#include +#include + +#include "es_helper.h" +#include "pch.h" +#include "unit_test_helper.h" + +const size_t loop_count = 10; +const size_t thread_count = 1000; + +#define INIT_CS(x) XPlatformInitializeCriticalSection(&(x)) +#define ENTER_CS(x) XPlatformEnterCriticalSection((x)) +#define LEAVE_CS(x) XPlatformLeaveCriticalSection((x)) +#define DELETE_CS(x) XPlatformDeleteCriticalSection(&(x)) + +class TestCriticalSection : public testing::Test { + public: + TestCriticalSection() : m_lock(NULL) { + } + + void SetUp() { + INIT_CS(m_lock); + } + + void TearDown() { + DELETE_CS(m_lock); + } + + ~TestCriticalSection() { + } + void* m_lock; + + typedef struct CriticalInfo { + volatile size_t* shared_mem; + void* lock; + } CriticalInfo; +}; + +TEST_F(TestCriticalSection, SingleEnterExit) { + ENTER_CS(m_lock); + LEAVE_CS(m_lock); +} + +TEST_F(TestCriticalSection, MultipleEntersMultipleExits) { + for (size_t i = 0; i < loop_count; i++) + ENTER_CS(m_lock); + for (size_t i = 0; i < loop_count; i++) + LEAVE_CS(m_lock); +} + +TEST_F(TestCriticalSection, MultipleEnterExit) { + for (size_t i = 0; i < loop_count; i++) { + ENTER_CS(m_lock); + LEAVE_CS(m_lock); + } +} + +TEST_F(TestCriticalSection, MultiThreadSingleLock) { + auto f = [](CriticalInfo* info) { + *info->shared_mem = static_cast< size_t >(1); + ENTER_CS(info->lock); + *info->shared_mem = static_cast< size_t >(2); + LEAVE_CS(info->lock); + }; + + volatile size_t shared_mem = 0; + CriticalInfo crit_info; + crit_info.shared_mem = &shared_mem; + 
crit_info.lock = m_lock; + + ENTER_CS(m_lock); + std::thread thread_object(f, &crit_info); +#ifdef WIN32 + Sleep(1000); +#else + usleep(1000 * 1000); +#endif + EXPECT_EQ(shared_mem, static_cast< size_t >(1)); + LEAVE_CS(m_lock); +#ifdef WIN32 + Sleep(1000); +#else + usleep(1000 * 1000); +#endif + EXPECT_EQ(shared_mem, static_cast< size_t >(2)); + thread_object.join(); +} + +// Make many threads to see if multiple simultaneous attempts at locking cause +// any issues +TEST_F(TestCriticalSection, RaceConditions) { + auto f = [](CriticalInfo* info) { + std::stringstream ss_thread_id; + ss_thread_id << std::this_thread::get_id(); + size_t thread_id = static_cast< size_t >( + strtoull(ss_thread_id.str().c_str(), NULL, 10)); + ENTER_CS(info->lock); + // Update shared memory, release thread priority, then check if memory + // is still the same + *info->shared_mem = static_cast< size_t >(thread_id); +#ifdef WIN32 + Sleep(0); +#else + usleep(0); +#endif + EXPECT_EQ(thread_id, *info->shared_mem); + LEAVE_CS(info->lock); + }; + + volatile size_t shared_mem = 0; + CriticalInfo crit_info; + crit_info.shared_mem = &shared_mem; + crit_info.lock = m_lock; + std::vector< std::thread > threads; + threads.reserve(thread_count); + + for (size_t i = 0; i < thread_count; i++) + threads.emplace_back(std::thread(f, &crit_info)); + + for (auto& it : threads) + it.join(); +} + +int main(int argc, char** argv) { + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + int failures = RUN_ALL_TESTS(); + std::string output = testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." 
: "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); +} diff --git a/sql-odbc/src/UnitTests/UTHelper/CMakeLists.txt b/sql-odbc/src/UnitTests/UTHelper/CMakeLists.txt new file mode 100644 index 0000000000..9b831dfccd --- /dev/null +++ b/sql-odbc/src/UnitTests/UTHelper/CMakeLists.txt @@ -0,0 +1,42 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(ut_helper) + +# Source, headers, and include dirs +set(SOURCE_FILES unit_test_helper.cpp) +set(HEADER_FILES unit_test_helper.h) +include_directories(${ODFEODBC_SRC} ${VLD_SRC}) + +# Generate dll (SHARED) +add_library(ut_helper SHARED ${SOURCE_FILES} ${HEADER_FILES}) + +if (WIN32 AND BITNESS EQUAL 64) +find_library( VLD + vld + HINTS "${LIBRARY_DIRECTORY}/VisualLeakDetector/lib64" + ) +target_link_libraries(ut_helper ${VLD}) +elseif (WIN32 AND BITNESS EQUAL 32) +find_library( VLD + vld + HINTS "${LIBRARY_DIRECTORY}/VisualLeakDetector/lib" + ) +target_link_libraries(ut_helper ${VLD}) +endif() + +# Library dependencies +target_link_libraries(ut_helper odfesqlodbc gtest_main) +target_compile_definitions(ut_helper PUBLIC _UNICODE UNICODE) \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.cpp b/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.cpp new file mode 100644 index 0000000000..08d3f4f00f --- /dev/null +++ b/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.cpp @@ -0,0 +1,31 @@ +/* + * Copyright 
<2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "unit_test_helper.h" + +#include +#include + +void WriteFileIfSpecified(char** begin, char** end, const std::string& option, + std::string& output) { + char** itr = std::find(begin, end, option); + if (itr != end && ++itr != end) { + std::ofstream out_file(*itr); + if (out_file.good()) + out_file << output; + } + return; +} diff --git a/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.h b/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.h new file mode 100644 index 0000000000..a020ddf88a --- /dev/null +++ b/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.h @@ -0,0 +1,37 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef UNIT_TEST_HELPER +#define UNIT_TEST_HELPER + +#if defined(WIN32) || defined (WIN64) +#ifdef _DEBUG +#define VLD_FORCE_ENABLE 1 +#include +#endif +#endif + +#include +#ifdef USE_SSL +const bool use_ssl = true; +#else +const bool use_ssl = false; +#endif + +void WriteFileIfSpecified(char** begin, char** end, const std::string& option, + std::string& output); + +#endif diff --git a/sql-odbc/src/UnitTests/UTRabbit/CMakeLists.txt b/sql-odbc/src/UnitTests/UTRabbit/CMakeLists.txt new file mode 100644 index 0000000000..70a6e8e6c6 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTRabbit/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(ut_rabbit) + +# Source, headers, and include dirs +set(SOURCE_FILES test_rabbit.cpp) +include_directories( ${UT_HELPER} + ${RAPIDJSON_SRC} + ${RABBIT_SRC} + ${VLD_SRC} ) + +# Generate executable +add_executable(ut_rabbit ${SOURCE_FILES}) + +target_link_libraries(ut_rabbit ut_helper gtest_main ${VLD}) +target_compile_definitions(ut_rabbit PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTRabbit/packages.config b/sql-odbc/src/UnitTests/UTRabbit/packages.config new file mode 100644 index 0000000000..3c6fe17f54 --- /dev/null +++ b/sql-odbc/src/UnitTests/UTRabbit/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTRabbit/pch.cpp b/sql-odbc/src/UnitTests/UTRabbit/pch.cpp new file mode 100644 index 0000000000..f0bfab74ef --- /dev/null +++ b/sql-odbc/src/UnitTests/UTRabbit/pch.cpp @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTRabbit/pch.h b/sql-odbc/src/UnitTests/UTRabbit/pch.h new file mode 100644 index 0000000000..f0314e057d --- /dev/null +++ b/sql-odbc/src/UnitTests/UTRabbit/pch.h @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// +// pch.h +// Header for standard system include files. +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTRabbit/test_rabbit.cpp b/sql-odbc/src/UnitTests/UTRabbit/test_rabbit.cpp new file mode 100644 index 0000000000..9a3647b01a --- /dev/null +++ b/sql-odbc/src/UnitTests/UTRabbit/test_rabbit.cpp @@ -0,0 +1,284 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// clang-format off +#include "pch.h" +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif // __APPLE__ +#include "rabbit.hpp" +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ +#include "unit_test_helper.h" + +const std::string invalid_json_schema = "{ invalid schema }"; +const std::string valid_json_schema = "{" // This was generated from the example elasticsearch data + "\"type\": \"object\"," + "\"properties\": {" + "\"schema\": {" + "\"type\": \"array\"," + "\"items\": [{" + "\"type\": \"object\"," + "\"properties\": {" + "\"name\": { \"type\": \"string\" }," + "\"type\": { \"type\": \"string\" }" + "}," + "\"required\": [ \"name\", \"type\" ]" + "}]" + "}," + "\"total\": { \"type\": \"integer\" }," + "\"datarows\": {" + "\"type\": \"array\"," + "\"items\": {}" + "}," + "\"size\": { \"type\": \"integer\" }," + "\"status\": { \"type\": \"integer\" }" + "}," + "\"required\": [\"schema\", \"total\", \"datarows\", \"size\", \"status\"]" +"}"; +const std::string valid_json_for_schema = "{" // This was taken from the example elasticsearch data + "\"schema\": [{" + "\"name\": \"valid_name1\"," + "\"type\": \"valid_type1\"},{" + "\"name\": \"valid_name2\"," + "\"type\": \"valid_type2\"},{" + "\"name\": \"valid_name3\"," + "\"type\": \"valid_type3\"}]," + "\"total\": 10," + "\"datarows\": []," + "\"size\": 3," + "\"status\": 200" +"}"; +const std::string invalid_json_for_schema = "{" + "\"schema\": [{" + "\"name\": 1," + "\"type\": \"valid_type1\"},{" + "\"name\": 2," + "\"type\": \"valid_type2\"},{" + "\"name\": 3," + "\"type\": \"valid_type3\"}]," + "\"total\": \"10\"," + "\"datarows\": {}," + "\"size\": \"string_size\"," + "\"status\": 200" +"}"; +const std::string invalid_json = "invalid json"; +const std::string valid_json_int = "{ \"value\" : 123 }"; +const std::string invalid_json_int = "{ \"value\" : invalid }"; +const std::string valid_json_str = "{ \"value\" : \"123\"}"; 
+const std::string invalid_json_str = "{ \"value\" : \"123}"; +const std::string valid_json_arr = "{ \"value\" : [ 1, \"2\", true] }"; +const std::string invalid_json_arr = "{ \"value\" : [ 1, 2 3] }"; +const std::string valid_json_obj = "{" + "\"value\" : {" + "\"subval_str\" : \"1\"," + "\"subval_int\" : 2," + "\"subval_bool\" : true," + "\"subval_flt\" : 3.4" + "}" +"}"; +const std::string invalid_json_obj = "{" + "\"value\" : {" + "\"subval_str\" : \"1\"" + "\"subval_int\" : 2," + "\"subval_bool\" : true," + "\"subval_flt\" : 3.4" + "}" +"}"; +// Intentionally serialized because it will be compared to a str parsed by rabbit, which is serialized by default +const std::string valid_sub_obj_for_conversion = "{\"subval_obj\":{\"subval_str\":\"1\",\"subval_int\":2,\"subval_bool\":true,\"subval_flt\":3.4}}"; +const std::string valid_obj_for_conversion = "{ \"value\" : " + valid_sub_obj_for_conversion + "}"; +// clang-format on + +const std::vector< size_t > distances = {0, 1, 5, 30}; + +TEST(StandardDistance, ValidIterator) { + rabbit::array arr; + for (size_t i = 0; i < distances.size(); i++) { + rabbit::array sub_array; + for (size_t j = 0; j < distances[i]; j++) { + sub_array.push_back(static_cast< uint64_t >(j)); + } + arr.push_back(sub_array); + } + + ASSERT_EQ(static_cast< size_t >(std::distance(arr.begin(), arr.end())), + distances.size()); + size_t i = 0; + for (auto it = arr.begin(); it < arr.end(); it++, i++) { + EXPECT_EQ(static_cast< size_t >( + std::distance(it->value_begin(), it->value_end())), + distances[i]); + } +} + +TEST(ConvertObjectToString, IteratorAtStringConvert) { + rabbit::document doc; + ASSERT_NO_THROW(doc.parse(valid_json_for_schema)); + rabbit::array arr; + ASSERT_NO_THROW(arr = doc["schema"]); + size_t i = 1; + std::string valid_name = "valid_name"; + std::string valid_type = "valid_type"; + for (auto it = arr.begin(); it < arr.end(); ++it, ++i) { + std::string name, type; + ASSERT_NO_THROW(name = it->at("name").as_string()); + 
ASSERT_NO_THROW(type = it->at("type").as_string()); + EXPECT_EQ(name, valid_name + std::to_string(i)); + EXPECT_EQ(type, valid_type + std::to_string(i)); + } +} + +TEST(ConvertObjectToString, ValidObject) { + rabbit::document doc; + EXPECT_NO_THROW(doc.parse(valid_obj_for_conversion)); + ASSERT_TRUE(doc.is_object()); + ASSERT_TRUE(doc.has("value")); + ASSERT_TRUE(doc["value"].is_object()); + std::string value_str = doc["value"].str(); + EXPECT_EQ(value_str, valid_sub_obj_for_conversion); +} + +TEST(ParseSchema, ValidSchemaValidDoc) { + rabbit::document doc; + EXPECT_NO_THROW(doc.parse(valid_json_for_schema, valid_json_schema)); +} + +TEST(ParseSchema, InvalidSchemaValidDoc) { + rabbit::document doc; + EXPECT_THROW(doc.parse(valid_json_for_schema, invalid_json_schema), + rabbit::parse_error); +} + +TEST(ParseSchema, ValidSchemaInvalidDoc) { + rabbit::document doc; + EXPECT_THROW(doc.parse(invalid_json_for_schema, valid_json_schema), + rabbit::parse_error); +} + +TEST(ParseSchema, InvalidSchemaInvalidDoc) { + rabbit::document doc; + EXPECT_THROW(doc.parse(invalid_json, invalid_json_schema), + rabbit::parse_error); +} + +TEST(ParseObj, ValidObj) { + rabbit::document doc; + EXPECT_NO_THROW(doc.parse(valid_json_obj)); + ASSERT_TRUE(doc.is_object()); + ASSERT_TRUE(doc.has("value")); + ASSERT_TRUE(doc["value"].is_object()); + ASSERT_TRUE(doc["value"].has("subval_str")); + ASSERT_TRUE(doc["value"].has("subval_int")); + ASSERT_TRUE(doc["value"].has("subval_bool")); + ASSERT_TRUE(doc["value"].has("subval_flt")); + ASSERT_TRUE(doc["value"]["subval_str"].is_string()); + ASSERT_TRUE(doc["value"]["subval_int"].is_int()); + ASSERT_TRUE(doc["value"]["subval_bool"].is_bool()); + ASSERT_TRUE(doc["value"]["subval_flt"].is_number()); + EXPECT_EQ("1", doc["value"]["subval_str"].as_string()); + EXPECT_EQ(2, doc["value"]["subval_int"].as_int()); + EXPECT_EQ(true, doc["value"]["subval_bool"].as_bool()); + EXPECT_EQ(3.4, doc["value"]["subval_flt"].as_double()); +} + +TEST(ParseObj, 
InvalidObj) { + rabbit::document doc; + EXPECT_THROW(doc.parse(invalid_json_obj), rabbit::parse_error); +} + +TEST(ParseArr, ValidArr) { + rabbit::document doc; + ASSERT_NO_THROW(doc.parse(valid_json_arr)); + ASSERT_TRUE(doc.is_object()); + ASSERT_TRUE(doc.has("value")); + ASSERT_TRUE(doc["value"].is_array()); + + rabbit::array arr; + ASSERT_NO_THROW(arr = doc["value"]); + size_t i = 0; + for (rabbit::array::iterator it = arr.begin(); it != arr.end(); ++it, ++i) { + switch (i) { + case 0: + ASSERT_TRUE(it->is_int()); + EXPECT_EQ(1, it->as_int()); + break; + case 1: + ASSERT_TRUE(it->is_string()); + EXPECT_EQ("2", it->as_string()); + break; + case 2: + ASSERT_TRUE(it->is_bool()); + EXPECT_EQ(true, it->as_bool()); + break; + default: + FAIL() << "Array iterator exceeded bounds"; + return; + } + } +} +TEST(ParseArr, InvalidArr) { + rabbit::document doc; + EXPECT_THROW(doc.parse(invalid_json_arr), rabbit::parse_error); +} + +TEST(ParseStr, ValidStr) { + rabbit::document doc; + ASSERT_NO_THROW(doc.parse(valid_json_str)); + ASSERT_TRUE(doc.is_object()); + ASSERT_TRUE(doc.has("value")); + ASSERT_TRUE(doc["value"].is_string()); + EXPECT_EQ("123", doc["value"].as_string()); +} + +TEST(ParseStr, InvalidStr) { + rabbit::document doc; + EXPECT_THROW(doc.parse(invalid_json_str), rabbit::parse_error); +} + +TEST(ParseInt, ValidInt) { + rabbit::document doc; + ASSERT_NO_THROW(doc.parse(valid_json_int)); + ASSERT_TRUE(doc.is_object()); + ASSERT_TRUE(doc.has("value")); + ASSERT_TRUE(doc["value"].is_int()); + EXPECT_EQ(123, doc["value"].as_int()); +} + +TEST(ParseInt, InvalidInt) { + rabbit::document doc; + EXPECT_THROW(doc.parse(invalid_json_int), rabbit::parse_error); +} + +TEST(Parse, InvalidJson) { + rabbit::document doc; + EXPECT_THROW(doc.parse(invalid_json), rabbit::parse_error); +} + +int main(int argc, char** argv) { + testing::internal::CaptureStdout(); + ::testing::InitGoogleTest(&argc, argv); + int failures = RUN_ALL_TESTS(); + std::string output = 
testing::internal::GetCapturedStdout(); + std::cout << output << std::endl; + std::cout << (failures ? "Not all tests passed." : "All tests passed") + << std::endl; + WriteFileIfSpecified(argv, argv + argc, "-fout", output); + + return failures; +} diff --git a/sql-odbc/src/autoconf.h.in b/sql-odbc/src/autoconf.h.in new file mode 100644 index 0000000000..5abb4109e0 --- /dev/null +++ b/sql-odbc/src/autoconf.h.in @@ -0,0 +1,2 @@ +#cmakedefine AUTOCONF_ENABLE +#cmakedefine AUTOCONF_STRING "@AUTOCONF_STRING@" diff --git a/sql-odbc/src/gtest/googletest-download.cmake b/sql-odbc/src/gtest/googletest-download.cmake new file mode 100644 index 0000000000..a7ddc10a0e --- /dev/null +++ b/sql-odbc/src/gtest/googletest-download.cmake @@ -0,0 +1,35 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +# code copied from https://crascit.com/2015/07/25/cmake-gtest/ +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) + +project(googletest-download NONE) + +include(ExternalProject) + +ExternalProject_Add( + googletest + SOURCE_DIR "@GOOGLETEST_DOWNLOAD_ROOT@/googletest-src" + BINARY_DIR "@GOOGLETEST_DOWNLOAD_ROOT@/googletest-build" + GIT_REPOSITORY + https://github.com/google/googletest.git + GIT_TAG + release-1.10.0 + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) \ No newline at end of file diff --git a/sql-odbc/src/gtest/googletest.cmake b/sql-odbc/src/gtest/googletest.cmake new file mode 100644 index 0000000000..5ac79e7dfc --- /dev/null +++ b/sql-odbc/src/gtest/googletest.cmake @@ -0,0 +1,48 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + + +# the following code to fetch googletest +# is inspired by and adapted after https://crascit.com/2015/07/25/cmake-gtest/ +# download and unpack googletest at configure time + +macro(fetch_googletest _download_module_path _download_root) + set(GOOGLETEST_DOWNLOAD_ROOT ${_download_root}) + configure_file( + ${_download_module_path}/googletest-download.cmake + ${_download_root}/CMakeLists.txt + @ONLY + ) + unset(GOOGLETEST_DOWNLOAD_ROOT) + + execute_process( + COMMAND + "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY + ${_download_root} + ) + execute_process( + COMMAND + "${CMAKE_COMMAND}" --build . 
+ WORKING_DIRECTORY + ${_download_root} + ) + + # adds the targers: gtest, gtest_main, gmock, gmock_main + add_subdirectory( + ${_download_root}/googletest-src + ${_download_root}/googletest-build + ) +endmacro() \ No newline at end of file diff --git a/sql-odbc/src/installer/CMakeLists.txt b/sql-odbc/src/installer/CMakeLists.txt new file mode 100644 index 0000000000..712653058c --- /dev/null +++ b/sql-odbc/src/installer/CMakeLists.txt @@ -0,0 +1,118 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +include(InstallRequiredSystemLibraries) +include(CPackComponent) + +set(CMAKE_INSTALL_PREFIX ${INSTALL_ROOT}) + +# General package info +set(CPACK_PACKAGE_NAME "Open Distro for Elasticsearch SQL ODBC Driver ${BITNESS}-bit") +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Open Distro for Elasticsearch SQL ODBC Driver ${BITNESS}-bit") +set(CPACK_PACKAGE_VENDOR "Amazon") +set(CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_NAME}") +set(CPACK_SYSTEM_NAME "${CMAKE_SYSTEM_NAME}") +set(CPACK_PACKAGE_VERSION "${DRIVER_PACKAGE_VERSION}") + +# OS-specific package info +if(WIN32) + # Set generator to WIX + set(CPACK_GENERATOR "WIX") + + # This is a unique id for the installer - required for Windows + # Generated at https://www.guidgen.com/ + set(CPACK_WIX_UPGRADE_GUID "2D325BD7-1176-40E8-8AB8-C52DD2F7B792") + + # The Icon shown next to the program name in Add/Remove programs + set(CPACK_WIX_PRODUCT_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icon.ico") + + # The bitmap will appear at the top of all installer pages other than the welcome and completion dialogs + set(CPACK_WIX_UI_BANNER "${CMAKE_CURRENT_SOURCE_DIR}/banner.bmp") + + # Background bitmap used on the welcome and completion dialogs + set(CPACK_WIX_UI_DIALOG "${CMAKE_CURRENT_SOURCE_DIR}/dialog.bmp") + + # This XML file is used for registry setup + set(CPACK_WIX_PATCH_FILE "${CMAKE_CURRENT_SOURCE_DIR}/patch.xml") + + # CPack doesn't allow extensionless licenses, need to make a copy with an extension, .txt is appropriate + configure_file("${PROJECT_ROOT}/LICENSE" "${PROJECT_ROOT}/LICENSE.txt" COPYONLY) + set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_ROOT}/LICENSE.txt") +else() + set(CPACK_GENERATOR "productbuild") + + # This script will be run once the Driver component has finished installing. 
+ set(CPACK_POSTFLIGHT_DRIVER_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/postinstall") + + # The productbuild generator copies files from this directory + set(CPACK_PRODUCTBUILD_RESOURCES_DIR "${CMAKE_CURRENT_SOURCE_DIR}/Resources") + + # Background setup to Distribution XML + set(CPACK_PRODUCTBUILD_BACKGROUND "background.bmp") + set(CPACK_PRODUCTBUILD_BACKGROUND_ALIGNMENT "bottomleft") + set(CPACK_PRODUCTBUILD_BACKGROUND_SCALING "none") + + # Background setup for the Dark Aqua theme to Distribution XML + set(CPACK_PRODUCTBUILD_BACKGROUND_DARKAQUA "background_darkaqua.bmp") + set(CPACK_PRODUCTBUILD_BACKGROUND_DARKAQUA_ALIGNMENT "bottomleft") + set(CPACK_PRODUCTBUILD_BACKGROUND_DARKAQUA_SCALING "none") + + # CPack doesn't allow extensionless licenses, need to make a copy with an extension, .txt is appropriate + configure_file("${PROJECT_ROOT}/LICENSE" "${PROJECT_ROOT}/LICENSE.txt" COPYONLY) + set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_ROOT}/LICENSE.txt") + set(CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/Resources/README.txt") + set(CPACK_RESOURCE_FILE_WELCOME "${CMAKE_CURRENT_SOURCE_DIR}/Resources/Welcome.txt") +endif() + +# Set up components for installer +cpack_add_component(Docs + DISPLAY_NAME "Documentation" + DESCRIPTION "Documentation about Open Distro for Elasticsearch SQL ODBC Driver" +) +cpack_add_component(Driver + DISPLAY_NAME "Driver" + DESCRIPTION "Library files for running the Open Distro for Elasticsearch SQL ODBC Driver" + REQUIRED +) +cpack_add_component(Resources + DISPLAY_NAME "Resources" + DESCRIPTION "Resources for Open Distro for Elasticsearch SQL ODBC Driver" +) + +# Install driver files +install(TARGETS odfesqlodbc DESTINATION bin COMPONENT "Driver") +# TODO: look into DSN Installer failure +# if(APPLE) +# install(FILES "${PROJECT_ROOT}/bin64/dsn_installer" DESTINATION bin COMPONENT "Driver") +# install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/remove-odfe-dsn.sh" DESTINATION bin COMPONENT "Driver") +# endif() + +# Install documentation files 
+install(FILES "${PROJECT_ROOT}/README.md" DESTINATION doc COMPONENT "Docs") +install(FILES "${PROJECT_ROOT}/LICENSE" DESTINATION doc COMPONENT "Docs") +install(FILES "${PROJECT_ROOT}/THIRD-PARTY" DESTINATION doc COMPONENT "Docs") + +# Install resource files +install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/Resources/odfe_sql_odbc.tdc" DESTINATION resources COMPONENT "Resources") + +# Install AWS dependencies +if(WIN32) + install(FILES "${PROJECT_ROOT}/sdk-build${BITNESS}/bin/Release/aws-c-common.dll" DESTINATION bin COMPONENT "Driver") + install(FILES "${PROJECT_ROOT}/sdk-build${BITNESS}/bin/Release/aws-c-event-stream.dll" DESTINATION bin COMPONENT "Driver") + install(FILES "${PROJECT_ROOT}/sdk-build${BITNESS}/bin/Release/aws-checksums.dll" DESTINATION bin COMPONENT "Driver") + install(FILES "${PROJECT_ROOT}/sdk-build${BITNESS}/bin/Release/aws-cpp-sdk-core.dll" DESTINATION bin COMPONENT "Driver") +endif() + +include(CPack) diff --git a/sql-odbc/src/installer/Resources/README.txt b/sql-odbc/src/installer/Resources/README.txt new file mode 100644 index 0000000000..65afeb49ee --- /dev/null +++ b/sql-odbc/src/installer/Resources/README.txt @@ -0,0 +1,19 @@ +All files are available in '/usr/local/lib/odfe-sql-odbc' after installation. + +To setup a connection, you can use DSN to store your data source connection information, +1. Open 'iODBC Data Source Administrator'. +2. Go to 'User DSN'. +3. Select 'ODFE SQL ODBC DSN' and click on 'Configure'. +4. Update the connection string values. For the list of all supported options, check '/usr/local/lib/odfe-sql-odbc/doc/README.md'. +5. Click 'Ok' to save changes. + +If using with ODBC compatible BI tools, refer to the tool documentation on configuring a new ODBC driver. The typical requirement is to make the tool aware of the location of the driver library file and then use it to setup database (i.e Elasticsearch) connections. + +For example, if you want to use Tableau with Elasticsearch Server, +1. Open 'Tableau'. +2. 
Click on 'Other Databases (ODBC)'. +3. Select 'ODFE SQL ODBC DSN' from the DSN list or 'ODFE SQL ODBC Driver' from the driver list. If using driver, you need to enter connection string values. +4. Click on 'Connect'. All connection attributes will be retrived. +5. Click on 'Sign In'. You will be successfully connected to elasticsearch server. + +For more details, check 'https://github.com/opendistro-for-elasticsearch/sql-odbc'. \ No newline at end of file diff --git a/sql-odbc/src/installer/Resources/Welcome.txt b/sql-odbc/src/installer/Resources/Welcome.txt new file mode 100644 index 0000000000..8e9d0c1bcf --- /dev/null +++ b/sql-odbc/src/installer/Resources/Welcome.txt @@ -0,0 +1 @@ +Open Distro for Elasticsearch SQL ODBC is a read-only ODBC driver for connecting to Open Distro for Elasticsearch SQL support. \ No newline at end of file diff --git a/sql-odbc/src/installer/Resources/background.bmp b/sql-odbc/src/installer/Resources/background.bmp new file mode 100644 index 0000000000..f371a5a0ae Binary files /dev/null and b/sql-odbc/src/installer/Resources/background.bmp differ diff --git a/sql-odbc/src/installer/Resources/background_darkaqua.bmp b/sql-odbc/src/installer/Resources/background_darkaqua.bmp new file mode 100644 index 0000000000..d179819510 Binary files /dev/null and b/sql-odbc/src/installer/Resources/background_darkaqua.bmp differ diff --git a/sql-odbc/src/installer/Resources/odfe_sql_odbc.tdc b/sql-odbc/src/installer/Resources/odfe_sql_odbc.tdc new file mode 100644 index 0000000000..8603f7149e --- /dev/null +++ b/sql-odbc/src/installer/Resources/odfe_sql_odbc.tdc @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/sql-odbc/src/installer/banner.bmp b/sql-odbc/src/installer/banner.bmp new file mode 100644 index 0000000000..27d966883a Binary files /dev/null and b/sql-odbc/src/installer/banner.bmp differ diff --git a/sql-odbc/src/installer/dialog.bmp b/sql-odbc/src/installer/dialog.bmp new file mode 100644 index 
0000000000..502ccafff6 Binary files /dev/null and b/sql-odbc/src/installer/dialog.bmp differ diff --git a/sql-odbc/src/installer/icon.ico b/sql-odbc/src/installer/icon.ico new file mode 100644 index 0000000000..0c71118661 Binary files /dev/null and b/sql-odbc/src/installer/icon.ico differ diff --git a/sql-odbc/src/installer/patch.xml b/sql-odbc/src/installer/patch.xml new file mode 100644 index 0000000000..4305cc4acd --- /dev/null +++ b/sql-odbc/src/installer/patch.xml @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/sql-odbc/src/installer/postinstall b/sql-odbc/src/installer/postinstall new file mode 100644 index 0000000000..aed11b42c0 --- /dev/null +++ b/sql-odbc/src/installer/postinstall @@ -0,0 +1,27 @@ +#!/bin/bash + +PKG_INSTALL_DIR=/Applications +FINAL_INSTALL_DIR=/usr/local/lib/odfe-sql-odbc + +# Remove install directory if it already exists +if [ -d "${FINAL_INSTALL_DIR}" ]; then + # Fail if FINAL_INSTALL_DIR is not set for whatever reason + if [ -z ${FINAL_INSTALL_DIR} ]; then exit 1; fi + rm -rf ${FINAL_INSTALL_DIR} +fi + +# Move PKG installed folders to intended install directory +mkdir -p ${FINAL_INSTALL_DIR} +mv ${PKG_INSTALL_DIR}/bin ${FINAL_INSTALL_DIR}/bin +mv ${PKG_INSTALL_DIR}/doc ${FINAL_INSTALL_DIR}/doc +mv ${PKG_INSTALL_DIR}/resources ${FINAL_INSTALL_DIR}/resources + +# TODO: look into why DSN installer is not working for fresh Mac install +# Current issue: "General installer error" when adding driver entry + +# Run DSN installer to configurate driver and DSN for system for easy setup. 
+# chmod a+x ${FINAL_INSTALL_DIR}/bin/dsn_installer +# chmod a+x ${FINAL_INSTALL_DIR}/bin/remove-odfe-dsn.sh +# echo "I can write to this file" > /tmp/dsn_installer.log +# ${FINAL_INSTALL_DIR}/bin/dsn_installer ${FINAL_INSTALL_DIR}/bin/ >> /tmp/dsn_installer.log +# echo "After DSN Installer finishes" >> /tmp/dsn_installer.log diff --git a/sql-odbc/src/installer/remove-odfe-dsn.sh b/sql-odbc/src/installer/remove-odfe-dsn.sh new file mode 100644 index 0000000000..ecf7121f5f --- /dev/null +++ b/sql-odbc/src/installer/remove-odfe-dsn.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +echo "This script will only remove the default DSN and Driver entries from your ODBC configuration." +echo "You will be responsible for removing installed files from the system." +if [[ $EUID -ne 0 ]]; then + echo "ERROR: This script must be run as root" + exit 1 +fi + +# check for "Yes" +while true; do + read -p "Do you want to continue? (Y/y) " yn + case $yn in + [Yy]* ) break;; + [Nn]* ) exit;; + * ) echo "Please answer yes or no.";; + esac +done + +# Run dsn_installer uninstall +${BASH_SOURCE%/*}/dsn_installer uninstall +if [ $? -ne 0 ]; then + echo "Error while removing DSN and Driver entries." +else + echo "DSN and Driver entries have been removed successfully." +fi diff --git a/sql-odbc/src/modules/code-coverage.cmake b/sql-odbc/src/modules/code-coverage.cmake new file mode 100644 index 0000000000..2f21d087b0 --- /dev/null +++ b/sql-odbc/src/modules/code-coverage.cmake @@ -0,0 +1,610 @@ +# +# Copyright (C) 2018-2020 by George Cave - gcave@stablecoder.ca +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations under +# the License. + +# USAGE: To enable any code coverage instrumentation/targets, the single CMake +# option of `CODE_COVERAGE` needs to be set to 'ON', either by GUI, ccmake, or +# on the command line. +# +# From this point, there are two primary methods for adding instrumentation to +# targets: 1 - A blanket instrumentation by calling `add_code_coverage()`, where +# all targets in that directory and all subdirectories are automatically +# instrumented. 2 - Per-target instrumentation by calling +# `target_code_coverage()`, where the target is given and thus only +# that target is instrumented. This applies to both libraries and executables. +# +# To add coverage targets, such as calling `make ccov` to generate the actual +# coverage information for perusal or consumption, call +# `target_code_coverage()` on an *executable* target. +# +# Example 1: All targets instrumented +# +# In this case, the coverage information reported will will be that of the +# `theLib` library target and `theExe` executable. +# +# 1a: Via global command +# +# ~~~ +# add_code_coverage() # Adds instrumentation to all targets +# +# add_library(theLib lib.cpp) +# +# add_executable(theExe main.cpp) +# target_link_libraries(theExe PRIVATE theLib) +# target_code_coverage(theExe) # As an executable target, adds the 'ccov-theExe' target (instrumentation already added via global anyways) for generating code coverage reports. +# ~~~ +# +# 1b: Via target commands +# +# ~~~ +# add_library(theLib lib.cpp) +# target_code_coverage(theLib) # As a library target, adds coverage instrumentation but no targets. +# +# add_executable(theExe main.cpp) +# target_link_libraries(theExe PRIVATE theLib) +# target_code_coverage(theExe) # As an executable target, adds the 'ccov-theExe' target and instrumentation for generating code coverage reports. 
+# ~~~ +# +# Example 2: Target instrumented, but with regex pattern of files to be excluded +# from report +# +# ~~~ +# add_executable(theExe main.cpp non_covered.cpp) +# target_code_coverage(theExe EXCLUDE non_covered.cpp test/*) # As an executable target, the reports will exclude the non-covered.cpp file, and any files in a test/ folder. +# ~~~ +# +# Example 3: Target added to the 'ccov' and 'ccov-all' targets +# +# ~~~ +# add_code_coverage_all_targets(EXCLUDE test/*) # Adds the 'ccov-all' target set and sets it to exclude all files in test/ folders. +# +# add_executable(theExe main.cpp non_covered.cpp) +# target_code_coverage(theExe AUTO ALL EXCLUDE non_covered.cpp test/*) # As an executable target, adds to the 'ccov' and ccov-all' targets, and the reports will exclude the non-covered.cpp file, and any files in a test/ folder. +# ~~~ + +# Options +option( + CODE_COVERAGE + "Builds targets with code coverage instrumentation. (Requires GCC or Clang)" + OFF) + +# Programs +find_program(LLVM_COV_PATH llvm-cov) +find_program(LLVM_PROFDATA_PATH llvm-profdata) +find_program(LCOV_PATH lcov) +find_program(GENHTML_PATH genhtml) +# Hide behind the 'advanced' mode flag for GUI/ccmake +mark_as_advanced( + FORCE + LLVM_COV_PATH + LLVM_PROFDATA_PATH + LCOV_PATH + GENHTML_PATH) + +# Variables +set(CMAKE_COVERAGE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/ccov) + +# Common initialization/checks +if(CODE_COVERAGE AND NOT CODE_COVERAGE_ADDED) + set(CODE_COVERAGE_ADDED ON) + + # Common Targets + add_custom_target( + ccov-preprocessing + COMMAND ${CMAKE_COMMAND} -E make_directory + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY} + DEPENDS ccov-clean) + + if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" + OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") + # Messages + message(STATUS "Building with llvm Code Coverage Tools") + + if(NOT LLVM_COV_PATH) + message(FATAL_ERROR "llvm-cov not found! 
Aborting.") + else() + # Version number checking for 'EXCLUDE' compatibility + execute_process(COMMAND ${LLVM_COV_PATH} --version + OUTPUT_VARIABLE LLVM_COV_VERSION_CALL_OUTPUT) + string( + REGEX MATCH + "[0-9]+\\.[0-9]+\\.[0-9]+" + LLVM_COV_VERSION + ${LLVM_COV_VERSION_CALL_OUTPUT}) + + if(LLVM_COV_VERSION VERSION_LESS "7.0.0") + message( + WARNING + "target_code_coverage()/add_code_coverage_all_targets() 'EXCLUDE' option only available on llvm-cov >= 7.0.0" + ) + endif() + endif() + + # Targets + add_custom_target( + ccov-clean + COMMAND rm -f ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list + COMMAND rm -f ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/profraw.list) + + # Used to get the shared object file list before doing the main all- + # processing + add_custom_target( + ccov-libs + COMMAND ; + COMMENT "libs ready for coverage report.") + + elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" + OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") + # Messages + message(STATUS "Building with lcov Code Coverage Tools") + + if(CMAKE_BUILD_TYPE) + string(TOUPPER ${CMAKE_BUILD_TYPE} upper_build_type) + if(NOT + ${upper_build_type} + STREQUAL + "DEBUG") + message( + WARNING + "Code coverage results with an optimized (non-Debug) build may be misleading" + ) + endif() + else() + message( + WARNING + "Code coverage results with an optimized (non-Debug) build may be misleading" + ) + endif() + if(NOT LCOV_PATH) + message(FATAL_ERROR "lcov not found! Aborting...") + endif() + if(NOT GENHTML_PATH) + message(FATAL_ERROR "genhtml not found! Aborting...") + endif() + + # Targets + add_custom_target(ccov-clean COMMAND ${LCOV_PATH} --directory + ${CMAKE_BINARY_DIR} --zerocounters) + + else() + message(FATAL_ERROR "Code coverage requires Clang or GCC. Aborting.") + endif() +endif() + +# Adds code coverage instrumentation to a library, or instrumentation/targets +# for an executable target. 
+# ~~~ +# EXECUTABLE ADDED TARGETS: +# GCOV/LCOV: +# ccov : Generates HTML code coverage report for every target added with 'AUTO' parameter. +# ccov-${TARGET_NAME} : Generates HTML code coverage report for the associated named target. +# ccov-all : Generates HTML code coverage report, merging every target added with 'ALL' parameter into a single detailed report. +# +# LLVM-COV: +# ccov : Generates HTML code coverage report for every target added with 'AUTO' parameter. +# ccov-report : Generates HTML code coverage report for every target added with 'AUTO' parameter. +# ccov-${TARGET_NAME} : Generates HTML code coverage report. +# ccov-report-${TARGET_NAME} : Prints to command line summary per-file coverage information. +# ccov-show-${TARGET_NAME} : Prints to command line detailed per-line coverage information. +# ccov-all : Generates HTML code coverage report, merging every target added with 'ALL' parameter into a single detailed report. +# ccov-all-report : Prints summary per-file coverage information for every target added with ALL' parameter to the command line. +# +# Required: +# TARGET_NAME - Name of the target to generate code coverage for. +# Optional: +# PUBLIC - Sets the visibility for added compile options to targets to PUBLIC instead of the default of PRIVATE. +# PUBLIC - Sets the visibility for added compile options to targets to INTERFACE instead of the default of PRIVATE. +# AUTO - Adds the target to the 'ccov' target so that it can be run in a batch with others easily. Effective on executable targets. +# ALL - Adds the target to the 'ccov-all' and 'ccov-all-report' targets, which merge several executable targets coverage data to a single report. Effective on executable targets. +# EXTERNAL - For GCC's lcov, allows the profiling of 'external' files from the processing directory +# COVERAGE_TARGET_NAME - For executables ONLY, changes the outgoing target name so instead of `ccov-${TARGET_NAME}` it becomes `ccov-${COVERAGE_TARGET_NAME}`. 
+# EXCLUDE - Excludes files of the patterns provided from coverage. **These do not copy to the 'all' targets.** +# OBJECTS - For executables ONLY, if the provided targets are shared libraries, adds coverage information to the output +# ARGS - For executables ONLY, appends the given arguments to the associated ccov-* executable call +# ~~~ +function(target_code_coverage TARGET_NAME) + # Argument parsing + set(options + AUTO + ALL + EXTERNAL + PUBLIC + INTERFACE) + set(single_value_keywords COVERAGE_TARGET_NAME) + set(multi_value_keywords EXCLUDE OBJECTS ARGS) + cmake_parse_arguments( + target_code_coverage + "${options}" + "${single_value_keywords}" + "${multi_value_keywords}" + ${ARGN}) + + # Set the visibility of target functions to PUBLIC, INTERFACE or default to + # PRIVATE. + if(target_code_coverage_PUBLIC) + set(TARGET_VISIBILITY PUBLIC) + elseif(target_code_coverage_INTERFACE) + set(TARGET_VISIBILITY INTERFACE) + else() + set(TARGET_VISIBILITY PRIVATE) + endif() + + if(NOT target_code_coverage_COVERAGE_TARGET_NAME) + # If a specific name was given, use that instead. 
+ set(target_code_coverage_COVERAGE_TARGET_NAME ${TARGET_NAME}) + endif() + + if(CODE_COVERAGE) + + # Add code coverage instrumentation to the target's linker command + if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" + OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") + target_compile_options( + ${TARGET_NAME} + ${TARGET_VISIBILITY} + -fprofile-instr-generate + -fcoverage-mapping) + target_link_options( + ${TARGET_NAME} + ${TARGET_VISIBILITY} + -fprofile-instr-generate + -fcoverage-mapping) + elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" + OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") + target_compile_options( + ${TARGET_NAME} + ${TARGET_VISIBILITY} + -fprofile-arcs + -ftest-coverage) + target_link_libraries(${TARGET_NAME} ${TARGET_VISIBILITY} gcov) + endif() + + # Targets + get_target_property(target_type ${TARGET_NAME} TYPE) + + # Add shared library to processing for 'all' targets + if(target_type STREQUAL "SHARED_LIBRARY" AND target_code_coverage_ALL) + if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" + OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") + add_custom_target( + ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND echo "-object=$" >> + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list + DEPENDS ccov-preprocessing ${TARGET_NAME}) + + if(NOT TARGET ccov-libs) + message( + FATAL_ERROR + "Calling target_code_coverage with 'ALL' must be after a call to 'add_code_coverage_all_targets'." 
+ ) + endif() + + add_dependencies(ccov-libs + ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME}) + endif() + endif() + + # For executables add targets to run and produce output + if(target_type STREQUAL "EXECUTABLE") + if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" + OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") + + # If there are shared objects to also work with, generate the string to + # add them here + foreach(SO_TARGET ${target_code_coverage_OBJECTS}) + # Check to see if the target is a shared object + if(TARGET ${SO_TARGET}) + get_target_property(SO_TARGET_TYPE ${SO_TARGET} TYPE) + if(${SO_TARGET_TYPE} STREQUAL "SHARED_LIBRARY") + set(SO_OBJECTS ${SO_OBJECTS} -object=$) + endif() + endif() + endforeach() + + # Run the executable, generating raw profile data + add_custom_target( + ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND + LLVM_PROFILE_FILE=${target_code_coverage_COVERAGE_TARGET_NAME}.profraw + $ ${target_code_coverage_ARGS} + COMMAND echo "-object=$" >> + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list + COMMAND + echo + "${CMAKE_CURRENT_BINARY_DIR}/${target_code_coverage_COVERAGE_TARGET_NAME}.profraw " + >> ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/profraw.list + DEPENDS ccov-preprocessing ccov-libs ${TARGET_NAME}) + + # Merge the generated profile data so llvm-cov can process it + add_custom_target( + ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND + ${LLVM_PROFDATA_PATH} merge -sparse + ${target_code_coverage_COVERAGE_TARGET_NAME}.profraw -o + ${target_code_coverage_COVERAGE_TARGET_NAME}.profdata + DEPENDS ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME}) + + # Ignore regex only works on LLVM >= 7 + if(LLVM_COV_VERSION VERSION_GREATER_EQUAL "7.0.0") + foreach(EXCLUDE_ITEM ${target_code_coverage_EXCLUDE}) + set(EXCLUDE_REGEX ${EXCLUDE_REGEX} + -ignore-filename-regex='${EXCLUDE_ITEM}') + endforeach() + endif() + + # Print out details of the coverage information to the command line + 
add_custom_target( + ccov-show-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND + ${LLVM_COV_PATH} show $ ${SO_OBJECTS} + -instr-profile=${target_code_coverage_COVERAGE_TARGET_NAME}.profdata + -show-line-counts-or-regions ${EXCLUDE_REGEX} + DEPENDS ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME}) + + # Print out a summary of the coverage information to the command line + add_custom_target( + ccov-report-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND + ${LLVM_COV_PATH} report $ ${SO_OBJECTS} + -instr-profile=${target_code_coverage_COVERAGE_TARGET_NAME}.profdata + ${EXCLUDE_REGEX} + DEPENDS ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME}) + + # Generates HTML output of the coverage information for perusal + add_custom_target( + ccov-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND + ${LLVM_COV_PATH} show $ ${SO_OBJECTS} + -instr-profile=${target_code_coverage_COVERAGE_TARGET_NAME}.profdata + -show-line-counts-or-regions + -output-dir=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME} + -format="html" ${EXCLUDE_REGEX} + DEPENDS ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME}) + + elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" + OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") + set(COVERAGE_INFO + "${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME}.info" + ) + + # Run the executable, generating coverage information + add_custom_target( + ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND $ ${target_code_coverage_ARGS} + DEPENDS ccov-preprocessing ${TARGET_NAME}) + + # Generate exclusion string for use + foreach(EXCLUDE_ITEM ${target_code_coverage_EXCLUDE}) + set(EXCLUDE_REGEX + ${EXCLUDE_REGEX} + --remove + ${COVERAGE_INFO} + '${EXCLUDE_ITEM}') + endforeach() + + if(EXCLUDE_REGEX) + set(EXCLUDE_COMMAND ${LCOV_PATH} ${EXCLUDE_REGEX} --output-file + ${COVERAGE_INFO}) + else() + set(EXCLUDE_COMMAND ;) + endif() + + if(NOT ${target_code_coverage_EXTERNAL}) + 
set(EXTERNAL_OPTION --no-external) + endif() + + # Capture coverage data + add_custom_target( + ccov-capture-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND ${CMAKE_COMMAND} -E remove ${COVERAGE_INFO} + COMMAND ${LCOV_PATH} --directory ${CMAKE_BINARY_DIR} --zerocounters + COMMAND $ ${target_code_coverage_ARGS} + COMMAND + ${LCOV_PATH} --directory ${CMAKE_BINARY_DIR} --base-directory + ${CMAKE_SOURCE_DIR} --capture ${EXTERNAL_OPTION} --output-file + ${COVERAGE_INFO} + COMMAND ${EXCLUDE_COMMAND} + DEPENDS ccov-preprocessing ${TARGET_NAME}) + + # Generates HTML output of the coverage information for perusal + add_custom_target( + ccov-${target_code_coverage_COVERAGE_TARGET_NAME} + COMMAND + ${GENHTML_PATH} -o + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME} + ${COVERAGE_INFO} + DEPENDS ccov-capture-${target_code_coverage_COVERAGE_TARGET_NAME}) + endif() + + add_custom_command( + TARGET ccov-${target_code_coverage_COVERAGE_TARGET_NAME} + POST_BUILD + COMMAND ; + COMMENT + "Open ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME}/index.html in your browser to view the coverage report." + ) + + # AUTO + if(target_code_coverage_AUTO) + if(NOT TARGET ccov) + add_custom_target(ccov) + endif() + add_dependencies(ccov ccov-${target_code_coverage_COVERAGE_TARGET_NAME}) + + if(NOT CMAKE_C_COMPILER_ID MATCHES "GNU" + OR NOT CMAKE_CXX_COMPILER_ID MATCHES "GNU") + if(NOT TARGET ccov-report) + add_custom_target(ccov-report) + endif() + add_dependencies( + ccov-report + ccov-report-${target_code_coverage_COVERAGE_TARGET_NAME}) + endif() + endif() + + # ALL + if(target_code_coverage_ALL) + if(NOT TARGET ccov-all-processing) + message( + FATAL_ERROR + "Calling target_code_coverage with 'ALL' must be after a call to 'add_code_coverage_all_targets'." 
+ ) + endif() + + add_dependencies(ccov-all-processing + ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME}) + endif() + endif() + endif() +endfunction() + +# Adds code coverage instrumentation to all targets in the current directory and +# any subdirectories. To add coverage instrumentation to only specific targets, +# use `target_code_coverage`. +function(add_code_coverage) + if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" + OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") + add_compile_options(-fprofile-instr-generate -fcoverage-mapping) + add_link_options(-fprofile-instr-generate -fcoverage-mapping) + elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" + OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") + add_compile_options(-fprofile-arcs -ftest-coverage) + link_libraries(gcov) + endif() +endfunction() + +# Adds the 'ccov-all' type targets that calls all targets added via +# `target_code_coverage` with the `ALL` parameter, but merges all the coverage +# data from them into a single large report instead of the numerous smaller +# reports. Also adds the ccov-all-capture Generates an all-merged.info file, for +# use with coverage dashboards (e.g. codecov.io, coveralls). +# ~~~ +# Optional: +# EXCLUDE - Excludes files of the regex patterns provided from coverage. 
+# ~~~ +function(add_code_coverage_all_targets) + # Argument parsing + set(multi_value_keywords EXCLUDE) + cmake_parse_arguments( + add_code_coverage_all_targets + "" + "" + "${multi_value_keywords}" + ${ARGN}) + + if(CODE_COVERAGE) + if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" + OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") + + # Merge the profile data for all of the run executables + add_custom_target( + ccov-all-processing + COMMAND + ${LLVM_PROFDATA_PATH} merge -o + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata -sparse `cat + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/profraw.list`) + + # Regex exclude only available for LLVM >= 7 + if(LLVM_COV_VERSION VERSION_GREATER_EQUAL "7.0.0") + foreach(EXCLUDE_ITEM ${add_code_coverage_all_targets_EXCLUDE}) + set(EXCLUDE_REGEX ${EXCLUDE_REGEX} + -ignore-filename-regex='${EXCLUDE_ITEM}') + endforeach() + endif() + + # Print summary of the code coverage information to the command line + add_custom_target( + ccov-all-report + COMMAND + ${LLVM_COV_PATH} report `cat + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list` + -instr-profile=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata + ${EXCLUDE_REGEX} + DEPENDS ccov-all-processing) + + # Export coverage information so continuous integration tools (e.g. 
+ # Jenkins) can consume it + add_custom_target( + ccov-all-export + COMMAND + ${LLVM_COV_PATH} export `cat + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list` + -instr-profile=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata + -format="text" ${EXCLUDE_REGEX} > + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/coverage.json + DEPENDS ccov-all-processing) + + # Generate HTML output of all added targets for perusal + add_custom_target( + ccov-all + COMMAND + ${LLVM_COV_PATH} show `cat + ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list` + -instr-profile=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata + -show-line-counts-or-regions + -output-dir=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged + -format="html" ${EXCLUDE_REGEX} + DEPENDS ccov-all-processing) + + elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" + OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") + set(COVERAGE_INFO "${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.info") + + # Nothing required for gcov + add_custom_target(ccov-all-processing COMMAND ;) + + # Exclusion regex string creation + set(EXCLUDE_REGEX) + foreach(EXCLUDE_ITEM ${add_code_coverage_all_targets_EXCLUDE}) + set(EXCLUDE_REGEX + ${EXCLUDE_REGEX} + --remove + ${COVERAGE_INFO} + '${EXCLUDE_ITEM}') + endforeach() + + if(EXCLUDE_REGEX) + set(EXCLUDE_COMMAND ${LCOV_PATH} ${EXCLUDE_REGEX} --output-file + ${COVERAGE_INFO}) + else() + set(EXCLUDE_COMMAND ;) + endif() + + # Capture coverage data + add_custom_target( + ccov-all-capture + COMMAND ${CMAKE_COMMAND} -E remove ${COVERAGE_INFO} + COMMAND ${LCOV_PATH} --directory ${CMAKE_BINARY_DIR} --capture + --output-file ${COVERAGE_INFO} + COMMAND ${EXCLUDE_COMMAND} + DEPENDS ccov-all-processing) + + # Generates HTML output of all targets for perusal + add_custom_target( + ccov-all + COMMAND ${GENHTML_PATH} -o ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged + ${COVERAGE_INFO} + DEPENDS ccov-all-capture) + + endif() + + add_custom_command( + TARGET ccov-all + POST_BUILD + COMMAND ; + COMMENT + "Open 
${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged/index.html in your browser to view the coverage report." + ) + endif() +endfunction() \ No newline at end of file diff --git a/sql-odbc/src/odfeenlist/CMakeLists.txt b/sql-odbc/src/odfeenlist/CMakeLists.txt new file mode 100644 index 0000000000..23103f8f04 --- /dev/null +++ b/sql-odbc/src/odfeenlist/CMakeLists.txt @@ -0,0 +1,34 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +project(odfeenlist) + +# Source files for odfeenlist +set(SOURCE_FILES msdtc_enlist.cpp) +set(HEADER_FILES elasticenlist.h) + +# Generate static lib +add_library(odfeenlist STATIC ${SOURCE_FILES} ${HEADER_FILES}) + +# Library dependencies +target_link_libraries(odfeenlist kernel32 advapi32 Delayimp XOleHlp) + +# Platform specific library dependencies +if(WIN32) + # Windows specifiec + target_link_libraries(odfeenlist wsock32 winspool user32 gdi32 comdlg32 shell32 uuid) +else() + # Unix specific +endif() \ No newline at end of file diff --git a/sql-odbc/src/odfeenlist/elasticenlist.h b/sql-odbc/src/odfeenlist/elasticenlist.h new file mode 100644 index 0000000000..0e33bf9394 --- /dev/null +++ b/sql-odbc/src/odfeenlist/elasticenlist.h @@ -0,0 +1,50 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __ESENLIST_H__ +#define __ESENLIST_H__ + +#ifdef __cplusplus +extern "C" { +#endif +#ifdef WIN32 +#ifdef _HANDLE_ENLIST_IN_DTC_ + +#undef DLL_DECLARE +#ifdef _ESENLIST_FUNCS_IMPLEMENT_ +#define DLL_DECLARE _declspec(dllexport) +#else +#ifdef _ESENLIST_FUNCS_IMPORT_ +#define DLL_DECLARE _declspec(dllimport) +#else +#define DLL_DECLARE +#endif /* _ESENLIST_FUNCS_IMPORT_ */ +#endif /* _ESENLIST_FUNCS_IMPLEMENT_ */ + +RETCODE EnlistInDtc(void *conn, void *pTra, int method); +RETCODE DtcOnDisconnect(void *); +RETCODE IsolateDtcConn(void *, BOOL continueConnection); +// for testing +DLL_DECLARE void *GetTransactionObject(HRESULT *hres); +DLL_DECLARE void ReleaseTransactionObject(void *); + +#endif /* _HANDLE_ENLIST_IN_DTC_ */ +#endif /* WIN32 */ + +#ifdef __cplusplus +} +#endif +#endif /* __ESENLIST_H__ */ diff --git a/sql-odbc/src/odfeenlist/msdtc_enlist.cpp b/sql-odbc/src/odfeenlist/msdtc_enlist.cpp new file mode 100644 index 0000000000..e3fade30c4 --- /dev/null +++ b/sql-odbc/src/odfeenlist/msdtc_enlist.cpp @@ -0,0 +1,1295 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifdef _HANDLE_ENLIST_IN_DTC_ + +#undef _MEMORY_DEBUG_ +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0400 +#endif /* _WIN32_WINNT */ + +#define WIN32_LEAN_AND_MEAN +#include +#include +/*#include */ +#define _ESDTC_FUNCS_IMPORT_ +#include "connexp.h" + +/*#define _SLEEP_FOR_TEST_*/ +#include +#include +#include +#include +#include +#ifndef WIN32 +#include +#endif /* WIN32 */ + +#include +#define _MYLOG_FUNCS_IMPORT_ +#include "mylog.h" +#define _ESENLIST_FUNCS_IMPLEMENT_ +#include "esenlist.h" +#include "xalibname.h" + +#ifdef WIN32 +#ifndef snprintf +#define snprintf _snprintf +#endif /* snprintf */ +#endif /* WIN32 */ + +/* Define a type for defining a constant string expression */ +#ifndef CSTR +#define CSTR static const char *const +#endif /* CSTR */ + +EXTERN_C { + HINSTANCE s_hModule; /* Saved module handle. */ +} +/* This is where the Driver Manager attaches to this Driver */ +BOOL WINAPI DllMain(HANDLE hInst, ULONG ul_reason_for_call, LPVOID lpReserved) { + switch (ul_reason_for_call) { + case DLL_PROCESS_ATTACH: + s_hModule = (HINSTANCE)hInst; /* Save for dialog boxes */ + break; + case DLL_PROCESS_DETACH: + mylog("DETACHING esenlist\n"); + break; + } + return TRUE; +} + +/* + * A comment About locks used in this module + * + * the locks should be acquired with stronger to weaker order. + * + * 1:ELOCK -- the strongest per IAsyncES object lock + * When the *isolated* or *dtcconn* member of an IAsyncES object + * is changed, this lock should be held. + * While an IAsyncES object accesses a es_odbc connection, + * this lock should be held. + * + * 2:[CONN_CS] -- per es_odbc connection lock + * This lock would be held for a pretty long time while accessing + * the es_odbc connection assigned to an IAsyncES object. 
You + * can use the connecion safely by holding a ELOCK for the + * IAsyncES object because the assignment is ensured to be + * fixed while the ELOCK is held. + * + * 3:LIFELOCK -- a global lock to ensure the lives of IAsyncES objects + * While this lock is held, IAsyncES objects would never die. + * + * 4:SLOCK -- the short term per IAsyncES object lock + * When any member of an IAsyncES object is changed, this lock + * should be held. + */ + +// #define _LOCK_DEBUG_ +static class INIT_CRIT { + public: + CRITICAL_SECTION life_cs; /* for asdum member of ConnectionClass */ + INIT_CRIT() { + InitializeCriticalSection(&life_cs); + } + ~INIT_CRIT() { + DeleteCriticalSection(&life_cs); + } +} init_crit; +#define LIFELOCK_ACQUIRE EnterCriticalSection(&init_crit.life_cs) +#define LIFELOCK_RELEASE LeaveCriticalSection(&init_crit.life_cs) + +/* + * Some helper macros about connection handling. + */ +#define CONN_CS_ACQUIRE(conn) EsDtc_lock_cntrl((conn), TRUE, FALSE) +#define TRY_CONN_CS_ACQUIRE(conn) EsDtc_lock_cntrl((conn), TRUE, TRUE) +#define CONN_CS_RELEASE(conn) EsDtc_lock_cntrl((conn), FALSE, FALSE) + +#define CONN_IS_IN_TRANS(conn) EsDtc_get_property((conn), inTrans) + +static const char *XidToText(const XID &xid, char *rtext) { + int glen = xid.gtrid_length, blen = xid.bqual_length; + int i, j; + + for (i = 0, j = 0; i < glen; i++, j += 2) + sprintf(rtext + j, "%02x", (unsigned char)xid.data[i]); + strcat(rtext, "-"); + j++; + for (; i < glen + blen; i++, j += 2) + sprintf(rtext + j, "%02x", (unsigned char)xid.data[i]); + return rtext; +} + +static LONG g_cComponents = 0; +static LONG g_cServerLocks = 0; + +// +// �ȉ���ITransactionResourceAsync�I�u�W�F�N�g�͔C�ӂ̃X���b�h���� +// ���R�ɃA�N�Z�X�”\�Ȃ悤�Ɏ�������B�eRequest�̌��ʂ�Ԃ����߂� +// �g�p����ITransactionEnlistmentAsync�C���^�[�t�F�C�X�����̂悤�� +// ��������Ă���i�Ǝv����A���L�Q�Ɓj�̂ŌĂяo����COM�̃A�p�[ +// �g�����g���ӎ�����(CoMarshalInterThreadInterfaceInStream/CoGetIn +// terfaceAndReleaseStream���g�p����j�K�v�͂Ȃ��B +// 
����DLL���Ŏg�p����ITransactionResourceAsync��ITransactionEnlist +// mentAsync�̃C���^�[�t�F�C�X�|�C���^�[�͔C�ӂ̃X���b�h���璼�ڎg�p +// ���邱�Ƃ��ł���B +// + +// OLE Transactions Standard +// +// OLE Transactions is the Microsoft interface standard for transaction +// management. Applications use OLE Transactions-compliant interfaces to +// initiate, commit, abort, and inquire about transactions. Resource +// managers use OLE Transactions-compliant interfaces to enlist in +// transactions, to propagate transactions to other resource managers, +// to propagate transactions from process to process or from system to +// system, and to participate in the two-phase commit protocol. +// +// The Microsoft DTC system implements most OLE Transactions-compliant +// objects, interfaces, and methods. Resource managers that wish to use +// OLE Transactions must implement some OLE Transactions-compliant objects, +// interfaces, and methods. +// +// The OLE Transactions specification is based on COM but it differs in the +// following respects: +// +// OLE Transactions objects cannot be created using the COM CoCreate APIs. +// References to OLE Transactions objects are always direct. Therefore, +// no proxies or stubs are created for inter-apartment, inter-process, +// or inter-node calls and OLE Transactions references cannot be marshaled +// using standard COM marshaling. +// All references to OLE Transactions objects and their sinks are completely +// free threaded and cannot rely upon COM concurrency control models. +// For example, you cannot pass a reference to an IResourceManagerSink +// interface on a single-threaded apartment and expect the callback to occur +// only on the same single-threaded apartment. 
+ +class IAsyncES : public ITransactionResourceAsync { + private: + IDtcToXaHelperSinglePipe *helper; + DWORD RMCookie; + void *dtcconn; + LONG refcnt; + CRITICAL_SECTION as_spin; // to make this object Both + CRITICAL_SECTION as_exec; // to make this object Both + XID xid; + bool isolated; + bool prepared; + bool done; + bool abort; + HANDLE eThread[3]; + bool eFin[3]; + bool requestAccepted; + HRESULT prepare_result; + HRESULT commit_result; +#ifdef _LOCK_DEBUG_ + int spin_cnt; + int cs_cnt; +#endif /* _LOCK_DEBUG_ */ + + public: + enum { PrepareExec = 0, CommitExec, AbortExec }; + + ITransactionEnlistmentAsync *enlist; + + HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject); + ULONG STDMETHODCALLTYPE AddRef(void); + ULONG STDMETHODCALLTYPE Release(void); + + HRESULT STDMETHODCALLTYPE PrepareRequest(BOOL fRetaining, DWORD grfRM, + BOOL fWantMoniker, + BOOL fSinglePhase); + HRESULT STDMETHODCALLTYPE CommitRequest(DWORD grfRM, XACTUOW *pNewUOW); + HRESULT STDMETHODCALLTYPE AbortRequest(BOID *pboidReason, BOOL fRetaining, + XACTUOW *pNewUOW); + HRESULT STDMETHODCALLTYPE TMDown(void); + + IAsyncES(); + void SetHelper(IDtcToXaHelperSinglePipe *pHelper, DWORD dwRMCookie) { + helper = pHelper; + RMCookie = dwRMCookie; + } + + HRESULT RequestExec(DWORD type, HRESULT res); + HRESULT ReleaseConnection(void); + void SetConnection(void *sconn) { + SLOCK_ACQUIRE(); + dtcconn = sconn; + SLOCK_RELEASE(); + } + void SetXid(const XID *ixid) { + SLOCK_ACQUIRE(); + xid = *ixid; + SLOCK_RELEASE(); + } + void *separateXAConn(bool spinAcquired, bool continueConnection); + bool CloseThread(DWORD type); + + private: + ~IAsyncES(); + void SLOCK_ACQUIRE() { + EnterCriticalSection(&as_spin); + } + void SLOCK_RELEASE() { + LeaveCriticalSection(&as_spin); + } + void ELOCK_ACQUIRE() { + EnterCriticalSection(&as_exec); + } + void ELOCK_RELEASE() { + LeaveCriticalSection(&as_exec); + } + void *getLockedXAConn(void); + void *generateXAConn(bool spinAcquired); + void 
*isolateXAConn(bool spinAcquired, bool continueConnection); + void SetPrepareResult(HRESULT res) { + SLOCK_ACQUIRE(); + prepared = true; + prepare_result = res; + SLOCK_RELEASE(); + } + void SetDone(HRESULT); + void Wait_pThread(bool slock_hold); + void Wait_cThread(bool slock_hold, bool once); +}; + +IAsyncES::IAsyncES(void) + : helper(NULL), + RMCookie(0), + enlist(NULL), + dtcconn(NULL), + refcnt(1), + isolated(false), + done(false), + abort(false), + prepared(false), + requestAccepted(false) { + InterlockedIncrement(&g_cComponents); + InitializeCriticalSection(&as_spin); + InitializeCriticalSection(&as_exec); + eThread[0] = eThread[1] = eThread[2] = NULL; + eFin[0] = eFin[1] = eFin[2] = false; + memset(&xid, 0, sizeof(xid)); +#ifdef _LOCK_DEBUG_ + spin_cnt = 0; + cs_cnt = 0; +#endif /* _LOCK_DEBUG_ */ +} + +// +// invoked from *delete*. +// When entered ELOCK -> LIFELOCK -> SLOCK are held +// and they are released. +// +IAsyncES::~IAsyncES(void) { + void *fconn = NULL; + + if (dtcconn) { + if (isolated) + fconn = dtcconn; + EsDtc_set_async(dtcconn, NULL); + dtcconn = NULL; + } + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + if (fconn) { + mylog("IAsyncES Destructor is freeing the connection\n"); + EsDtc_free_connect(fconn); + } + DeleteCriticalSection(&as_spin); + ELOCK_RELEASE(); + DeleteCriticalSection(&as_exec); + InterlockedDecrement(&g_cComponents); +} +HRESULT STDMETHODCALLTYPE IAsyncES::QueryInterface(REFIID riid, + void **ppvObject) { + mylog("%p QueryInterface called\n", this); + if (riid == IID_IUnknown || riid == IID_ITransactionResourceAsync) { + *ppvObject = this; + AddRef(); + return S_OK; + } + *ppvObject = NULL; + return E_NOINTERFACE; +} +// +// acquire/releases SLOCK. +// +ULONG STDMETHODCALLTYPE IAsyncES::AddRef(void) { + mylog("%p->AddRef called\n", this); + SLOCK_ACQUIRE(); + refcnt++; + SLOCK_RELEASE(); + return refcnt; +} +// +// acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. 
+// +ULONG STDMETHODCALLTYPE IAsyncES::Release(void) { + mylog("%p->Release called refcnt=%d\n", this, refcnt); + SLOCK_ACQUIRE(); + refcnt--; + if (refcnt <= 0) { + SLOCK_RELEASE(); + ELOCK_ACQUIRE(); + LIFELOCK_ACQUIRE; + SLOCK_ACQUIRE(); + if (refcnt <= 0) { + const int refcnt_copy = refcnt; + mylog("delete %p\n", this); + delete this; + return refcnt_copy; + } else { + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + ELOCK_RELEASE(); + } + } else + SLOCK_RELEASE(); + return refcnt; +} + +// +// Acquire/release SLOCK. +// +void IAsyncES::Wait_pThread(bool slock_hold) { + mylog("Wait_pThread %d in\n", slock_hold); + HANDLE wThread; + int wait_idx = PrepareExec; + DWORD ret; + + if (!slock_hold) + SLOCK_ACQUIRE(); + while (NULL != (wThread = eThread[wait_idx]) && !eFin[wait_idx]) { + SLOCK_RELEASE(); + ret = WaitForSingleObject(wThread, 2000); + SLOCK_ACQUIRE(); + if (WAIT_TIMEOUT != ret) + eFin[wait_idx] = true; + } + if (!slock_hold) + SLOCK_RELEASE(); + mylog("Wait_pThread out\n"); +} + +// +// Acquire/releases SLOCK. +// +void IAsyncES::Wait_cThread(bool slock_hold, bool once) { + HANDLE wThread; + int wait_idx; + DWORD ret; + + mylog("Wait_cThread %d,%d in\n", slock_hold, once); + if (!slock_hold) + SLOCK_ACQUIRE(); + if (NULL != eThread[CommitExec]) + wait_idx = CommitExec; + else + wait_idx = AbortExec; + while (NULL != (wThread = eThread[wait_idx]) && !eFin[wait_idx]) { + SLOCK_RELEASE(); + ret = WaitForSingleObject(wThread, 2000); + SLOCK_ACQUIRE(); + if (WAIT_TIMEOUT != ret) + eFin[wait_idx] = true; + else if (once) + break; + } + if (!slock_hold) + SLOCK_RELEASE(); + mylog("Wait_cThread out\n"); +} + +/* Processing Prepare/Commit Request */ +typedef struct RequestPara { + DWORD type; + LPVOID lpr; + HRESULT res; +} RequestPara; + +// +// Acquire/releases LIFELOCK -> SLOCK. +// may acquire/release ELOCK. 
+// +void IAsyncES::SetDone(HRESULT res) { + LIFELOCK_ACQUIRE; + SLOCK_ACQUIRE(); + done = true; + if (E_FAIL == res || E_UNEXPECTED == res) + abort = true; + requestAccepted = true; + commit_result = res; + if (dtcconn) { + EsDtc_set_async(dtcconn, NULL); + if (isolated) { + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + ELOCK_ACQUIRE(); + if (dtcconn) { + mylog("Freeing isolated connection=%p\n", dtcconn); + EsDtc_free_connect(dtcconn); + SetConnection(NULL); + } + ELOCK_RELEASE(); + } else { + dtcconn = NULL; + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + } + } else { + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + } +} + +// +// Acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. +// +void *IAsyncES::generateXAConn(bool spinAcquired) { + mylog("generateXAConn isolated=%d dtcconn=%p\n", isolated, dtcconn); + if (!spinAcquired) + SLOCK_ACQUIRE(); + if (isolated || done) { + SLOCK_RELEASE(); + return dtcconn; + } + SLOCK_RELEASE(); + ELOCK_ACQUIRE(); + LIFELOCK_ACQUIRE; + SLOCK_ACQUIRE(); + if (dtcconn && !isolated && !done && prepared) { + void *sconn = dtcconn; + + dtcconn = EsDtc_isolate(sconn, useAnotherRoom); + isolated = true; + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + // EsDtc_connect(dtcconn); may be called in getLockedXAConn + } else { + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + } + ELOCK_RELEASE(); + return dtcconn; +} + +// +// Acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. +// +void *IAsyncES::isolateXAConn(bool spinAcquired, bool continueConnection) { + void *sconn; + + mylog("isolateXAConn isolated=%d dtcconn=%p\n", isolated, dtcconn); + if (!spinAcquired) + SLOCK_ACQUIRE(); + if (isolated || done || NULL == dtcconn) { + SLOCK_RELEASE(); + return dtcconn; + } + SLOCK_RELEASE(); + ELOCK_ACQUIRE(); + LIFELOCK_ACQUIRE; + SLOCK_ACQUIRE(); + if (isolated || done || NULL == dtcconn) { + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + ELOCK_RELEASE(); + return dtcconn; + } + sconn = dtcconn; + + dtcconn = + EsDtc_isolate(sconn, continueConnection ? 
0 : disposingConnection); + + isolated = true; + SLOCK_RELEASE(); + LIFELOCK_RELEASE; + if (continueConnection) { + EsDtc_connect(sconn); + } + ELOCK_RELEASE(); + return dtcconn; +} + +// +// Acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. +// +void *IAsyncES::separateXAConn(bool spinAcquired, bool continueConnection) { + mylog("%s isolated=%d dtcconn=%p\n", __FUNCTION__, isolated, dtcconn); + if (!spinAcquired) + SLOCK_ACQUIRE(); + if (prepared) + return generateXAConn(true); + else + return isolateXAConn(true, continueConnection); +} + +// +// [when entered] +// ELOCK is held. +// +// Acquire/releases SLOCK. +// Try to acquire CONN_CS also. +// +// [on exit] +// ELOCK is kept held. +// If the return connection != NULL +// the CONN_CS lock for the connection is held. +// +void *IAsyncES::getLockedXAConn() { + SLOCK_ACQUIRE(); + while (!done && !isolated && NULL != dtcconn) { + /* + * Note that COMMIT/ROLLBACK PREPARED command should be + * issued outside the transaction. + */ + if (!prepared || !CONN_IS_IN_TRANS(dtcconn)) { + if (TRY_CONN_CS_ACQUIRE(dtcconn)) { + if (prepared && CONN_IS_IN_TRANS(dtcconn)) { + CONN_CS_RELEASE(dtcconn); + } else + break; + } + } + separateXAConn(true, true); + SLOCK_ACQUIRE(); // SLOCK was released by separateXAConn() + } + SLOCK_RELEASE(); + if (isolated && NULL != dtcconn) { + CONN_CS_ACQUIRE(dtcconn); + if (!EsDtc_get_property(dtcconn, connected)) + EsDtc_connect(dtcconn); + } + return dtcconn; +} + +// +// Acquire/release ELOCK -> SLOCK. 
+// +HRESULT IAsyncES::RequestExec(DWORD type, HRESULT res) { + HRESULT ret; + bool bReleaseEnlist = false; + void *econn; + char esxid[258]; + + mylog("%p->RequestExec type=%d conn=%p\n", this, type, dtcconn); + XidToText(xid, esxid); +#ifdef _SLEEP_FOR_TEST_ + /*Sleep(2000);*/ +#endif /* _SLEEP_FOR_TEST_ */ + ELOCK_ACQUIRE(); + switch (type) { + case PrepareExec: + if (done || NULL == dtcconn) { + res = E_UNEXPECTED; + break; + } + if (econn = getLockedXAConn(), NULL != econn) { + EsDtc_set_property(econn, inprogress, (void *)1); + if (E_FAIL == res) + EsDtc_one_phase_operation(econn, ABORT_GLOBAL_TRANSACTION); + else if (XACT_S_SINGLEPHASE == res) { + if (!EsDtc_one_phase_operation(econn, ONE_PHASE_COMMIT)) + res = E_FAIL; + } else { + if (!EsDtc_two_phase_operation(econn, PREPARE_TRANSACTION, + esxid)) + res = E_FAIL; + } + EsDtc_set_property(econn, inprogress, (void *)0); + CONN_CS_RELEASE(econn); + } + if (S_OK != res) { + SetDone(res); + bReleaseEnlist = true; + } + ret = enlist->PrepareRequestDone(res, NULL, NULL); + SetPrepareResult(res); + break; + case CommitExec: + Wait_pThread(false); + if (E_FAIL != res) { + econn = getLockedXAConn(); + if (econn) { + EsDtc_set_property(econn, inprogress, (void *)1); + if (!EsDtc_two_phase_operation(econn, COMMIT_PREPARED, + esxid)) + res = E_FAIL; + EsDtc_set_property(econn, inprogress, (void *)0); + CONN_CS_RELEASE(econn); + } + } + SetDone(res); + ret = enlist->CommitRequestDone(res); + bReleaseEnlist = true; + break; + case AbortExec: + Wait_pThread(false); + if (prepared && !done) { + econn = getLockedXAConn(); + if (econn) { + EsDtc_set_property(econn, inprogress, (void *)1); + if (!EsDtc_two_phase_operation(econn, ROLLBACK_PREPARED, + esxid)) + res = E_FAIL; + EsDtc_set_property(econn, inprogress, (void *)0); + CONN_CS_RELEASE(econn); + } + } + SetDone(res); + ret = enlist->AbortRequestDone(res); + bReleaseEnlist = true; + break; + default: + ret = -1; + } + if (bReleaseEnlist) { + 
helper->ReleaseRMCookie(RMCookie, TRUE); + enlist->Release(); + } + ELOCK_RELEASE(); + mylog("%p->Done ret=%d\n", this, ret); + return ret; +} + +// +// Acquire/releses SLOCK +// or [ELOCK -> LIFELOCK -> ] SLOCK. +// +HRESULT IAsyncES::ReleaseConnection(void) { + mylog("%p->ReleaseConnection\n", this); + + SLOCK_ACQUIRE(); + if (isolated || NULL == dtcconn) { + SLOCK_RELEASE(); + return SQL_SUCCESS; + } + Wait_pThread(true); + if (NULL != eThread[CommitExec] || NULL != eThread[AbortExec] + || requestAccepted) { + if (!done) + Wait_cThread(true, true); + } + if (!isolated && !done && dtcconn + && EsDtc_get_property(dtcconn, connected)) { + isolateXAConn(true, false); + } else + SLOCK_RELEASE(); + mylog("%p->ReleaseConnection exit\n", this); + return SQL_SUCCESS; +} + +EXTERN_C static unsigned WINAPI DtcRequestExec(LPVOID para); +EXTERN_C static void __cdecl ClosePrepareThread(LPVOID para); +EXTERN_C static void __cdecl CloseCommitThread(LPVOID para); +EXTERN_C static void __cdecl CloseAbortThread(LPVOID para); + +// +// Acquire/release [ELOCK -> ] SLOCK. +// +HRESULT STDMETHODCALLTYPE IAsyncES::PrepareRequest(BOOL fRetaining, DWORD grfRM, + BOOL fWantMoniker, + BOOL fSinglePhase) { + HRESULT ret, res; + RequestPara *reqp; + const DWORD reqtype = PrepareExec; + + mylog("%p PrepareRequest called grhRM=%d enl=%p\n", this, grfRM, enlist); + SLOCK_ACQUIRE(); + if (dtcconn && 0 != EsDtc_get_property(dtcconn, errorNumber)) + res = ret = E_FAIL; + else { + ret = S_OK; + if (fSinglePhase) { + res = XACT_S_SINGLEPHASE; + mylog("XACT is singlePhase\n"); + } else + res = S_OK; + } + SLOCK_RELEASE(); + ELOCK_ACQUIRE(); +#ifdef _SLEEP_FOR_TEST_ + Sleep(2000); +#endif /* _SLEEP_FOR_TEST_ */ + reqp = new RequestPara; + reqp->type = reqtype; + reqp->lpr = (LPVOID)this; + reqp->res = res; +#define DONT_CALL_RETURN_FROM_HERE ? ? ? 
+ AddRef(); + HANDLE hThread = + (HANDLE)_beginthreadex(NULL, 0, DtcRequestExec, reqp, 0, NULL); + if (NULL == hThread) { + delete (reqp); + ret = E_FAIL; + } else { + SLOCK_ACQUIRE(); + eThread[reqtype] = hThread; + SLOCK_RELEASE(); + /* + * We call here _beginthread not _beginthreadex + * so as not to call CloseHandle() to clean up + * the thread. + */ + _beginthread(ClosePrepareThread, 0, (void *)this); + } + ELOCK_RELEASE(); + Release(); +#undef return + return ret; +} +// +// Acquire/release [ELOCK -> ] SLOCK. +// +HRESULT STDMETHODCALLTYPE IAsyncES::CommitRequest(DWORD grfRM, + XACTUOW *pNewUOW) { + HRESULT res = S_OK, ret = S_OK; + RequestPara *reqp; + const DWORD reqtype = CommitExec; + + mylog("%p CommitRequest called grfRM=%d enl=%p\n", this, grfRM, enlist); + + SLOCK_ACQUIRE(); + if (!prepared || done) + ret = E_UNEXPECTED; + else if (S_OK != prepare_result) + ret = E_UNEXPECTED; + SLOCK_RELEASE(); + if (S_OK != ret) + return ret; +#define DONT_CALL_RETURN_FROM_HERE ? ? ? + AddRef(); + ELOCK_ACQUIRE(); +#ifdef _SLEEP_FOR_TEST_ + Sleep(1000); +#endif /* _SLEEP_FOR_TEST_ */ + reqp = new RequestPara; + reqp->type = reqtype; + reqp->lpr = (LPVOID)this; + reqp->res = res; + enlist->AddRef(); + HANDLE hThread = + (HANDLE)_beginthreadex(NULL, 0, DtcRequestExec, reqp, 0, NULL); + if (NULL == hThread) { + delete (reqp); + enlist->Release(); + ret = E_FAIL; + } else { + SLOCK_ACQUIRE(); + eThread[reqtype] = hThread; + SLOCK_RELEASE(); + /* + * We call here _beginthread not _beginthreadex + * so as not to call CloseHandle() to clean up + * the thread. + */ + _beginthread(CloseCommitThread, 0, (void *)this); + } + mylog("CommitRequest ret=%d\n", ret); + requestAccepted = true; + ELOCK_RELEASE(); + Release(); +#undef return + return ret; +} +// +// Acquire/release [ELOCK -> ] SLOCK. 
+// +HRESULT STDMETHODCALLTYPE IAsyncES::AbortRequest(BOID *pboidReason, + BOOL fRetaining, + XACTUOW *pNewUOW) { + HRESULT res = S_OK, ret = S_OK; + RequestPara *reqp; + const DWORD reqtype = AbortExec; + + mylog("%p AbortRequest called\n", this); + SLOCK_ACQUIRE(); + if (done) + ret = E_UNEXPECTED; + else if (prepared && S_OK != prepare_result) + ret = E_UNEXPECTED; + SLOCK_RELEASE(); + if (S_OK != ret) + return ret; +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wkeyword-macro" +#endif // __APPLE__ +#define return DONT_CALL_RETURN_FROM_HERE ? ? ? +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + AddRef(); + ELOCK_ACQUIRE(); + if (!prepared && dtcconn) { + EsDtc_set_property(dtcconn, inprogress, (void *)1); + EsDtc_one_phase_operation(dtcconn, ONE_PHASE_ROLLBACK); + EsDtc_set_property(dtcconn, inprogress, (void *)0); + } + reqp = new RequestPara; + reqp->type = reqtype; + reqp->lpr = (LPVOID)this; + reqp->res = res; + enlist->AddRef(); + HANDLE hThread = + (HANDLE)_beginthreadex(NULL, 0, DtcRequestExec, reqp, 0, NULL); + if (NULL == hThread) { + delete (reqp); + enlist->Release(); + ret = E_FAIL; + } else { + SLOCK_ACQUIRE(); + eThread[reqtype] = hThread; + SLOCK_RELEASE(); + /* + * We call here _beginthread not _beginthreadex + * so as not to call CloseHandle() to clean up + * the thread. 
+ */ + _beginthread(CloseAbortThread, 0, (void *)this); + } + mylog("AbortRequest ret=%d\n", ret); + requestAccepted = true; + ELOCK_RELEASE(); + Release(); +#undef return + return ret; +} +HRESULT STDMETHODCALLTYPE IAsyncES::TMDown(void) { + mylog("%p TMDown called\n", this); + return S_OK; +} + +bool IAsyncES::CloseThread(DWORD type) { + CSTR func = "CloseThread"; + HANDLE th; + DWORD ret, excode = S_OK; + bool rls_async = false; + + mylog("%s for %p thread=%d\n", func, this, eThread[type]); + if (th = eThread[type], NULL == th || eFin[type]) + return false; + ret = WaitForSingleObject(th, INFINITE); + if (WAIT_OBJECT_0 == ret) { + switch (type) { + case IAsyncES::AbortExec: + case IAsyncES::CommitExec: + rls_async = true; + break; + default: + GetExitCodeThread(th, &excode); + if (S_OK != excode) + rls_async = true; + } + SLOCK_ACQUIRE(); + eThread[type] = NULL; + eFin[type] = true; + SLOCK_RELEASE(); + CloseHandle(th); + } + mylog("%s ret=%d\n", func, ret); + return rls_async; +} + +EXTERN_C static void __cdecl ClosePrepareThread(LPVOID para) { + CSTR func = "ClosePrepareThread"; + IAsyncES *async = (IAsyncES *)para; + bool release; + + mylog("%s for %p", func, async); + if (release = async->CloseThread(IAsyncES::PrepareExec), release) + async->Release(); + mylog("%s release=%d\n", func, release); +} + +EXTERN_C static void __cdecl CloseCommitThread(LPVOID para) { + CSTR func = "CloseCommitThread"; + IAsyncES *async = (IAsyncES *)para; + bool release; + + mylog("%s for %p", func, async); + if (release = async->CloseThread(IAsyncES::CommitExec), release) + async->Release(); + mylog("%s release=%d\n", func, release); +} + +EXTERN_C static void __cdecl CloseAbortThread(LPVOID para) { + CSTR func = "CloseAbortThread"; + IAsyncES *async = (IAsyncES *)para; + bool release; + + mylog("%s for %p", func, async); + if (release = async->CloseThread(IAsyncES::AbortExec), release) + async->Release(); + mylog("%s release=%d\n", func, release); +} + +EXTERN_C static unsigned 
WINAPI DtcRequestExec(LPVOID para) { + RequestPara *reqp = (RequestPara *)para; + DWORD type = reqp->type; + IAsyncES *async = (IAsyncES *)reqp->lpr; + HRESULT res = reqp->res, ret; + + mylog("DtcRequestExec type=%d", reqp->type); + delete (reqp); + ret = async->RequestExec(type, res); + mylog(" Done ret=%d\n", ret); + return ret; +} + +CSTR regKey = "SOFTWARE\\Microsoft\\MSDTC\\XADLL"; + +static int regkeyCheck(const char *xalibname, const char *xalibpath) { + int retcode = 0; + LONG ret; + HKEY sKey; + DWORD rSize; + + ret = ::RegOpenKeyEx(HKEY_LOCAL_MACHINE, regKey, 0, + KEY_QUERY_VALUE | KEY_SET_VALUE | KEY_WOW64_64KEY, + &sKey); + switch (ret) { + case ERROR_SUCCESS: + break; + case ERROR_FILE_NOT_FOUND: + ret = ::RegCreateKeyEx(HKEY_LOCAL_MACHINE, regKey, 0, NULL, + REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, + NULL, &sKey, NULL); + mylog("%s:CreateKeyEx ret=%d\n", __FUNCTION__, ret); + break; + default: + mylog("%s:OpenKeyEx ret=%d\n", __FUNCTION__, ret); + } + if (ERROR_SUCCESS != ret) + return -1; + else { + char keyval[1024]; + + rSize = sizeof(keyval); + switch (ret = ::RegQueryValueEx(sKey, xalibname, NULL, NULL, + (LPBYTE)keyval, &rSize)) { + case ERROR_SUCCESS: + if (rSize > 0) { + if (0 == _stricmp(keyval, xalibpath)) + break; + mylog("%s:XADLL value %s is different from %s\n", + __FUNCTION__, keyval, xalibpath); + if (IsWow64()) { + mylog( + "%s:avoid RegSetValue operation from wow64 " + "process\n", + __FUNCTION__); + break; + } + } + case ERROR_FILE_NOT_FOUND: + mylog("%s:Setting value %s\n", __FUNCTION__, xalibpath); + ret = ::RegSetValueEx(sKey, xalibname, 0, REG_SZ, + (CONST BYTE *)xalibpath, + (DWORD)strlen(xalibpath) + 1); + if (ERROR_SUCCESS == ret) + retcode = 1; + else { + retcode = -1; + mylog("%s:SetValuEx ret=%d\n", __FUNCTION__, ret); + } + break; + default: + retcode = -1; + mylog("%s:QueryValuEx ret=%d\n", __FUNCTION__, ret); + break; + } + ::RegCloseKey(sKey); + } + return retcode; +} + +RETCODE static EnlistInDtc_1pipe(void *conn, 
ITransaction *pTra, + ITransactionDispenser *pDtc, int method) { + CSTR func = "EnlistInDtc_1pipe"; + static IDtcToXaHelperSinglePipe *pHelper = NULL; + ITransactionResourceAsync *pRes = NULL; + IAsyncES *asdum; + HRESULT res; + DWORD dwRMCookie; + XID xid; + const char *xalibname = GetXaLibName(); + const char *xalibpath = GetXaLibPath(); + + int recovLvl; + char errmsg[256]; + char reason[128]; + + if (!pHelper) { + res = pDtc->QueryInterface(IID_IDtcToXaHelperSinglePipe, + (void **)&pHelper); + if (res != S_OK || !pHelper) { + mylog("DtcToXaHelperSingelPipe get error %d\n", res); + pHelper = NULL; + return SQL_ERROR; + } + } + res = (NULL != (asdum = new IAsyncES)) ? S_OK : E_FAIL; + if (S_OK != res) { + mylog("CoCreateInstance error %d\n", res); + return SQL_ERROR; + } + + recovLvl = EsDtc_is_recovery_available(conn, reason, sizeof(reason)); + switch (method) { + case DTC_CHECK_BEFORE_LINK: + if (0 == recovLvl) { + snprintf(errmsg, sizeof(errmsg), + "%s is unavailable in distributed transactions", + reason); + EsDtc_set_error(conn, errmsg, func); + return SQL_ERROR; + } + } + /*mylog("dllname=%s dsn=%s\n", xalibname, conn->connInfo.dsn); res = 0;*/ + char dtcname[1024]; + EsDtc_create_connect_string(conn, dtcname, sizeof(dtcname)); + + bool confirmedRegkey = false, confirmingLink = false, xarmerr = false; + char error_header[64]; + while (true) { + res = pHelper->XARMCreate(dtcname, (char *)xalibname, &dwRMCookie); + + mylog("XARMcreate error code=%x (%d %d)\n", res, confirmedRegkey, + confirmingLink); + xarmerr = true; + if (!confirmingLink) + snprintf(error_header, sizeof(error_header), + "XARMcreate error code=%x", res); + switch (res) { + case S_OK: + if (confirmingLink) { + switch (recovLvl) { + case 0: + snprintf(errmsg, sizeof(errmsg), + "%s:%s is currently unavailable in " + "distributed transactions", + error_header, reason); + break; + case -1: + snprintf( + errmsg, sizeof(errmsg), + "%s:Possibly you connect to the database whose " + "authentication 
method is %s or ident", + error_header, reason); + break; + case 1: + snprintf( + errmsg, sizeof(errmsg), + "%s:Are you trying to connect to the database " + "whose authentication method is ident?", + error_header); + break; + } + } else + xarmerr = false; + break; + case XACT_E_XA_TX_DISABLED: + snprintf(errmsg, sizeof(errmsg), + "%s:Please enable XA transaction in MSDTC security " + "configuration", + error_header); + break; + case XACT_E_TMNOTAVAILABLE: + snprintf(errmsg, sizeof(errmsg), + "%s:Please start Distributed Transaction Coordinator " + "service", + error_header); + break; + case E_FAIL: + if (!confirmedRegkey) { + int retcode = regkeyCheck(xalibname, xalibpath); + confirmedRegkey = true; + if (retcode > 0) + continue; + } + switch (method) { + case DTC_CHECK_RM_CONNECTION: + if (!confirmingLink) { + confirmingLink = true; + strcat(dtcname, ";" KEYWORD_DTC_CHECK "=0"); + continue; + } + default: + snprintf(errmsg, sizeof(errmsg), + "%s:Failed to link with DTC service. Please " + "look at the log of Event Viewer etc.", + error_header); + } + break; + case XACT_E_CONNECTION_DOWN: + snprintf(errmsg, sizeof(errmsg), + "%s:Lost connection with DTC transaction " + "manager\nMSDTC has some trouble?", + error_header); + break; + default: + snprintf(errmsg, sizeof(errmsg), "%s\n", error_header); + break; + } + break; + } + if (xarmerr) { + EsDtc_set_error(conn, errmsg, func); + return SQL_ERROR; + } + + res = pHelper->ConvertTridToXID((DWORD *)pTra, dwRMCookie, &xid); + if (res != S_OK) { + mylog("ConvertTridToXid error %d\n", res); + return SQL_ERROR; + } + { + char esxid[258]; + XidToText(xid, esxid); + mylog("ConvertTridToXID -> %s\n", esxid); + } + asdum->SetXid(&xid); + /* Create an IAsyncES instance by myself */ + /* DLLGetClassObject(GUID_IAsyncES, IID_ITransactionResourceAsync, (void **) + * &asdum); */ + + asdum->SetHelper(pHelper, dwRMCookie); + res = pHelper->EnlistWithRM(dwRMCookie, pTra, asdum, &asdum->enlist); + if (res != S_OK) { + 
mylog("EnlistWithRM error %d\n", res); + pHelper->ReleaseRMCookie(dwRMCookie, TRUE); + return SQL_ERROR; + } + + mylog("asdum=%p start transaction\n", asdum); + asdum->SetConnection(conn); + LIFELOCK_ACQUIRE; + EsDtc_set_async(conn, asdum); + LIFELOCK_RELEASE; + + return SQL_SUCCESS; +} + +EXTERN_C RETCODE IsolateDtcConn(void *conn, BOOL continueConnection) { + IAsyncES *async; + + LIFELOCK_ACQUIRE; + if (async = (IAsyncES *)EsDtc_get_async(conn), NULL != async) { + if (EsDtc_get_property(conn, idleInGlobalTransaction)) { + async->AddRef(); + LIFELOCK_RELEASE; + async->separateXAConn(false, continueConnection ? true : false); + async->Release(); + } else + LIFELOCK_RELEASE; + } else + LIFELOCK_RELEASE; + return SQL_SUCCESS; +} + +static ITransactionDispenser *getITransactionDispenser(DWORD grfOptions, + HRESULT *hres) { + static ITransactionDispenser *pDtc = NULL; + HRESULT res = S_OK; + + if (!pDtc) { + res = DtcGetTransactionManagerEx(NULL, NULL, IID_ITransactionDispenser, + + grfOptions, NULL, (void **)&pDtc); + if (FAILED(res)) { + mylog("DtcGetTransactionManager error %x\n", res); + pDtc = NULL; + } + } + if (hres) + *hres = res; + + return pDtc; +} + +EXTERN_C void *GetTransactionObject(HRESULT *hres) { + ITransaction *pTra = NULL; + ITransactionDispenser *pDtc = NULL; + + if (pDtc = getITransactionDispenser(OLE_TM_FLAG_NONE, hres), NULL == pDtc) + return pTra; + HRESULT res = pDtc->BeginTransaction(NULL, ISOLATIONLEVEL_READCOMMITTED, 0, + NULL, &pTra); + switch (res) { + case S_OK: + break; + default: + pTra = NULL; + } + if (hres) + *hres = res; + return pTra; +} + +EXTERN_C void ReleaseTransactionObject(void *pObj) { + ITransaction *pTra = (ITransaction *)pObj; + + if (!pTra) + return; + pTra->Release(); +} + +EXTERN_C RETCODE EnlistInDtc(void *conn, void *pTra, int method) { + ITransactionDispenser *pDtc = NULL; + RETCODE ret; + + if (!pTra) { + IAsyncES *asdum = (IAsyncES *)EsDtc_get_async(conn); + EsDtc_set_property(conn, enlisted, (void *)0); + return 
SQL_SUCCESS; + } + if (CONN_IS_IN_TRANS(conn)) { + EsDtc_one_phase_operation(conn, SHUTDOWN_LOCAL_TRANSACTION); + } + HRESULT hres; + pDtc = getITransactionDispenser(OLE_TM_FLAG_NODEMANDSTART, &hres); + if (!pDtc) { + char errmsg[128]; + snprintf(errmsg, sizeof(errmsg), + "enlistment error:DtcGetTransactionManager error code=%x", + hres); + EsDtc_set_error(conn, errmsg, __FUNCTION__); + return SQL_ERROR; + } + ret = EnlistInDtc_1pipe(conn, (ITransaction *)pTra, pDtc, method); + if (SQL_SUCCEEDED(ret)) + EsDtc_set_property(conn, enlisted, (void *)1); + return ret; +} + +EXTERN_C RETCODE DtcOnDisconnect(void *conn) { + mylog("DtcOnDisconnect\n"); + LIFELOCK_ACQUIRE; + IAsyncES *asdum = (IAsyncES *)EsDtc_get_async(conn); + if (asdum) { + asdum->AddRef(); + LIFELOCK_RELEASE; + asdum->ReleaseConnection(); + asdum->Release(); + } else + LIFELOCK_RELEASE; + return SQL_SUCCESS; +} + +#endif /* _HANDLE_ENLIST_IN_DTC_ */ diff --git a/sql-odbc/src/odfesqlodbc/CMakeLists.txt b/sql-odbc/src/odfesqlodbc/CMakeLists.txt new file mode 100644 index 0000000000..2910d4cf32 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/CMakeLists.txt @@ -0,0 +1,77 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +project(odfesqlodbc) + +# Source files for odfesqlodbc +set( SOURCE_FILES + bind.c columninfo.c connection.c convert.c + descriptor.c dlg_specific.c drvconn.c options.c + environ.c execute.c info.c loadlib.c + misc.c multibyte.c mylog.c tuple.c + parse.c results.c statement.c odbcapi30.c + qresult.c odbcapi30w.c es_api30.c es_types.c + es_utility.cpp es_communication.cpp es_connection.cpp es_odbc.c + es_driver_connect.cpp es_helper.cpp es_info.cpp es_parse_result.cpp + es_semaphore.cpp es_statement.cpp win_unicode.c odbcapi.c + odbcapiw.c es_result_queue.cpp + ) +if(WIN32) +set(SOURCE_FILES ${SOURCE_FILES} dlg_wingui.c setup.c) +endif() + +set( HEADER_FILES + bind.h catfunc.h columninfo.h + convert.h descriptor.h dlg_specific.h drvconn.h + environ.h es_apifunc.h es_communication.h es_parse_result.h + es_connection.h es_driver_connect.h es_helper.h es_info.h + es_statement.h es_types.h loadlib.h + misc.h multibyte.h mylog.h es_utility.h + resource.h statement.h tuple.h unicode_support.h + es_apifunc.h es_odbc.h es_semaphore.h qresult.h + version.h win_setup.h es_result_queue.h + ) + +# Generate dll (SHARED) +if(WIN32) +set(RESOURCE_FILES es_odbc.rc) +add_library(odfesqlodbc SHARED ${SOURCE_FILES} ${HEADER_FILES} ${RESOURCE_FILES} ${AWSSDK_LIB_DIR}) +else() +add_library(odfesqlodbc SHARED ${SOURCE_FILES} ${HEADER_FILES}) +endif() + +include_directories( + ${LIBRARY_DIRECTORY}/../src + ${CMAKE_CURRENT_SOURCE_DIR} + ${ODFEENLIST_SRC} + ${RABBIT_SRC} + ${RAPIDJSON_SRC} + ${AWSSDK_INCLUDE_DIR} + ) + +# Platform specific library dependencies +if(WIN32) + # Windows specifiec + target_link_libraries(odfesqlodbc wsock32 ws2_32 winmm user32 gdi32 legacy_stdio_definitions aws-cpp-sdk-core kernel32 advapi32 secur32 XOleHlp Wldap32 crypt32 Normaliz odbccp32 odbc32) + target_link_libraries(odfesqlodbc debug msvcrtd) + target_link_libraries(odfesqlodbc optimized msvcrt) +elseif(APPLE) + # Apple specific + target_link_libraries(odfesqlodbc iodbc iodbcinst aws-cpp-sdk-core) 
+elseif(UNIX) + # Unix specific + include_directories(/usr/src/linux-headers-5.0.0-27/include) + target_link_libraries(odfesqlodbc aws-cpp-sdk-core odbc odbcinst) +endif() diff --git a/sql-odbc/src/odfesqlodbc/bind.c b/sql-odbc/src/odfesqlodbc/bind.c new file mode 100644 index 0000000000..a4795203c1 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/bind.c @@ -0,0 +1,703 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "bind.h" + +#include +#include + +#include "descriptor.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_types.h" +#include "misc.h" +#include "multibyte.h" +#include "qresult.h" +#include "statement.h" + +/* Associate a user-supplied buffer with a database column. 
*/ +RETCODE SQL_API ESAPI_BindCol(HSTMT hstmt, SQLUSMALLINT icol, + SQLSMALLINT fCType, PTR rgbValue, + SQLLEN cbValueMax, SQLLEN *pcbValue) { + StatementClass *stmt = (StatementClass *)hstmt; + CSTR func = "ESAPI_BindCol"; + ARDFields *opts; + GetDataInfo *gdata_info; + BindInfoClass *bookmark; + RETCODE ret = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering...\n"); + + MYLOG(ES_DEBUG, "**** : stmt = %p, icol = %d\n", stmt, icol); + MYLOG(ES_DEBUG, "**** : fCType=%d rgb=%p valusMax=" FORMAT_LEN " pcb=%p\n", + fCType, rgbValue, cbValueMax, pcbValue); + + if (!stmt) { + SC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + opts = SC_get_ARDF(stmt); + if (stmt->status == STMT_EXECUTING) { + SC_set_error(stmt, STMT_SEQUENCE_ERROR, + "Can't bind columns while statement is still executing.", + func); + return SQL_ERROR; + } + +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wkeyword-macro" +#endif // __APPLE__ +#define return DONT_CALL_RETURN_FROM_HERE ? ? ? +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + SC_clear_error(stmt); + /* If the bookmark column is being bound, then just save it */ + if (icol == 0) { + bookmark = opts->bookmark; + if (rgbValue == NULL) { + if (bookmark) { + bookmark->buffer = NULL; + bookmark->used = bookmark->indicator = NULL; + } + } else { + /* Make sure it is the bookmark data type */ + switch (fCType) { + case SQL_C_BOOKMARK: + case SQL_C_VARBOOKMARK: + break; + default: + SC_set_error(stmt, STMT_PROGRAM_TYPE_OUT_OF_RANGE, + "Bind column 0 is not of type SQL_C_BOOKMARK", + func); + MYLOG( + ES_ERROR, + "Bind column 0 is type %d not of type SQL_C_BOOKMARK\n", + fCType); + ret = SQL_ERROR; + goto cleanup; + } + + bookmark = ARD_AllocBookmark(opts); + bookmark->buffer = rgbValue; + bookmark->used = bookmark->indicator = pcbValue; + bookmark->buflen = cbValueMax; + bookmark->returntype = fCType; + } + goto cleanup; + } + + /* + * Allocate enough bindings if not already done. 
Most likely, + * execution of a statement would have setup the necessary bindings. + * But some apps call BindCol before any statement is executed. + */ + if (icol > opts->allocated) + extend_column_bindings(opts, icol); + gdata_info = SC_get_GDTI(stmt); + if (icol > gdata_info->allocated) + extend_getdata_info(gdata_info, icol, FALSE); + + /* check to see if the bindings were allocated */ + if (!opts->bindings || !gdata_info->gdata) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Could not allocate memory for bindings.", func); + ret = SQL_ERROR; + goto cleanup; + } + + /* use zero based col numbers from here out */ + icol--; + + /* Reset for SQLGetData */ + GETDATA_RESET(gdata_info->gdata[icol]); + + if (rgbValue == NULL) { + /* we have to unbind the column */ + opts->bindings[icol].buflen = 0; + opts->bindings[icol].buffer = NULL; + opts->bindings[icol].used = opts->bindings[icol].indicator = NULL; + opts->bindings[icol].returntype = SQL_C_CHAR; + opts->bindings[icol].precision = 0; + opts->bindings[icol].scale = 0; + if (gdata_info->gdata[icol].ttlbuf) + free(gdata_info->gdata[icol].ttlbuf); + gdata_info->gdata[icol].ttlbuf = NULL; + gdata_info->gdata[icol].ttlbuflen = 0; + gdata_info->gdata[icol].ttlbufused = 0; + } else { + /* ok, bind that column */ + opts->bindings[icol].buflen = cbValueMax; + opts->bindings[icol].buffer = rgbValue; + opts->bindings[icol].used = opts->bindings[icol].indicator = pcbValue; + opts->bindings[icol].returntype = fCType; + opts->bindings[icol].precision = 0; + switch (fCType) { + case SQL_C_NUMERIC: + opts->bindings[icol].precision = 32; + break; + case SQL_C_TIMESTAMP: + case SQL_C_INTERVAL_DAY_TO_SECOND: + case SQL_C_INTERVAL_HOUR_TO_SECOND: + case SQL_C_INTERVAL_MINUTE_TO_SECOND: + case SQL_C_INTERVAL_SECOND: + opts->bindings[icol].precision = 6; + break; + } + opts->bindings[icol].scale = 0; + + MYLOG(ES_DEBUG, " bound buffer[%d] = %p\n", icol, + opts->bindings[icol].buffer); + } + +cleanup: +#undef return + return ret; +} + 
+RETCODE SQL_API ESAPI_NumParams(HSTMT hstmt, SQLSMALLINT *pcpar) { + StatementClass *stmt = (StatementClass *)hstmt; + if (pcpar != NULL) { + *pcpar = 0; + } else { + SC_set_error(stmt, STMT_EXEC_ERROR, "Parameter count address is null", + "ESAPI_NumParams"); + return SQL_ERROR; + } + return SQL_SUCCESS; +} + +/* + * Bindings Implementation + */ +static BindInfoClass *create_empty_bindings(int num_columns) { + BindInfoClass *new_bindings; + int i; + + new_bindings = (BindInfoClass *)malloc(num_columns * sizeof(BindInfoClass)); + if (!new_bindings) + return NULL; + + for (i = 0; i < num_columns; i++) { + new_bindings[i].buflen = 0; + new_bindings[i].buffer = NULL; + new_bindings[i].used = new_bindings[i].indicator = NULL; + } + + return new_bindings; +} + +void extend_parameter_bindings(APDFields *self, SQLSMALLINT num_params) { + ParameterInfoClass *new_bindings; + + MYLOG(ES_TRACE, + "entering ... self=%p, parameters_allocated=%d, num_params=%d,%p\n", + self, self->allocated, num_params, self->parameters); + + /* + * if we have too few, allocate room for more, and copy the old + * entries into the new structure + */ + if (self->allocated < num_params) { + new_bindings = (ParameterInfoClass *)realloc( + self->parameters, sizeof(ParameterInfoClass) * num_params); + if (!new_bindings) { + MYLOG(ES_DEBUG, + "unable to create %d new bindings from %d old bindings\n", + num_params, self->allocated); + + if (self->parameters) + free(self->parameters); + self->parameters = NULL; + self->allocated = 0; + return; + } + memset(&new_bindings[self->allocated], 0, + sizeof(ParameterInfoClass) * (num_params - self->allocated)); + + self->parameters = new_bindings; + self->allocated = num_params; + } + + MYLOG(ES_TRACE, "leaving %p\n", self->parameters); +} + +void extend_iparameter_bindings(IPDFields *self, SQLSMALLINT num_params) { + ParameterImplClass *new_bindings; + + MYLOG(ES_TRACE, + "entering ... 
self=%p, parameters_allocated=%d, num_params=%d\n", + self, self->allocated, num_params); + + /* + * if we have too few, allocate room for more, and copy the old + * entries into the new structure + */ + if (self->allocated < num_params) { + new_bindings = (ParameterImplClass *)realloc( + self->parameters, sizeof(ParameterImplClass) * num_params); + if (!new_bindings) { + MYLOG(ES_DEBUG, + "unable to create %d new bindings from %d old bindings\n", + num_params, self->allocated); + + if (self->parameters) + free(self->parameters); + self->parameters = NULL; + self->allocated = 0; + return; + } + memset(&new_bindings[self->allocated], 0, + sizeof(ParameterImplClass) * (num_params - self->allocated)); + + self->parameters = new_bindings; + self->allocated = num_params; + } + + MYLOG(ES_TRACE, "leaving %p\n", self->parameters); +} + +void reset_a_parameter_binding(APDFields *self, int ipar) { + MYLOG(ES_TRACE, "entering ... self=%p, parameters_allocated=%d, ipar=%d\n", + self, self->allocated, ipar); + + if (ipar < 1 || ipar > self->allocated) + return; + + ipar--; + self->parameters[ipar].buflen = 0; + self->parameters[ipar].buffer = NULL; + self->parameters[ipar].used = self->parameters[ipar].indicator = NULL; + self->parameters[ipar].CType = 0; + self->parameters[ipar].data_at_exec = FALSE; + self->parameters[ipar].precision = 0; + self->parameters[ipar].scale = 0; +} + +void reset_a_iparameter_binding(IPDFields *self, int ipar) { + MYLOG(ES_TRACE, "entering ... 
self=%p, parameters_allocated=%d, ipar=%d\n", + self, self->allocated, ipar); + + if (ipar < 1 || ipar > self->allocated) + return; + + ipar--; + NULL_THE_NAME(self->parameters[ipar].paramName); + self->parameters[ipar].paramType = 0; + self->parameters[ipar].SQLType = 0; + self->parameters[ipar].column_size = 0; + self->parameters[ipar].decimal_digits = 0; + self->parameters[ipar].precision = 0; + self->parameters[ipar].scale = 0; + PIC_set_estype(self->parameters[ipar], 0); +} + +int CountParameters(const StatementClass *self, Int2 *inputCount, Int2 *ioCount, + Int2 *outputCount) { + IPDFields *ipdopts = SC_get_IPDF(self); + int i, num_params, valid_count; + + if (inputCount) + *inputCount = 0; + if (ioCount) + *ioCount = 0; + if (outputCount) + *outputCount = 0; + if (!ipdopts) + return -1; + num_params = self->num_params; + if (ipdopts->allocated < num_params) + num_params = ipdopts->allocated; + for (i = 0, valid_count = 0; i < num_params; i++) { + if (SQL_PARAM_OUTPUT == ipdopts->parameters[i].paramType) { + if (outputCount) { + (*outputCount)++; + valid_count++; + } + } else if (SQL_PARAM_INPUT_OUTPUT == ipdopts->parameters[i].paramType) { + if (ioCount) { + (*ioCount)++; + valid_count++; + } + } else if (inputCount) { + (*inputCount)++; + valid_count++; + } + } + return valid_count; +} + +/* + * Free parameters and free the memory. 
+ */ +void APD_free_params(APDFields *apdopts, char option) { + MYLOG(ES_TRACE, "entering self=%p\n", apdopts); + + if (!apdopts->parameters) + return; + + if (option == STMT_FREE_PARAMS_ALL) { + free(apdopts->parameters); + apdopts->parameters = NULL; + apdopts->allocated = 0; + } + + MYLOG(ES_TRACE, "leaving\n"); +} + +void PDATA_free_params(PutDataInfo *pdata, char option) { + int i; + + MYLOG(ES_TRACE, "entering self=%p\n", pdata); + + if (!pdata->pdata) + return; + + for (i = 0; i < pdata->allocated; i++) { + if (pdata->pdata[i].EXEC_used) { + free(pdata->pdata[i].EXEC_used); + pdata->pdata[i].EXEC_used = NULL; + } + if (pdata->pdata[i].EXEC_buffer) { + free(pdata->pdata[i].EXEC_buffer); + pdata->pdata[i].EXEC_buffer = NULL; + } + } + + if (option == STMT_FREE_PARAMS_ALL) { + free(pdata->pdata); + pdata->pdata = NULL; + pdata->allocated = 0; + } + + MYLOG(ES_TRACE, "leaving\n"); +} + +/* + * Free parameters and free the memory. + */ +void IPD_free_params(IPDFields *ipdopts, char option) { + MYLOG(ES_TRACE, "entering self=%p\n", ipdopts); + + if (!ipdopts->parameters) + return; + if (option == STMT_FREE_PARAMS_ALL) { + free(ipdopts->parameters); + ipdopts->parameters = NULL; + ipdopts->allocated = 0; + } + + MYLOG(ES_TRACE, "leaving\n"); +} + +void extend_column_bindings(ARDFields *self, SQLSMALLINT num_columns) { + BindInfoClass *new_bindings; + SQLSMALLINT i; + + MYLOG(ES_TRACE, + "entering ... 
self=%p, bindings_allocated=%d, num_columns=%d\n", self, + self->allocated, num_columns); + + /* + * if we have too few, allocate room for more, and copy the old + * entries into the new structure + */ + if (self->allocated < num_columns) { + new_bindings = create_empty_bindings(num_columns); + if (!new_bindings) { + MYLOG(ES_DEBUG, + "unable to create %d new bindings from %d old bindings\n", + num_columns, self->allocated); + + if (self->bindings) { + free(self->bindings); + self->bindings = NULL; + } + self->allocated = 0; + return; + } + + if (self->bindings) { + for (i = 0; i < self->allocated; i++) + new_bindings[i] = self->bindings[i]; + + free(self->bindings); + } + + self->bindings = new_bindings; + self->allocated = num_columns; + } + + /* + * There is no reason to zero out extra bindings if there are more + * than needed. If an app has allocated extra bindings, let it worry + * about it by unbinding those columns. + */ + + /* SQLBindCol(1..) ... SQLBindCol(10...) # got 10 bindings */ + /* SQLExecDirect(...) # returns 5 cols */ + /* SQLExecDirect(...) # returns 10 cols (now OK) */ + + MYLOG(ES_TRACE, "leaving %p\n", self->bindings); +} + +void reset_a_column_binding(ARDFields *self, int icol) { + BindInfoClass *bookmark; + + MYLOG(ES_TRACE, "entering ... 
self=%p, bindings_allocated=%d, icol=%d\n", + self, self->allocated, icol); + + if (icol > self->allocated) + return; + + /* use zero based col numbers from here out */ + if (0 == icol) { + if (bookmark = self->bookmark, bookmark != NULL) { + bookmark->buffer = NULL; + bookmark->used = bookmark->indicator = NULL; + } + } else { + icol--; + + /* we have to unbind the column */ + self->bindings[icol].buflen = 0; + self->bindings[icol].buffer = NULL; + self->bindings[icol].used = self->bindings[icol].indicator = NULL; + self->bindings[icol].returntype = SQL_C_CHAR; + } +} + +void ARD_unbind_cols(ARDFields *self, BOOL freeall) { + Int2 lf; + + MYLOG(ES_ALL, "freeall=%d allocated=%d bindings=%p\n", freeall, + self->allocated, self->bindings); + for (lf = 1; lf <= self->allocated; lf++) + reset_a_column_binding(self, lf); + if (freeall) { + if (self->bindings) + free(self->bindings); + self->bindings = NULL; + self->allocated = 0; + } +} +void GDATA_unbind_cols(GetDataInfo *self, BOOL freeall) { + Int2 lf; + + MYLOG(ES_ALL, "freeall=%d allocated=%d gdata=%p\n", freeall, + self->allocated, self->gdata); + if (self->fdata.ttlbuf) { + free(self->fdata.ttlbuf); + self->fdata.ttlbuf = NULL; + } + self->fdata.ttlbuflen = self->fdata.ttlbufused = 0; + GETDATA_RESET(self->fdata); + for (lf = 1; lf <= self->allocated; lf++) + reset_a_getdata_info(self, lf); + if (freeall) { + if (self->gdata) + free(self->gdata); + self->gdata = NULL; + self->allocated = 0; + } +} + +void GetDataInfoInitialize(GetDataInfo *gdata_info) { + GETDATA_RESET(gdata_info->fdata); + gdata_info->fdata.ttlbuf = NULL; + gdata_info->fdata.ttlbuflen = gdata_info->fdata.ttlbufused = 0; + gdata_info->allocated = 0; + gdata_info->gdata = NULL; +} +static GetDataClass *create_empty_gdata(int num_columns) { + GetDataClass *new_gdata; + int i; + + new_gdata = (GetDataClass *)malloc(num_columns * sizeof(GetDataClass)); + if (!new_gdata) + return NULL; + for (i = 0; i < num_columns; i++) { + 
GETDATA_RESET(new_gdata[i]); + new_gdata[i].ttlbuf = NULL; + new_gdata[i].ttlbuflen = 0; + new_gdata[i].ttlbufused = 0; + } + + return new_gdata; +} +void extend_getdata_info(GetDataInfo *self, SQLSMALLINT num_columns, + BOOL shrink) { + GetDataClass *new_gdata; + + MYLOG(ES_TRACE, + "entering ... self=%p, gdata_allocated=%d, num_columns=%d\n", self, + self->allocated, num_columns); + + /* + * if we have too few, allocate room for more, and copy the old + * entries into the new structure + */ + if (self->allocated < num_columns) { + new_gdata = create_empty_gdata(num_columns); + if (!new_gdata) { + MYLOG(ES_DEBUG, "unable to create %d new gdata from %d old gdata\n", + num_columns, self->allocated); + + if (self->gdata) { + free(self->gdata); + self->gdata = NULL; + } + self->allocated = 0; + return; + } + if (self->gdata) { + SQLSMALLINT i; + + for (i = 0; i < self->allocated; i++) + new_gdata[i] = self->gdata[i]; + free(self->gdata); + } + self->gdata = new_gdata; + self->allocated = num_columns; + } else if (shrink && self->allocated > num_columns) { + int i; + + for (i = self->allocated; i > num_columns; i--) + reset_a_getdata_info(self, i); + self->allocated = num_columns; + if (0 == num_columns) { + free(self->gdata); + self->gdata = NULL; + } + } + + /* + * There is no reason to zero out extra gdata if there are more + * than needed. If an app has allocated extra gdata, let it worry + * about it by unbinding those columns. 
+ */ + + MYLOG(ES_TRACE, "leaving %p\n", self->gdata); +} +void reset_a_getdata_info(GetDataInfo *gdata_info, int icol) { + if (icol < 1 || icol > gdata_info->allocated) + return; + icol--; + if (gdata_info->gdata[icol].ttlbuf) { + free(gdata_info->gdata[icol].ttlbuf); + gdata_info->gdata[icol].ttlbuf = NULL; + } + gdata_info->gdata[icol].ttlbuflen = gdata_info->gdata[icol].ttlbufused = 0; + GETDATA_RESET(gdata_info->gdata[icol]); +} + +void PutDataInfoInitialize(PutDataInfo *pdata_info) { + pdata_info->allocated = 0; + pdata_info->pdata = NULL; +} +void extend_putdata_info(PutDataInfo *self, SQLSMALLINT num_params, + BOOL shrink) { + PutDataClass *new_pdata; + + MYLOG(ES_TRACE, + "entering ... self=%p, parameters_allocated=%d, num_params=%d\n", + self, self->allocated, num_params); + + /* + * if we have too few, allocate room for more, and copy the old + * entries into the new structure + */ + if (self->allocated < num_params) { + if (self->allocated <= 0 && self->pdata) { + MYLOG(ES_DEBUG, "??? 
pdata is not null while allocated == 0\n"); + self->pdata = NULL; + } + new_pdata = (PutDataClass *)realloc(self->pdata, + sizeof(PutDataClass) * num_params); + if (!new_pdata) { + MYLOG(ES_DEBUG, "unable to create %d new pdata from %d old pdata\n", + num_params, self->allocated); + + self->pdata = NULL; + self->allocated = 0; + return; + } + memset(&new_pdata[self->allocated], 0, + sizeof(PutDataClass) * (num_params - self->allocated)); + + self->pdata = new_pdata; + self->allocated = num_params; + } else if (shrink && self->allocated > num_params) { + int i; + + for (i = self->allocated; i > num_params; i--) + reset_a_putdata_info(self, i); + self->allocated = num_params; + if (0 == num_params) { + free(self->pdata); + self->pdata = NULL; + } + } + + MYLOG(ES_TRACE, "leaving %p\n", self->pdata); +} +void reset_a_putdata_info(PutDataInfo *pdata_info, int ipar) { + if (ipar < 1 || ipar > pdata_info->allocated) + return; + ipar--; + if (pdata_info->pdata[ipar].EXEC_used) { + free(pdata_info->pdata[ipar].EXEC_used); + pdata_info->pdata[ipar].EXEC_used = NULL; + } + if (pdata_info->pdata[ipar].EXEC_buffer) { + free(pdata_info->pdata[ipar].EXEC_buffer); + pdata_info->pdata[ipar].EXEC_buffer = NULL; + } + pdata_info->pdata[ipar].lobj_oid = 0; +} + +void SC_param_next(const StatementClass *stmt, int *param_number, + ParameterInfoClass **apara, ParameterImplClass **ipara) { + int next; + IPDFields *ipdopts = SC_get_IPDF(stmt); + + if (*param_number < 0) + next = stmt->proc_return; + else + next = *param_number + 1; + if (stmt->discard_output_params) { + for (; next < ipdopts->allocated + && SQL_PARAM_OUTPUT == ipdopts->parameters[next].paramType; + next++) + ; + } + *param_number = next; + if (ipara) { + if (next < ipdopts->allocated) + *ipara = ipdopts->parameters + next; + else + *ipara = NULL; + } + if (apara) { + APDFields *apdopts = SC_get_APDF(stmt); + if (next < apdopts->allocated) + *apara = apdopts->parameters + next; + else + *apara = NULL; + } +} diff --git 
a/sql-odbc/src/odfesqlodbc/bind.h b/sql-odbc/src/odfesqlodbc/bind.h new file mode 100644 index 0000000000..7ddb1d76f4 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/bind.h @@ -0,0 +1,148 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __BIND_H__ +#define __BIND_H__ + +#include "descriptor.h" +#include "es_odbc.h" + +// C Interface +#ifdef __cplusplus +extern "C" { +#endif +/* + * BindInfoClass -- stores information about a bound column + */ +struct BindInfoClass_ { + SQLLEN buflen; /* size of buffer */ + char *buffer; /* pointer to the buffer */ + SQLLEN *used; /* used space in the buffer (for strings + * not counting the '\0') */ + SQLLEN *indicator; /* indicator == used in many cases ? */ + SQLSMALLINT returntype; /* kind of conversion to be applied when + * returning (SQL_C_DEFAULT, + * SQL_C_CHAR... 
etc) */ + SQLSMALLINT precision; /* the precision for numeric or timestamp type */ + SQLSMALLINT scale; /* the scale for numeric type */ + /* area for work variables */ + char dummy_data; /* currently not used */ +}; + +/* struct for SQLGetData */ +typedef struct { + /* for BLOBs which don't hold the data */ + struct GetBlobDataClass { + Int8 data_left64; /* amount of large object data + left to read before conversion */ + } blob; + /* for non-BLOBs which hold the data in ttlbuf after conversion */ + char *ttlbuf; /* to save the large result */ + SQLLEN ttlbuflen; /* the buffer length */ + SQLLEN ttlbufused; /* used length of the buffer */ + SQLLEN data_left; /* amount of data left to read */ +} GetDataClass; +#define GETDATA_RESET(gdc) ((gdc).blob.data_left64 = (gdc).data_left = -1) + +/* + * ParameterInfoClass -- stores information about a bound parameter + */ +struct ParameterInfoClass_ { + SQLLEN buflen; + char *buffer; + SQLLEN *used; + SQLLEN *indicator; /* indicator == used in many cases ? 
*/ + SQLSMALLINT CType; + SQLSMALLINT precision; /* the precision for numeric or timestamp type */ + SQLSMALLINT scale; /* the scale for numeric type */ + /* area for work variables */ + char data_at_exec; +}; + +typedef struct { + SQLLEN *EXEC_used; /* amount of data */ + char *EXEC_buffer; /* the data */ + OID lobj_oid; +} PutDataClass; + +/* + * ParameterImplClass -- stores implementation information about a parameter + */ +struct ParameterImplClass_ { + esNAME paramName; /* this is unavailable even in 8.1 */ + SQLSMALLINT paramType; + SQLSMALLINT SQLType; + OID ESType; + SQLULEN column_size; + SQLSMALLINT decimal_digits; + SQLSMALLINT precision; /* the precision for numeric or timestamp type */ + SQLSMALLINT scale; /* the scale for numeric type */ +}; + +typedef struct { + GetDataClass fdata; + SQLSMALLINT allocated; + GetDataClass *gdata; +} GetDataInfo; +typedef struct { + SQLSMALLINT allocated; + PutDataClass *pdata; +} PutDataInfo; + +#define PARSE_PARAM_CAST FALSE +#define EXEC_PARAM_CAST TRUE +#define SIMPLE_PARAM_CAST TRUE + +#define CALC_BOOKMARK_ADDR(book, offset, bind_size, index) \ + (book->buffer + offset \ + + (bind_size > 0 \ + ? bind_size \ + : (SQL_C_VARBOOKMARK == book->returntype ? book->buflen \ + : sizeof(UInt4))) \ + * index) + +/* Macros to handle estype of parameters */ +#define PIC_get_estype(pari) ((pari).ESType) +#define PIC_set_estype(pari, type) ((pari).ESType = (type)) +#define PIC_dsp_estype(conn, pari) \ + ((pari).ESType ? 
(pari).ESType : sqltype_to_estype(conn, (pari).SQLType)) + +void extend_column_bindings(ARDFields *opts, SQLSMALLINT num_columns); +void reset_a_column_binding(ARDFields *opts, int icol); +void extend_parameter_bindings(APDFields *opts, SQLSMALLINT num_params); +void extend_iparameter_bindings(IPDFields *opts, SQLSMALLINT num_params); +void reset_a_parameter_binding(APDFields *opts, int ipar); +void reset_a_iparameter_binding(IPDFields *opts, int ipar); +int CountParameters(const StatementClass *stmt, Int2 *inCount, Int2 *ioCount, + Int2 *outputCount); +void GetDataInfoInitialize(GetDataInfo *gdata); +void extend_getdata_info(GetDataInfo *gdata, SQLSMALLINT num_columns, + BOOL shrink); +void reset_a_getdata_info(GetDataInfo *gdata, int icol); +void GDATA_unbind_cols(GetDataInfo *gdata, BOOL freeall); +void PutDataInfoInitialize(PutDataInfo *pdata); +void extend_putdata_info(PutDataInfo *pdata, SQLSMALLINT num_params, + BOOL shrink); +void reset_a_putdata_info(PutDataInfo *pdata, int ipar); +void PDATA_free_params(PutDataInfo *pdata, char option); +void SC_param_next(const StatementClass *, int *param_number, + ParameterInfoClass **, ParameterImplClass **); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sql-odbc/src/odfesqlodbc/catfunc.h b/sql-odbc/src/odfesqlodbc/catfunc.h new file mode 100644 index 0000000000..647e68f4d8 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/catfunc.h @@ -0,0 +1,236 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __CATFUNC_H__ +#define __CATFUNC_H__ + +#include "es_odbc.h" + +/* SQLTables field position */ +enum { + TABLES_CATALOG_NAME = 0, + TABLES_SCHEMA_NAME, + TABLES_TABLE_NAME, + TABLES_TABLE_TYPE, + TABLES_REMARKS, + NUM_OF_TABLES_FIELDS +}; + +/* SQLColumns field position */ +enum { + COLUMNS_CATALOG_NAME = 0, + COLUMNS_SCHEMA_NAME, + COLUMNS_TABLE_NAME, + COLUMNS_COLUMN_NAME, + COLUMNS_DATA_TYPE, + COLUMNS_TYPE_NAME, + COLUMNS_PRECISION, + COLUMNS_LENGTH, + COLUMNS_SCALE, + COLUMNS_RADIX, + COLUMNS_NULLABLE, + COLUMNS_REMARKS, + COLUMNS_COLUMN_DEF /* ODBC 3.0 but always use it */ + , + COLUMNS_SQL_DATA_TYPE, + COLUMNS_SQL_DATETIME_SUB, + COLUMNS_CHAR_OCTET_LENGTH, + COLUMNS_ORDINAL_POSITION, + COLUMNS_IS_NULLABLE, + COLUMNS_DISPLAY_SIZE, + COLUMNS_FIELD_TYPE, + COLUMNS_AUTO_INCREMENT, + COLUMNS_PHYSICAL_NUMBER, + COLUMNS_TABLE_OID, + COLUMNS_BASE_TYPEID, + COLUMNS_ATTTYPMOD, + COLUMNS_TABLE_INFO, + NUM_OF_COLUMNS_FIELDS +}; +/* SQLPrimaryKeys field position */ +enum { + PKS_TABLE_CAT = 0, + PKS_TABLE_SCHEM, + PKS_TABLE_NAME, + PKS_COLUMN_NAME, + PKS_KEY_SQ, + PKS_PK_NAME, + NUM_OF_PKS_FIELDS +}; +/* SQLForeignKeys field position */ +enum { + FKS_PKTABLE_CAT = 0, + FKS_PKTABLE_SCHEM, + FKS_PKTABLE_NAME, + FKS_PKCOLUMN_NAME, + FKS_FKTABLE_CAT, + FKS_FKTABLE_SCHEM, + FKS_FKTABLE_NAME, + FKS_FKCOLUMN_NAME, + FKS_KEY_SEQ, + FKS_UPDATE_RULE, + FKS_DELETE_RULE, + FKS_FK_NAME, + FKS_PK_NAME, + FKS_DEFERRABILITY, + FKS_TRIGGER_NAME, + NUM_OF_FKS_FIELDS +}; +/* SQLColAttribute */ +enum { + COLATTR_DESC_COUNT = -1, + COLATTR_DESC_AUTO_UNIQUE_VALUE = 0, + COLATTR_DESC_BASE_COLUMN_NAME, + COLATTR_DESC_BASE_TABLE_NAME, + COLATTR_DESC_CASE_SENSITIVE, + COLATTR_DESC_CATALOG_NAME, + COLATTR_DESC_CONCISE_TYPE, + COLATTR_DESC_DISPLAY_SIZE, + COLATTR_DESC_FIXED_PREC_SCALE, + COLATTR_DESC_LABEL, + COLATTR_DESC_LENGTH, + COLATTR_DESC_LITERAL_PREFIX, + 
COLATTR_DESC_LITERAL_SUFFIX, + COLATTR_DESC_LOCAL_TYPE_NAME, + COLATTR_DESC_NAME, + COLATTR_DESC_NULLABLE, + COLATTR_DESC_NUM_PREX_RADIX, + COLATTR_DESC_OCTET_LENGTH, + COLATTR_DESC_PRECISION, + COLATTR_DESC_SCALE, + COLATTR_DESC_SCHEMA_NAME, + COLATTR_DESC_SEARCHABLE, + COLATTR_DESC_TABLE_NAME, + COLATTR_DESC_TYPE, + COLATTR_DESC_TYPE_NAME, + COLATTR_DESC_UNNAMED, + COLATTR_DESC_UNSIGNED, + COLATTR_DESC_UPDATABLE +}; + +/* SQLStatistics field position */ +enum { + STATS_CATALOG_NAME = 0, + STATS_SCHEMA_NAME, + STATS_TABLE_NAME, + STATS_NON_UNIQUE, + STATS_INDEX_QUALIFIER, + STATS_INDEX_NAME, + STATS_TYPE, + STATS_SEQ_IN_INDEX, + STATS_COLUMN_NAME, + STATS_COLLATION, + STATS_CARDINALITY, + STATS_PAGES, + STATS_FILTER_CONDITION, + NUM_OF_STATS_FIELDS +}; + +/* SQLProcedure field position */ +enum { + PRO_PROCEDURE_CAT = 0, + PRO_PROCEDURE_SCHEM, + PRO_PROCEDURE_NAME, + PRO_NUM_INPUT_PARAMS, + PRO_NUM_OUTPUT_PARAMS, + PRO_RESULT_SETS, + PRO_REMARKS, + PRO_PROCEDURE_TYPE, + NUM_OF_PRO_FIELDS +}; + +/* SQLProcedureColumns field position */ +enum { + PROCOLS_PROCEDURE_CAT = 0, + PROCOLS_PROCEDURE_SCHEM, + PROCOLS_PROCEDURE_NAME, + PROCOLS_COLUMN_NAME, + PROCOLS_COLUMN_TYPE, + PROCOLS_DATA_TYPE, + PROCOLS_TYPE_NAME, + PROCOLS_COLUMN_SIZE, + PROCOLS_BUFFER_LENGTH, + PROCOLS_DECIMAL_DIGITS, + PROCOLS_NUM_PREC_RADIX, + PROCOLS_NULLABLE, + PROCOLS_REMARKS, + PROCOLS_COLUMN_DEF, + PROCOLS_SQL_DATA_TYPE, + PROCOLS_SQL_DATETIME_SUB, + PROCOLS_CHAR_OCTET_LENGTH, + PROCOLS_ORDINAL_POSITION, + PROCOLS_IS_NULLABLE, + NUM_OF_PROCOLS_FIELDS +}; +/* SQLGetTypeInfo field position */ +enum { + GETTYPE_TYPE_NAME = 0, + GETTYPE_DATA_TYPE, + GETTYPE_COLUMN_SIZE, + GETTYPE_LITERAL_PREFIX, + GETTYPE_LITERAL_SUFFIX, + GETTYPE_CREATE_PARAMS, + GETTYPE_NULLABLE, + GETTYPE_CASE_SENSITIVE, + GETTYPE_SEARCHABLE, + GETTYPE_UNSIGNED_ATTRIBUTE, + GETTYPE_FIXED_PREC_SCALE, + GETTYPE_AUTO_UNIQUE_VALUE, + GETTYPE_LOCAL_TYPE_NAME, + GETTYPE_MINIMUM_SCALE, + GETTYPE_MAXIMUM_SCALE, + GETTYPE_SQL_DATA_TYPE, 
+ GETTYPE_SQL_DATETIME_SUB, + GETTYPE_NUM_PREC_RADIX, + GETTYPE_INTERVAL_PRECISION, + NUM_OF_GETTYPE_FIELDS +}; +/* SQLSpecialColumns field position */ +enum { + SPECOLS_SCOPE = 0, + SPECOLS_COLUMN_NAME, + SPECOLS_DATA_TYPE, + SPECOLS_TYPE_NAME, + SPECOLS_COLUMN_SIZE, + SPECOLS_BUFFER_LENGTH, + SPECOLS_DECIMAL_DIGITS, + SPECOLS_PSEUDO_COLUMN, + NUM_OF_SPECOLS_FIELDS +}; +/* SQLColumnPrivileges field position */ +enum { + COLPRIV_TABLE_CAT = 0, + COLPRIV_TABLE_SCHEM, + COLPRIV_TABLE_NAME, + COLPRIV_COLUMN_NAME, + COLPRIV_GRANTOR, + COLPRIV_GRANTEE, + COLPRIV_PRIVILEGE, + COLPRIV_IS_GRANTABLE, + NUM_OF_COLPRIV_FIELDS +}; +/* SQLTablePrivileges field position */ +enum { + TABPRIV_TABLE_CAT = 0, + TABPRIV_TABLE_SCHEM, + TABPRIV_TABLE_NAME, + TABPRIV_GRANTOR, + TABPRIV_GRANTEE, + TABPRIV_PRIVILEGE, + TABPRIV_IS_GRANTABLE, + NUM_OF_TABPRIV_FIELDS +}; +#endif /* __CARFUNC_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/columninfo.c b/sql-odbc/src/odfesqlodbc/columninfo.c new file mode 100644 index 0000000000..5512a628b7 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/columninfo.c @@ -0,0 +1,88 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "columninfo.h" +#include "es_types.h" + +#include +#include +#include "es_apifunc.h" +#include "es_connection.h" + +ColumnInfoClass *CI_Constructor(void) { + ColumnInfoClass *rv; + + rv = (ColumnInfoClass *)malloc(sizeof(ColumnInfoClass)); + + if (rv) { + rv->refcount = 0; + rv->num_fields = 0; + rv->coli_array = NULL; + } + + return rv; +} + +void CI_Destructor(ColumnInfoClass *self) { + CI_free_memory(self); + + free(self); +} + +void CI_free_memory(ColumnInfoClass *self) { + register Int2 lf; + int num_fields = self->num_fields; + + /* Safe to call even if null */ + self->num_fields = 0; + if (self->coli_array) { + for (lf = 0; lf < num_fields; lf++) { + if (self->coli_array[lf].name) { + free(self->coli_array[lf].name); + self->coli_array[lf].name = NULL; + } + } + free(self->coli_array); + self->coli_array = NULL; + } +} + +void CI_set_num_fields(ColumnInfoClass *self, SQLSMALLINT new_num_fields) { + CI_free_memory(self); /* always safe to call */ + + self->num_fields = new_num_fields; + + self->coli_array = + (struct srvr_info *)calloc(sizeof(struct srvr_info), self->num_fields); +} + +void CI_set_field_info(ColumnInfoClass *self, int field_num, + const char *new_name, OID new_adtid, Int2 new_adtsize, + Int4 new_atttypmod, OID new_relid, OID new_attid) { + /* check bounds */ + if ((field_num < 0) || (field_num >= self->num_fields)) + return; + + /* store the info */ + self->coli_array[field_num].name = strdup(new_name); + self->coli_array[field_num].adtid = new_adtid; + self->coli_array[field_num].adtsize = new_adtsize; + self->coli_array[field_num].atttypmod = new_atttypmod; + + self->coli_array[field_num].display_size = ES_ADT_UNSET; + self->coli_array[field_num].relid = new_relid; + self->coli_array[field_num].attid = (short)new_attid; +} diff --git a/sql-odbc/src/odfesqlodbc/columninfo.h b/sql-odbc/src/odfesqlodbc/columninfo.h new file mode 100644 index 0000000000..c66b93d717 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/columninfo.h 
@@ -0,0 +1,65 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __COLUMNINFO_H__ +#define __COLUMNINFO_H__ + +#include "es_odbc.h" + +struct ColumnInfoClass_ { + UInt4 refcount; /* reference count. A ColumnInfo can be shared by + * several qresults. */ + Int2 num_fields; + struct srvr_info { + char *name; /* field name */ + OID adtid; /* type oid */ + Int2 adtsize; /* type size */ + Int4 display_size; /* the display size (longest row) */ + Int4 atttypmod; /* the length of bpchar/varchar */ + OID relid; /* the relation id */ + Int2 attid; /* the attribute number */ + } * coli_array; +}; + +#define CI_get_num_fields(self) (self->num_fields) +#define CI_get_oid(self, col) (self->coli_array[col].adtid) +#define CI_get_fieldname(self, col) (self->coli_array[col].name) +#define CI_get_fieldsize(self, col) (self->coli_array[col].adtsize) +#define CI_get_display_size(self, col) (self->coli_array[col].display_size) +#define CI_get_atttypmod(self, col) (self->coli_array[col].atttypmod) +#define CI_get_relid(self, col) (self->coli_array[col].relid) +#define CI_get_attid(self, col) (self->coli_array[col].attid) + +ColumnInfoClass *CI_Constructor(void); +void CI_Destructor(ColumnInfoClass *self); +void CI_free_memory(ColumnInfoClass *self); + +/* functions for setting up the fields from within the program, */ +/* without reading from a socket */ +void CI_set_num_fields(ColumnInfoClass *self, SQLSMALLINT 
new_num_fields); + +// Used in es_parse_results.cpp +#ifdef __cplusplus +extern "C" { +#endif +void CI_set_field_info(ColumnInfoClass *self, int field_num, + const char *new_name, OID new_adtid, Int2 new_adtsize, + Int4 atttypmod, OID new_relid, OID new_attid); +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sql-odbc/src/odfesqlodbc/connection.c b/sql-odbc/src/odfesqlodbc/connection.c new file mode 100644 index 0000000000..afb8cdc1d5 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/connection.c @@ -0,0 +1,712 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +/* TryEnterCritiaclSection needs the following #define */ +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0400 +#endif /* _WIN32_WINNT */ + +#include +#include +#include + +#include "es_connection.h" +#include "misc.h" + +/* for htonl */ +#ifdef WIN32 +#include +#else +#include +#endif + +#include "dlg_specific.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_helper.h" +#include "loadlib.h" +#include "multibyte.h" +#include "qresult.h" +#include "statement.h" +#ifndef WIN32 +#include +#endif +#define SAFE_STR(s) (NULL != (s) ? 
(s) : "(null)") + +#define ELASTIC_MAXIMUM_ID_LEN SHRT_MAX // Max 16-bit signed int +#define ELASTIC_TRANSACTION_SUPPORT 0 // Not supported +#define STMT_INCREMENT \ + 16 /* how many statement holders to allocate \ \ + * at a time */ + +#define PROTOCOL3_OPTS_MAX 30 + +RETCODE SQL_API ESAPI_AllocConnect(HENV henv, HDBC *phdbc) { + EnvironmentClass *env = (EnvironmentClass *)henv; + ConnectionClass *conn; + CSTR func = "ESAPI_AllocConnect"; + + MYLOG(ES_TRACE, "entering...\n"); + + conn = CC_Constructor(); + MYLOG(ES_DEBUG, "**** henv = %p, conn = %p\n", henv, conn); + + if (!conn) { + env->errormsg = "Couldn't allocate memory for Connection object."; + env->errornumber = ENV_ALLOC_ERROR; + *phdbc = SQL_NULL_HDBC; + EN_log_error(func, "", env); + return SQL_ERROR; + } + + if (!EN_add_connection(env, conn)) { + env->errormsg = "Maximum number of connections exceeded."; + env->errornumber = ENV_ALLOC_ERROR; + CC_Destructor(conn); + *phdbc = SQL_NULL_HDBC; + EN_log_error(func, "", env); + return SQL_ERROR; + } + + if (phdbc) + *phdbc = (HDBC)conn; + + return SQL_SUCCESS; +} + +RETCODE SQL_API ESAPI_Connect(HDBC hdbc, const SQLCHAR *szDSN, + SQLSMALLINT cbDSN, const SQLCHAR *szUID, + SQLSMALLINT cbUID, const SQLCHAR *szAuthStr, + SQLSMALLINT cbAuthStr) { + ConnectionClass *conn = (ConnectionClass *)hdbc; + ConnInfo *ci; + CSTR func = "ESAPI_Connect"; + RETCODE ret = SQL_SUCCESS; + char fchar, *tmpstr; + + MYLOG(ES_TRACE, "entering..cbDSN=%hi.\n", cbDSN); + + if (!conn) { + CC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + ci = &conn->connInfo; + CC_conninfo_init(ci, INIT_GLOBALS); + + make_string(szDSN, cbDSN, ci->dsn, sizeof(ci->dsn)); + + /* get the values for the DSN from the registry */ + getDSNinfo(ci, NULL); + + logs_on_off(1, ci->drivers.loglevel, ci->drivers.loglevel); + /* initialize es_version from connInfo.protocol */ + CC_initialize_es_version(conn); + + /* + * override values from DSN info with UID and authStr(pwd) This only + * occurs if 
the values are actually there. + */ + fchar = ci->username[0]; /* save the first byte */ + make_string(szUID, cbUID, ci->username, sizeof(ci->username)); + if ('\0' == ci->username[0]) /* an empty string is specified */ + ci->username[0] = fchar; /* restore the original username */ + tmpstr = make_string(szAuthStr, cbAuthStr, NULL, 0); + if (tmpstr) { + if (tmpstr[0]) /* non-empty string is specified */ + STR_TO_NAME(ci->password, tmpstr); + free(tmpstr); + } + + MYLOG(ES_DEBUG, "conn = %p (DSN='%s', UID='%s', PWD='%s')\n", conn, ci->dsn, + ci->username, NAME_IS_VALID(ci->password) ? "xxxxx" : ""); + + if ((fchar = CC_connect(conn)) <= 0) { + /* Error messages are filled in */ + CC_log_error(func, "Error on CC_connect", conn); + ret = SQL_ERROR; + } + if (SQL_SUCCESS == ret && 2 == fchar) + ret = SQL_SUCCESS_WITH_INFO; + + MYLOG(ES_TRACE, "leaving..%d.\n", ret); + + return ret; +} + +RETCODE SQL_API ESAPI_BrowseConnect(HDBC hdbc, const SQLCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, + SQLCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut) { + UNUSED(szConnStrIn, cbConnStrIn, szConnStrOut, cbConnStrOutMax, + cbConnStrOutMax, pcbConnStrOut); + CSTR func = "ESAPI_BrowseConnect"; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering...\n"); + + CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, "Function not implemented", + func); + return SQL_ERROR; +} + +/* Drop any hstmts open on hdbc and disconnect from database */ +RETCODE SQL_API ESAPI_Disconnect(HDBC hdbc) { + ConnectionClass *conn = (ConnectionClass *)hdbc; + CSTR func = "ESAPI_Disconnect"; + RETCODE ret = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering...\n"); + + if (!conn) { + CC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + if (conn->status == CONN_EXECUTING) { + // This should only be possible if transactions are supported, but they + // are not. 
Return an error regardless + CC_set_error(conn, CONN_IN_USE, "Connection is currently in use!", + func); + return SQL_ERROR; + } + + logs_on_off(-1, conn->connInfo.drivers.loglevel, + conn->connInfo.drivers.loglevel); + MYLOG(ES_DEBUG, "about to CC_cleanup\n"); + + /* Close the connection and free statements */ + ret = CC_cleanup(conn, FALSE); + + MYLOG(ES_DEBUG, "done CC_cleanup\n"); + MYLOG(ES_TRACE, "leaving...\n"); + + return ret; +} + +RETCODE SQL_API ESAPI_FreeConnect(HDBC hdbc) { + ConnectionClass *conn = (ConnectionClass *)hdbc; + CSTR func = "ESAPI_FreeConnect"; + EnvironmentClass *env; + + MYLOG(ES_TRACE, "entering...hdbc=%p\n", hdbc); + + if (!conn) { + CC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + /* Remove the connection from the environment */ + if (NULL != (env = CC_get_env(conn)) && !EN_remove_connection(env, conn)) { + // This should only be possible if transactions are supported, but they + // are not. Return an error regardless + CC_set_error(conn, CONN_IN_USE, "Connection is currently in use!", + func); + return SQL_ERROR; + } + + CC_Destructor(conn); + + MYLOG(ES_TRACE, "leaving...\n"); + + return SQL_SUCCESS; +} + +/* + * IMPLEMENTATION CONNECTION CLASS + */ + +static void reset_current_schema(ConnectionClass *self) { + if (self->current_schema) { + free(self->current_schema); + self->current_schema = NULL; + } + self->current_schema_valid = FALSE; +} + +static ConnectionClass *CC_alloc(void) { + return (ConnectionClass *)calloc(sizeof(ConnectionClass), 1); +} + +static void CC_lockinit(ConnectionClass *self) { + UNUSED(self); + INIT_CONNLOCK(self); + INIT_CONN_CS(self); +} + +static ConnectionClass *CC_initialize(ConnectionClass *rv, BOOL lockinit) { + size_t clear_size; + + clear_size = (char *)&(rv->cs) - (char *)rv; + + memset(rv, 0, clear_size); + rv->status = CONN_NOT_CONNECTED; + rv->transact_status = CONN_IN_AUTOCOMMIT; /* autocommit by default */ + rv->unnamed_prepared_stmt = NULL; + + rv->stmts = + 
(StatementClass **)malloc(sizeof(StatementClass *) * STMT_INCREMENT); + if (!rv->stmts) + goto cleanup; + memset(rv->stmts, 0, sizeof(StatementClass *) * STMT_INCREMENT); + + rv->num_stmts = STMT_INCREMENT; + rv->descs = + (DescriptorClass **)malloc(sizeof(DescriptorClass *) * STMT_INCREMENT); + if (!rv->descs) + goto cleanup; + memset(rv->descs, 0, sizeof(DescriptorClass *) * STMT_INCREMENT); + + rv->num_descs = STMT_INCREMENT; + + rv->lobj_type = ES_TYPE_LO_UNDEFINED; + if (isMsAccess()) + rv->ms_jet = 1; + rv->isolation = 0; // means initially unknown server's default isolation + rv->mb_maxbyte_per_char = 1; + rv->max_identifier_length = ELASTIC_MAXIMUM_ID_LEN; + rv->autocommit_public = SQL_AUTOCOMMIT_ON; + + /* Initialize statement options to defaults */ + /* Statements under this conn will inherit these options */ + + InitializeStatementOptions(&rv->stmtOptions); + InitializeARDFields(&rv->ardOptions); + InitializeAPDFields(&rv->apdOptions); +#ifdef _HANDLE_ENLIST_IN_DTC_ + rv->asdum = NULL; + rv->gTranInfo = 0; +#endif /* _HANDLE_ENLIST_IN_DTC_ */ + if (lockinit) + CC_lockinit(rv); + + return rv; + +cleanup: + CC_Destructor(rv); + return NULL; +} + +ConnectionClass *CC_Constructor() { + ConnectionClass *rv, *retrv = NULL; + + if (rv = CC_alloc(), NULL != rv) + retrv = CC_initialize(rv, TRUE); + return retrv; +} + +char CC_Destructor(ConnectionClass *self) { + MYLOG(ES_TRACE, "entering self=%p\n", self); + + if (self->status == CONN_EXECUTING) + return 0; + + CC_cleanup(self, FALSE); /* cleanup socket and statements */ + + MYLOG(ES_DEBUG, "after CC_Cleanup\n"); + + /* Free up statement holders */ + if (self->stmts) { + free(self->stmts); + self->stmts = NULL; + } + if (self->descs) { + free(self->descs); + self->descs = NULL; + } + MYLOG(ES_DEBUG, "after free statement holders\n"); + + NULL_THE_NAME(self->schemaIns); + NULL_THE_NAME(self->tableIns); + CC_conninfo_release(&self->connInfo); + if (self->__error_message) + free(self->__error_message); + 
DELETE_CONN_CS(self); + DELETE_CONNLOCK(self); + free(self); + + MYLOG(ES_TRACE, "leaving\n"); + + return 1; +} + +void CC_clear_error(ConnectionClass *self) { + if (!self) + return; + CONNLOCK_ACQUIRE(self); + self->__error_number = 0; + if (self->__error_message) { + free(self->__error_message); + self->__error_message = NULL; + } + self->sqlstate[0] = '\0'; + CONNLOCK_RELEASE(self); +} + +/* This is called by SQLSetConnectOption etc also */ +BOOL CC_set_autocommit(ConnectionClass *self, BOOL on) { + BOOL currsts = CC_is_in_autocommit(self); + + if ((on && currsts) || (!on && !currsts)) + return on; + MYLOG(ES_DEBUG, " %d->%d\n", currsts, on); + if (on) + self->transact_status |= CONN_IN_AUTOCOMMIT; + else + self->transact_status &= ~CONN_IN_AUTOCOMMIT; + + return on; +} + +/* Clear cached table info */ +static void CC_clear_col_info(ConnectionClass *self, BOOL destroy) { + if (self->col_info) { + int i; + COL_INFO *coli; + + for (i = 0; i < self->ntables; i++) { + if (coli = self->col_info[i], NULL != coli) { + if (destroy || coli->refcnt == 0) { + free_col_info_contents(coli); + free(coli); + self->col_info[i] = NULL; + } else + coli->acc_time = 0; + } + } + self->ntables = 0; + if (destroy) { + free(self->col_info); + self->col_info = NULL; + self->coli_allocated = 0; + } + } +} + +/* This is called by SQLDisconnect also */ +RETCODE +CC_cleanup(ConnectionClass *self, BOOL keepCommunication) { + int i; + StatementClass *stmt; + DescriptorClass *desc; + RETCODE ret = SQL_SUCCESS; + CSTR func = "CC_cleanup"; + + if (self->status == CONN_EXECUTING) + return FALSE; + + MYLOG(ES_TRACE, "entering self=%p\n", self); + + ENTER_CONN_CS(self); + /* Cancel an ongoing transaction */ + /* We are always in the middle of a transaction, */ + /* even if we are in auto commit. 
*/ + if (self->esconn) { + QLOG(0, "LIBES_disconnect: %p\n", self->esconn); + LIBES_disconnect(self->esconn); + self->esconn = NULL; + } else { + ret = SQL_ERROR; + CC_set_error(self, CC_not_connected(self), "Connection not open", func); + } + + MYLOG(ES_DEBUG, "after LIBES_disconnect\n"); + + /* Free all the stmts on this connection */ + for (i = 0; i < self->num_stmts; i++) { + stmt = self->stmts[i]; + if (stmt) { + stmt->hdbc = NULL; /* prevent any more dbase interactions */ + + SC_Destructor(stmt); + + self->stmts[i] = NULL; + } + } + /* Free all the descs on this connection */ + for (i = 0; i < self->num_descs; i++) { + desc = self->descs[i]; + if (desc) { + DC_get_conn(desc) = NULL; /* prevent any more dbase interactions */ + DC_Destructor(desc); + free(desc); + self->descs[i] = NULL; + } + } + + /* Check for translation dll */ +#ifdef WIN32 + if (!keepCommunication && self->translation_handle) { + FreeLibrary(self->translation_handle); + self->translation_handle = NULL; + } +#endif + + if (!keepCommunication) { + self->status = CONN_NOT_CONNECTED; + self->transact_status = CONN_IN_AUTOCOMMIT; + self->unnamed_prepared_stmt = NULL; + } + if (!keepCommunication) { + CC_conninfo_init(&(self->connInfo), CLEANUP_FOR_REUSE); + if (self->original_client_encoding) { + free(self->original_client_encoding); + self->original_client_encoding = NULL; + } + if (self->locale_encoding) { + free(self->locale_encoding); + self->locale_encoding = NULL; + } + if (self->server_encoding) { + free(self->server_encoding); + self->server_encoding = NULL; + } + reset_current_schema(self); + } + /* Free cached table info */ + CC_clear_col_info(self, TRUE); + if (self->num_discardp > 0 && self->discardp) { + for (i = 0; i < self->num_discardp; i++) + free(self->discardp[i]); + self->num_discardp = 0; + } + if (self->discardp) { + free(self->discardp); + self->discardp = NULL; + } + + LEAVE_CONN_CS(self); + MYLOG(ES_TRACE, "leaving\n"); + return ret; +} + +#ifndef 
ES_DIAG_SEVERITY_NONLOCALIZED +#define ES_DIAG_SEVERITY_NONLOCALIZED 'V' +#endif + +#define TRANSACTION_ISOLATION "transaction_isolation" +#define ISOLATION_SHOW_QUERY "show " TRANSACTION_ISOLATION + +char CC_add_statement(ConnectionClass *self, StatementClass *stmt) { + int i; + char ret = TRUE; + + MYLOG(ES_DEBUG, "self=%p, stmt=%p\n", self, stmt); + + CONNLOCK_ACQUIRE(self); + for (i = 0; i < self->num_stmts; i++) { + if (!self->stmts[i]) { + stmt->hdbc = self; + self->stmts[i] = stmt; + break; + } + } + + if (i >= self->num_stmts) /* no more room -- allocate more memory */ + { + StatementClass **newstmts; + Int2 new_num_stmts; + + new_num_stmts = STMT_INCREMENT + self->num_stmts; + + if (new_num_stmts > 0) + newstmts = (StatementClass **)realloc( + self->stmts, sizeof(StatementClass *) * new_num_stmts); + else + newstmts = NULL; /* num_stmts overflowed */ + if (!newstmts) + ret = FALSE; + else { + self->stmts = newstmts; + memset(&self->stmts[self->num_stmts], 0, + sizeof(StatementClass *) * STMT_INCREMENT); + + stmt->hdbc = self; + self->stmts[self->num_stmts] = stmt; + + self->num_stmts = new_num_stmts; + } + } + CONNLOCK_RELEASE(self); + + return ret; +} + +static void CC_set_error_statements(ConnectionClass *self) { + int i; + + MYLOG(ES_TRACE, "entering self=%p\n", self); + + for (i = 0; i < self->num_stmts; i++) { + if (NULL != self->stmts[i]) + SC_ref_CC_error(self->stmts[i]); + } +} + +char CC_remove_statement(ConnectionClass *self, StatementClass *stmt) { + int i; + char ret = FALSE; + + CONNLOCK_ACQUIRE(self); + for (i = 0; i < self->num_stmts; i++) { + if (self->stmts[i] == stmt && stmt->status != STMT_EXECUTING) { + self->stmts[i] = NULL; + ret = TRUE; + break; + } + } + CONNLOCK_RELEASE(self); + + return ret; +} + +char CC_get_escape(const ConnectionClass *self) { + UNUSED(self); + return ESCAPE_IN_LITERAL; +} + +int CC_get_max_idlen(ConnectionClass *self) { + UNUSED(self); + return self->max_identifier_length; +} + +SQLUINTEGER 
CC_get_isolation(ConnectionClass *self) { + UNUSED(self); + return ELASTIC_TRANSACTION_SUPPORT; +} + +void CC_set_error(ConnectionClass *self, int number, const char *message, + const char *func) { + CONNLOCK_ACQUIRE(self); + if (self->__error_message) + free(self->__error_message); + self->__error_number = number; + self->__error_message = message ? strdup(message) : NULL; + if (0 != number) + CC_set_error_statements(self); + if (func && number != 0) + CC_log_error(func, "", self); + CONNLOCK_RELEASE(self); +} + +void CC_set_errormsg(ConnectionClass *self, const char *message) { + CONNLOCK_ACQUIRE(self); + if (self->__error_message) + free(self->__error_message); + self->__error_message = message ? strdup(message) : NULL; + CONNLOCK_RELEASE(self); +} + +int CC_get_error(ConnectionClass *self, int *number, char **message) { + int rv; + + MYLOG(ES_TRACE, "entering\n"); + + CONNLOCK_ACQUIRE(self); + + if (CC_get_errornumber(self)) { + *number = CC_get_errornumber(self); + *message = CC_get_errormsg(self); + } + rv = (CC_get_errornumber(self) != 0); + + CONNLOCK_RELEASE(self); + + MYLOG(ES_TRACE, "leaving\n"); + + return rv; +} +void CC_log_error(const char *func, const char *desc, + const ConnectionClass *self) { +#define NULLCHECK(a) (a ? 
a : "(NULL)") + + if (self) { + MYLOG(ES_ERROR, "CONN ERROR: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", + func, desc, self->__error_number, + NULLCHECK(self->__error_message)); + MYLOG(ES_ERROR, + " " + "------------------------------------------------------------\n"); + MYLOG(ES_ERROR, + " henv=%p, conn=%p, status=%u, num_stmts=%d\n", + self->henv, self, self->status, self->num_stmts); + MYLOG(ES_ERROR, + " esconn=%p, stmts=%p, lobj_type=%d\n", self->esconn, + self->stmts, self->lobj_type); + } else { + MYLOG(ES_ERROR, "INVALID CONNECTION HANDLE ERROR: func=%s, desc='%s'\n", func, + desc); + } +} + +const char *CurrCat(const ConnectionClass *conn) { + return conn->cluster_name; +} + +const char *CurrCatString(const ConnectionClass *conn) { + const char *cat = CurrCat(conn); + + if (!cat) + cat = NULL_STRING; + return cat; +} + +/*------ + * Create a null terminated lower-case string if the + * original string contains upper-case characters. + * The SQL_NTS length is considered. + *------ + */ +SQLCHAR *make_lstring_ifneeded(ConnectionClass *conn, const SQLCHAR *s, + ssize_t len, BOOL ifallupper) { + ssize_t length = len; + char *str = NULL; + const char *ccs = (const char *)s; + + if (s && (len > 0 || (len == SQL_NTS && (length = strlen(ccs)) > 0))) { + int i; + int tchar; + encoded_str encstr; + + make_encoded_str(&encstr, conn, ccs); + for (i = 0; i < length; i++) { + tchar = encoded_nextchar(&encstr); + if (MBCS_NON_ASCII(encstr)) + continue; + if (ifallupper && islower(tchar)) { + if (str) { + free(str); + str = NULL; + } + break; + } + if (tolower(tchar) != tchar) { + if (!str) { + str = malloc(length + 1); + if (!str) + return NULL; + memcpy(str, s, length); + str[length] = '\0'; + } + str[i] = (char)tolower(tchar); + } + } + } + + return (SQLCHAR *)str; +} diff --git a/sql-odbc/src/odfesqlodbc/convert.c b/sql-odbc/src/odfesqlodbc/convert.c new file mode 100644 index 0000000000..7a03f6fa73 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/convert.c @@ -0,0 
+1,2298 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "convert.h" + +#include "misc.h" +#include "unicode_support.h" +#ifdef WIN32 +#include +#define HAVE_LOCALE_H +#endif /* WIN32 */ + +#include +#include +#include +#include + +#include "multibyte.h" +#ifdef HAVE_LOCALE_H +#include +#endif +#include +#include +#include + +#include "bind.h" +#include "catfunc.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_types.h" +#include "qresult.h" +#include "statement.h" + +CSTR NAN_STRING = "NaN"; +CSTR INFINITY_STRING = "Infinity"; +CSTR MINFINITY_STRING = "-Infinity"; + +#if defined(WIN32) || defined(__CYGWIN__) +#define TIMEZONE_GLOBAL _timezone +#define TZNAME_GLOBAL _tzname +#define DAYLIGHT_GLOBAL _daylight +#elif defined(HAVE_INT_TIMEZONE) +#define TIMEZONE_GLOBAL timezone +#define TZNAME_GLOBAL tzname +#define DAYLIGHT_GLOBAL daylight +#endif + +typedef struct { + int infinity; + int m; + int d; + int y; + int hh; + int mm; + int ss; + int fr; +} SIMPLE_TIME; + +static BOOL convert_money(const char *s, char *sout, size_t soutmax); +size_t convert_linefeeds(const char *s, char *dst, size_t max, BOOL convlf, + BOOL *changed); +static size_t convert_from_esbinary(const char *value, char *rgbValue, + SQLLEN cbValueMax); +static int convert_lo(StatementClass *stmt, const void *value, + SQLSMALLINT fCType, PTR rgbValue, SQLLEN cbValueMax, + SQLLEN *pcbValue); +static int 
conv_from_octal(const char *s); +static SQLLEN es_bin2hex(const char *src, char *dst, SQLLEN length); +#ifdef UNICODE_SUPPORT +static SQLLEN es_bin2whex(const char *src, SQLWCHAR *dst, SQLLEN length); +#endif /* UNICODE_SUPPORT */ + +/*--------- + * A Guide for date/time/timestamp conversions + * + * field_type fCType Output + * ---------- ------ ---------- + * ES_TYPE_DATE SQL_C_DEFAULT SQL_C_DATE + * ES_TYPE_DATE SQL_C_DATE SQL_C_DATE + * ES_TYPE_DATE SQL_C_TIMESTAMP SQL_C_TIMESTAMP (time = 0 + *(midnight)) ES_TYPE_TIME SQL_C_DEFAULT SQL_C_TIME ES_TYPE_TIME + *SQL_C_TIME SQL_C_TIME + * ES_TYPE_TIME SQL_C_TIMESTAMP SQL_C_TIMESTAMP (date = + *current date) ES_TYPE_ABSTIME SQL_C_DEFAULT SQL_C_TIMESTAMP + *ES_TYPE_ABSTIME SQL_C_DATE SQL_C_DATE (time is truncated) + *ES_TYPE_ABSTIME SQL_C_TIME SQL_C_TIME (date is truncated) + *ES_TYPE_ABSTIME SQL_C_TIMESTAMP SQL_C_TIMESTAMP + *--------- + */ + +/* + * Macros for unsigned long handling. + */ +#ifdef WIN32 +#define ATOI32U(val) strtoul(val, NULL, 10) +#elif defined(HAVE_STRTOUL) +#define ATOI32U(val) strtoul(val, NULL, 10) +#else /* HAVE_STRTOUL */ +#define ATOI32U atol +#endif /* WIN32 */ + +/* + * Macros for BIGINT handling. 
+ */ +#ifdef ODBCINT64 +#ifdef WIN32 +#define ATOI64(val) _strtoi64(val, NULL, 10) +#define ATOI64U(val) _strtoui64(val, NULL, 10) +#elif (SIZEOF_LONG == 8) +#define ATOI64(val) strtol(val, NULL, 10) +#define ATOI64U(val) strtoul(val, NULL, 10) +#else +#if defined(HAVE_STRTOLL) +#define ATOI64(val) strtoll(val, NULL, 10) +#define ATOI64U(val) strtoull(val, NULL, 10) +#else +static ODBCINT64 ATOI64(const char *val) { + ODBCINT64 ll; + sscanf(val, "%lld", &ll); + return ll; +} +static unsigned ODBCINT64 ATOI64U(const char *val) { + unsigned ODBCINT64 ll; + sscanf(val, "%llu", &ll); + return ll; +} +#endif /* HAVE_STRTOLL */ +#endif /* WIN32 */ +#endif /* ODBCINT64 */ + +static void parse_to_numeric_struct(const char *wv, SQL_NUMERIC_STRUCT *ns, + BOOL *overflow); + +/* + * TIMESTAMP <-----> SIMPLE_TIME + * precision support since 7.2. + * time zone support is unavailable(the stuff is unreliable) + */ +static BOOL timestamp2stime(const char *str, SIMPLE_TIME *st, BOOL *bZone, + int *zone) { + char rest[64], bc[16], *ptr; + int scnt, i; + int y, m, d, hh, mm, ss; +#ifdef TIMEZONE_GLOBAL + long timediff; +#endif + BOOL withZone = *bZone; + + *bZone = FALSE; + *zone = 0; + st->fr = 0; + st->infinity = 0; + rest[0] = '\0'; + bc[0] = '\0'; + if ((scnt = sscanf(str, "%4d-%2d-%2d %2d:%2d:%2d%31s %15s", &y, &m, &d, &hh, + &mm, &ss, rest, bc)) + < 6) { + if (scnt == 3) /* date */ + { + st->y = y; + st->m = m; + st->d = d; + st->hh = 0; + st->mm = 0; + st->ss = 0; + return TRUE; + } + if ((scnt = + sscanf(str, "%2d:%2d:%2d%31s %15s", &hh, &mm, &ss, rest, bc)) + < 3) + return FALSE; + else { + st->hh = hh; + st->mm = mm; + st->ss = ss; + if (scnt == 3) /* time */ + return TRUE; + } + } else { + st->y = y; + st->m = m; + st->d = d; + st->hh = hh; + st->mm = mm; + st->ss = ss; + if (scnt == 6) + return TRUE; + } + switch (rest[0]) { + case '+': + *bZone = TRUE; + *zone = atoi(&rest[1]); + break; + case '-': + *bZone = TRUE; + *zone = -atoi(&rest[1]); + break; + case '.': + if 
((ptr = strchr(rest, '+')) != NULL) { + *bZone = TRUE; + *zone = atoi(&ptr[1]); + *ptr = '\0'; + } else if ((ptr = strchr(rest, '-')) != NULL) { + *bZone = TRUE; + *zone = -atoi(&ptr[1]); + *ptr = '\0'; + } + for (i = 1; i < 10; i++) { + if (!isdigit((UCHAR)rest[i])) + break; + } + for (; i < 10; i++) + rest[i] = '0'; + rest[i] = '\0'; + st->fr = atoi(&rest[1]); + break; + case 'B': + if (stricmp(rest, "BC") == 0) + st->y *= -1; + return TRUE; + default: + return TRUE; + } + if (stricmp(bc, "BC") == 0) { + st->y *= -1; + } + if (!withZone || !*bZone || st->y < 1970) + return TRUE; +#ifdef TIMEZONE_GLOBAL + if (!TZNAME_GLOBAL[0] || !TZNAME_GLOBAL[0][0]) { + *bZone = FALSE; + return TRUE; + } + timediff = TIMEZONE_GLOBAL + (*zone) * 3600; + if (!DAYLIGHT_GLOBAL && timediff == 0) /* the same timezone */ + return TRUE; + else { + struct tm tm, *tm2; + time_t time0; + + *bZone = FALSE; + tm.tm_year = st->y - 1900; + tm.tm_mon = st->m - 1; + tm.tm_mday = st->d; + tm.tm_hour = st->hh; + tm.tm_min = st->mm; + tm.tm_sec = st->ss; + tm.tm_isdst = -1; + time0 = mktime(&tm); + if (time0 < 0) + return TRUE; + if (tm.tm_isdst > 0) + timediff -= 3600; + if (timediff == 0) /* the same time zone */ + return TRUE; + time0 -= timediff; +#ifdef HAVE_LOCALTIME_R + if (time0 >= 0 && (tm2 = localtime_r(&time0, &tm)) != NULL) +#else + if (time0 >= 0 && (tm2 = localtime(&time0)) != NULL) +#endif /* HAVE_LOCALTIME_R */ + { + st->y = tm2->tm_year + 1900; + st->m = tm2->tm_mon + 1; + st->d = tm2->tm_mday; + st->hh = tm2->tm_hour; + st->mm = tm2->tm_min; + st->ss = tm2->tm_sec; + *bZone = TRUE; + } + } +#endif /* TIMEZONE_GLOBAL */ + return TRUE; +} + +static int stime2timestamp(const SIMPLE_TIME *st, char *str, size_t bufsize, + BOOL bZone, int precision) { + UNUSED(bZone); + char precstr[16], zonestr[16]; + int i; + + precstr[0] = '\0'; + if (st->infinity > 0) { + return snprintf(str, bufsize, "%s", INFINITY_STRING); + } else if (st->infinity < 0) { + return snprintf(str, bufsize, "%s", 
MINFINITY_STRING); + } + if (precision > 0 && st->fr) { + SPRINTF_FIXED(precstr, ".%09d", st->fr); + if (precision < 9) + precstr[precision + 1] = '\0'; + else if (precision > 9) + precision = 9; + for (i = precision; i > 0; i--) { + if (precstr[i] != '0') + break; + precstr[i] = '\0'; + } + if (i == 0) + precstr[i] = '\0'; + } + zonestr[0] = '\0'; +#ifdef TIMEZONE_GLOBAL + if (bZone && TZNAME_GLOBAL[0] && TZNAME_GLOBAL[0][0] && st->y >= 1970) { + long zoneint; + struct tm tm; + time_t time0; + + zoneint = TIMEZONE_GLOBAL; + if (DAYLIGHT_GLOBAL && st->y >= 1900) { + tm.tm_year = st->y - 1900; + tm.tm_mon = st->m - 1; + tm.tm_mday = st->d; + tm.tm_hour = st->hh; + tm.tm_min = st->mm; + tm.tm_sec = st->ss; + tm.tm_isdst = -1; + time0 = mktime(&tm); + if (time0 >= 0 && tm.tm_isdst > 0) + zoneint -= 3600; + } + if (zoneint > 0) + SPRINTF_FIXED(zonestr, "-%02d", (int)zoneint / 3600); + else + SPRINTF_FIXED(zonestr, "+%02d", -(int)zoneint / 3600); + } +#endif /* TIMEZONE_GLOBAL */ + if (st->y < 0) + return snprintf(str, bufsize, "%.4d-%.2d-%.2d %.2d:%.2d:%.2d%s%s BC", + -st->y, st->m, st->d, st->hh, st->mm, st->ss, precstr, + zonestr); + else + return snprintf(str, bufsize, "%.4d-%.2d-%.2d %.2d:%.2d:%.2d%s%s", + st->y, st->m, st->d, st->hh, st->mm, st->ss, precstr, + zonestr); +} + +static SQLINTERVAL interval2itype(SQLSMALLINT ctype) { + SQLINTERVAL sqlitv = 0; + + switch (ctype) { + case SQL_C_INTERVAL_YEAR: + sqlitv = SQL_IS_YEAR; + break; + case SQL_C_INTERVAL_MONTH: + sqlitv = SQL_IS_MONTH; + break; + case SQL_C_INTERVAL_YEAR_TO_MONTH: + sqlitv = SQL_IS_YEAR_TO_MONTH; + break; + case SQL_C_INTERVAL_DAY: + sqlitv = SQL_IS_DAY; + break; + case SQL_C_INTERVAL_HOUR: + sqlitv = SQL_IS_HOUR; + break; + case SQL_C_INTERVAL_DAY_TO_HOUR: + sqlitv = SQL_IS_DAY_TO_HOUR; + break; + case SQL_C_INTERVAL_MINUTE: + sqlitv = SQL_IS_MINUTE; + break; + case SQL_C_INTERVAL_DAY_TO_MINUTE: + sqlitv = SQL_IS_DAY_TO_MINUTE; + break; + case SQL_C_INTERVAL_HOUR_TO_MINUTE: + sqlitv = 
SQL_IS_HOUR_TO_MINUTE; + break; + case SQL_C_INTERVAL_SECOND: + sqlitv = SQL_IS_SECOND; + break; + case SQL_C_INTERVAL_DAY_TO_SECOND: + sqlitv = SQL_IS_DAY_TO_SECOND; + break; + case SQL_C_INTERVAL_HOUR_TO_SECOND: + sqlitv = SQL_IS_HOUR_TO_SECOND; + break; + case SQL_C_INTERVAL_MINUTE_TO_SECOND: + sqlitv = SQL_IS_MINUTE_TO_SECOND; + break; + } + return sqlitv; +} + +/* + * Interval data <-----> SQL_INTERVAL_STRUCT + */ + +static int getPrecisionPart(int precision, const char *precPart) { + char fraction[] = "000000000"; + size_t fracs = (size_t)(sizeof(fraction) - 1); + size_t cpys; + + if (precision < 0) + precision = 6; /* default */ + if (precision == 0) + return 0; + cpys = strlen(precPart); + if (cpys > fracs) + cpys = fracs; + memcpy(fraction, precPart, cpys); + fraction[precision] = '\0'; + + return atoi(fraction); +} + +static BOOL interval2istruct(SQLSMALLINT ctype, int precision, const char *str, + SQL_INTERVAL_STRUCT *st) { + char lit1[64], lit2[64]; + int scnt, years, mons, days, hours, minutes, seconds; + SQLSMALLINT sign; + SQLINTERVAL itype = interval2itype(ctype); + + memset(st, 0, sizeof(SQL_INTERVAL_STRUCT)); + if ((scnt = sscanf(str, "%d-%d", &years, &mons)) >= 2) { + if (SQL_IS_YEAR_TO_MONTH == itype) { + sign = years < 0 ? SQL_TRUE : SQL_FALSE; + st->interval_type = itype; + st->interval_sign = sign; + st->intval.year_month.year = sign ? (-years) : years; + st->intval.year_month.month = mons; + return TRUE; + } + return FALSE; + } else if (scnt = sscanf(str, "%d %02d:%02d:%02d.%09s", &days, &hours, + &minutes, &seconds, lit2), + 5 == scnt || 4 == scnt) { + sign = days < 0 ? SQL_TRUE : SQL_FALSE; + st->interval_type = itype; + st->interval_sign = sign; + st->intval.day_second.day = sign ? 
(-days) : days; + st->intval.day_second.hour = hours; + st->intval.day_second.minute = minutes; + st->intval.day_second.second = seconds; + if (scnt > 4) + st->intval.day_second.fraction = getPrecisionPart(precision, lit2); + return TRUE; + } else if ((scnt = + sscanf(str, "%d %10s %d %10s", &years, lit1, &mons, lit2)) + >= 4) { + if (strnicmp(lit1, "year", 4) == 0 && strnicmp(lit2, "mon", 2) == 0 + && (SQL_IS_MONTH == itype || SQL_IS_YEAR_TO_MONTH == itype)) { + sign = years < 0 ? SQL_TRUE : SQL_FALSE; + st->interval_type = itype; + st->interval_sign = sign; + st->intval.year_month.year = sign ? (-years) : years; + st->intval.year_month.month = sign ? (-mons) : mons; + return TRUE; + } + return FALSE; + } + if ((scnt = sscanf(str, "%d %10s %d", &years, lit1, &days)) == 2) { + sign = years < 0 ? SQL_TRUE : SQL_FALSE; + if (SQL_IS_YEAR == itype + && (stricmp(lit1, "year") == 0 || stricmp(lit1, "years") == 0)) { + st->interval_type = itype; + st->interval_sign = sign; + st->intval.year_month.year = sign ? (-years) : years; + return TRUE; + } + if (SQL_IS_MONTH == itype + && (stricmp(lit1, "mon") == 0 || stricmp(lit1, "mons") == 0)) { + st->interval_type = itype; + st->interval_sign = sign; + st->intval.year_month.month = sign ? (-years) : years; + return TRUE; + } + if (SQL_IS_DAY == itype + && (stricmp(lit1, "day") == 0 || stricmp(lit1, "days") == 0)) { + st->interval_type = itype; + st->interval_sign = sign; + st->intval.day_second.day = sign ? (-years) : years; + return TRUE; + } + return FALSE; + } + if (itype == SQL_IS_YEAR || itype == SQL_IS_MONTH + || itype == SQL_IS_YEAR_TO_MONTH) { + /* these formats should've been handled above already */ + return FALSE; + } + scnt = sscanf(str, "%d %10s %02d:%02d:%02d.%09s", &days, lit1, &hours, + &minutes, &seconds, lit2); + if (scnt == 5 || scnt == 6) { + if (strnicmp(lit1, "day", 3) != 0) + return FALSE; + sign = days < 0 ? 
SQL_TRUE : SQL_FALSE; + + st->interval_type = itype; + st->interval_sign = sign; + st->intval.day_second.day = sign ? (-days) : days; + st->intval.day_second.hour = sign ? (-hours) : hours; + st->intval.day_second.minute = minutes; + st->intval.day_second.second = seconds; + if (scnt > 5) + st->intval.day_second.fraction = getPrecisionPart(precision, lit2); + return TRUE; + } + scnt = sscanf(str, "%02d:%02d:%02d.%09s", &hours, &minutes, &seconds, lit2); + if (scnt == 3 || scnt == 4) { + sign = hours < 0 ? SQL_TRUE : SQL_FALSE; + + st->interval_type = itype; + st->interval_sign = sign; + st->intval.day_second.hour = sign ? (-hours) : hours; + st->intval.day_second.minute = minutes; + st->intval.day_second.second = seconds; + if (scnt > 3) + st->intval.day_second.fraction = getPrecisionPart(precision, lit2); + return TRUE; + } + + return FALSE; +} + +#ifdef HAVE_LOCALE_H +/* + * Get the decimal point of the current locale. + * + * XXX: This isn't thread-safe, if another thread changes the locale with + * setlocale() concurrently. There are two problems with that: + * + * 1. The pointer returned by localeconv(), or the lc->decimal_point string, + * might be invalidated by calls in other threads. Until someone comes up + * with a thread-safe version of localeconv(), there isn't much we can do + * about that. (libc implementations that return a static buffer (like glibc) + * happen to be safe from the lconv struct being invalidated, but the + * decimal_point string might still not point to a static buffer). + * + * 2. Between the call to sprintf() and get_current_decimal_point(), the + * decimal point might change. That would cause set_server_decimal_point() + * to fail to recognize a decimal separator, and we might send a numeric + * string to the server that the server won't recognize. This would cause + * the query to fail in the server. + * + * XXX: we only take into account the first byte of the decimal separator.
+ */ +static char get_current_decimal_point(void) { + struct lconv *lc = localeconv(); + + return lc->decimal_point[0]; +} + +/* + * Inverse of set_server_decimal_point. + */ +static void set_client_decimal_point(char *num) { + char current_decimal_point = get_current_decimal_point(); + char *str; + + if ('.' == current_decimal_point) + return; + for (str = num; '\0' != *str; str++) { + if (*str == '.') { + *str = current_decimal_point; + break; + } + } +} +#else +static void set_client_decimal_point(char *num) { + UNUSED(num); +} +#endif /* HAVE_LOCALE_H */ + +/* This is called by SQLFetch() */ +int copy_and_convert_field_bindinfo(StatementClass *stmt, OID field_type, + int atttypmod, void *value, int col) { + ARDFields *opts = SC_get_ARDF(stmt); + BindInfoClass *bic; + SQLULEN offset = opts->row_offset_ptr ? *opts->row_offset_ptr : 0; + + if (opts->allocated <= col) + extend_column_bindings(opts, (SQLSMALLINT)(col + 1)); + bic = &(opts->bindings[col]); + SC_set_current_col(stmt, -1); + return copy_and_convert_field(stmt, field_type, atttypmod, value, + bic->returntype, bic->precision, + (PTR)(bic->buffer + offset), bic->buflen, + LENADDR_SHIFT(bic->used, offset), + LENADDR_SHIFT(bic->indicator, offset)); +} + +static double get_double_value(const char *str) { + if (stricmp(str, NAN_STRING) == 0) +#ifdef NAN + return (double)NAN; +#else + { + double a = .0; + return .0 / a; + } +#endif /* NAN */ + else if (stricmp(str, INFINITY_STRING) == 0) +#ifdef INFINITY + return (double)INFINITY; +#else + return (double)(HUGE_VAL * HUGE_VAL); +#endif /* INFINITY */ + else if (stricmp(str, MINFINITY_STRING) == 0) +#ifdef INFINITY + return (double)-INFINITY; +#else + return (double)-(HUGE_VAL * HUGE_VAL); +#endif /* INFINITY */ + return atof(str); +} + +static int char2guid(const char *str, SQLGUID *g) { + /* + * SQLGUID.Data1 is an "unsigned long" on some platforms, and + * "unsigned int" on others. 
For format "%08X", it should be an + * "unsigned int", so use a temporary variable for it. + */ + unsigned int Data1; + if (sscanf(str, + "%08X-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%" + "02hhX", + &Data1, &g->Data2, &g->Data3, &g->Data4[0], &g->Data4[1], + &g->Data4[2], &g->Data4[3], &g->Data4[4], &g->Data4[5], + &g->Data4[6], &g->Data4[7]) + < 11) + return COPY_GENERAL_ERROR; + g->Data1 = Data1; + return COPY_OK; +} + +static int effective_fraction(int fraction, int *width) { + for (*width = 9; fraction % 10 == 0; (*width)--, fraction /= 10) + ; + return fraction; +} + +static int get_terminator_len(SQLSMALLINT fCType) { + switch (fCType) { +#ifdef UNICODE_SUPPORT + case SQL_C_WCHAR: + return WCLEN; +#endif /* UNICODE_SUPPORT */ + case SQL_C_BINARY: + return 0; + } + + /* SQL_C_CHAR or INTERNAL_ASIS_TYPE */ + return 1; +} + +static SQLLEN get_adjust_len(SQLSMALLINT fCType, SQLLEN len) { + switch (fCType) { +#ifdef UNICODE_SUPPORT + case SQL_C_WCHAR: + return (len / WCLEN) * WCLEN; +#endif /* UNICODE_SUPPORT */ + } + + return len; +} + +#define BYTEA_PROCESS_ESCAPE 1 +#define BYTEA_PROCESS_BINARY 2 + +static int setup_getdataclass(SQLLEN *const length_return, + const char **const ptr_return, + int *needbuflen_return, GetDataClass *const esdc, + const char *neut_str, const OID field_type, + const SQLSMALLINT fCType, const SQLLEN cbValueMax, + const ConnectionClass *const conn) { + SQLLEN len = (-2); + const char *ptr = NULL; + int needbuflen = 0; + int result = COPY_OK; + + BOOL lf_conv = 0; + int bytea_process_kind = 0; + BOOL already_processed = FALSE; + BOOL changed = FALSE; + int len_for_wcs_term = 0; + +#ifdef UNICODE_SUPPORT + char *allocbuf = NULL; + int unicode_count = -1; + BOOL localize_needed = FALSE; + BOOL hybrid = FALSE; +#endif /* UNICODE_SUPPORT */ + + if (ES_TYPE_BYTEA == field_type) { + if (SQL_C_BINARY == fCType) + bytea_process_kind = BYTEA_PROCESS_BINARY; + else if (0 == strnicmp(neut_str, "\\x", 2)) /* hex format */ + neut_str 
+= 2; + else + bytea_process_kind = BYTEA_PROCESS_ESCAPE; + } + +#ifdef UNICODE_SUPPORT + if (0 == bytea_process_kind) { + if (get_convtype() + > 0) /* conversion between the current locale is available */ + { + BOOL wcs_debug = 0; + BOOL same_encoding = + (conn->ccsc == es_CS_code(conn->locale_encoding)); + BOOL is_utf8 = (UTF8 == conn->ccsc); + + switch (field_type) { + case ES_TYPE_UNKNOWN: + case ES_TYPE_BPCHAR: + case ES_TYPE_VARCHAR: + case ES_TYPE_TEXT: + case ES_TYPE_BPCHARARRAY: + case ES_TYPE_VARCHARARRAY: + case ES_TYPE_TEXTARRAY: + if (SQL_C_CHAR == fCType || SQL_C_BINARY == fCType) + localize_needed = (!same_encoding || wcs_debug); + if (SQL_C_WCHAR == fCType) + hybrid = (!is_utf8 || (same_encoding && wcs_debug)); + } + MYLOG(ES_DEBUG, + "localize=%d hybrid=%d is_utf8=%d same_encoding=%d " + "wcs_debug=%d\n", + localize_needed, hybrid, is_utf8, same_encoding, wcs_debug); + } + } + if (fCType == SQL_C_WCHAR) { + if (BYTEA_PROCESS_ESCAPE == bytea_process_kind) + unicode_count = (int)convert_from_esbinary(neut_str, NULL, 0) * 2; + else if (hybrid) { + MYLOG(ES_DEBUG, "hybrid estimate\n"); + if ((unicode_count = + (int)bindcol_hybrid_estimate(neut_str, lf_conv, &allocbuf)) + < 0) { + result = COPY_INVALID_STRING_CONVERSION; + goto cleanup; + } + } else /* normally */ + { + unicode_count = (int)utf8_to_ucs2_lf(neut_str, SQL_NTS, lf_conv, + NULL, 0, FALSE); + } + len = WCLEN * unicode_count; + already_processed = changed = TRUE; + } else if (localize_needed) { + if ((len = bindcol_localize_estimate(neut_str, lf_conv, &allocbuf)) + < 0) { + result = COPY_INVALID_STRING_CONVERSION; + goto cleanup; + } + already_processed = changed = TRUE; + } +#endif /* UNICODE_SUPPORT */ + + if (already_processed) /* skip */ + ; + else if (0 != bytea_process_kind) { + len = convert_from_esbinary(neut_str, NULL, 0); + if (BYTEA_PROCESS_BINARY != bytea_process_kind) + len *= 2; + changed = TRUE; + } else + /* convert linefeeds to carriage-return/linefeed */ + len =
convert_linefeeds(neut_str, NULL, 0, lf_conv, &changed); + + /* just returns length info */ + if (cbValueMax == 0) { + result = COPY_RESULT_TRUNCATED; + goto cleanup; + } + + if (!esdc->ttlbuf) + esdc->ttlbuflen = 0; + needbuflen = (int)len + get_terminator_len(fCType); + if (SQL_C_BINARY == fCType) { + /* + * Though Binary doesn't have NULL terminator, + * bindcol_localize_exec() needs output buffer + * for NULL terminator. + */ + len_for_wcs_term = 1; + } + if (changed || needbuflen > cbValueMax) { + if (needbuflen > (SQLLEN)esdc->ttlbuflen) { + esdc->ttlbuf = realloc(esdc->ttlbuf, needbuflen + len_for_wcs_term); + esdc->ttlbuflen = needbuflen; + } + + already_processed = FALSE; +#ifdef UNICODE_SUPPORT + if (fCType == SQL_C_WCHAR) { + if (BYTEA_PROCESS_ESCAPE == bytea_process_kind) { + len = convert_from_esbinary(neut_str, esdc->ttlbuf, + esdc->ttlbuflen); + len = es_bin2whex(esdc->ttlbuf, (SQLWCHAR *)esdc->ttlbuf, len); + } else { + if (!hybrid) /* normally */ + utf8_to_ucs2_lf(neut_str, SQL_NTS, lf_conv, + (SQLWCHAR *)esdc->ttlbuf, unicode_count, + FALSE); + else /* hybrid */ + { + MYLOG(ES_DEBUG, "hybrid convert\n"); + if (bindcol_hybrid_exec((SQLWCHAR *)esdc->ttlbuf, neut_str, + unicode_count + 1, lf_conv, + &allocbuf) + < 0) { + result = COPY_INVALID_STRING_CONVERSION; + goto cleanup; + } + } + } + already_processed = TRUE; + } else if (localize_needed) { + if (bindcol_localize_exec(esdc->ttlbuf, len + 1, lf_conv, &allocbuf) + < 0) { + result = COPY_INVALID_STRING_CONVERSION; + goto cleanup; + } + already_processed = TRUE; + } +#endif /* UNICODE_SUPPORT */ + + if (already_processed) + ; + else if (0 != bytea_process_kind) { + len = + convert_from_esbinary(neut_str, esdc->ttlbuf, esdc->ttlbuflen); + if (BYTEA_PROCESS_ESCAPE == bytea_process_kind) + len = es_bin2hex(esdc->ttlbuf, esdc->ttlbuf, len); + } else + convert_linefeeds(neut_str, esdc->ttlbuf, esdc->ttlbuflen, lf_conv, + &changed); + ptr = esdc->ttlbuf; + esdc->ttlbufused = len; + } else { + if 
(esdc->ttlbuf) { + free(esdc->ttlbuf); + esdc->ttlbuf = NULL; + } + ptr = neut_str; + } +cleanup: +#ifdef UNICODE_SUPPORT + if (allocbuf) + free(allocbuf); +#endif /* UNICODE_SUPPORT */ + + *length_return = len; + *ptr_return = ptr; + *needbuflen_return = needbuflen; + + return result; +} + +/* + gdata SC_get_GDTI(stmt) + current_col stmt->current_col + */ + +/* + * fCType treated in the following function is + * + * SQL_C_CHAR, SQL_C_BINARY, SQL_C_WCHAR or INTERNAL_ASIS_TYPE + */ +static int convert_text_field_to_sql_c( + GetDataInfo *const gdata, const int current_col, const char *const neut_str, + const OID field_type, const SQLSMALLINT fCType, char *const rgbValueBindRow, + const SQLLEN cbValueMax, const ConnectionClass *const conn, + SQLLEN *const length_return) { + int result = COPY_OK; + SQLLEN len = (-2); + GetDataClass *esdc; + int copy_len = 0, needbuflen = 0, i; + const char *ptr; + + MYLOG(ES_DEBUG, "field_type=%u type=%d\n", field_type, fCType); + + switch (field_type) { + case ES_TYPE_FLOAT4: + case ES_TYPE_FLOAT8: + case ES_TYPE_NUMERIC: + set_client_decimal_point((char *)neut_str); + break; + } + + if (current_col < 0) { + esdc = &(gdata->fdata); + esdc->data_left = -1; + } else + esdc = &gdata->gdata[current_col]; + if (esdc->data_left < 0) { + if (COPY_OK + != (result = + setup_getdataclass(&len, &ptr, &needbuflen, esdc, neut_str, + field_type, fCType, cbValueMax, conn))) + goto cleanup; + } else { + ptr = esdc->ttlbuf; + len = esdc->ttlbufused; + } + + MYLOG(ES_DEBUG, "DEFAULT: len = " FORMAT_LEN ", ptr = '%.*s'\n", len, + (int)len, ptr); + + if (current_col >= 0) { + if (esdc->data_left > 0) { + ptr += (len - esdc->data_left); + len = esdc->data_left; + needbuflen = (int)len + (int)(esdc->ttlbuflen - esdc->ttlbufused); + } else + esdc->data_left = len; + } + + if (cbValueMax > 0) { + BOOL already_copied = FALSE; + int terminatorlen; + + terminatorlen = get_terminator_len(fCType); + if (terminatorlen >= cbValueMax) + copy_len = 0; + else if (len 
+ terminatorlen > cbValueMax) + copy_len = (int)get_adjust_len(fCType, cbValueMax - terminatorlen); + else + copy_len = (int)len; + + if (!already_copied) { + /* Copy the data */ + if (copy_len > 0) + memcpy(rgbValueBindRow, ptr, copy_len); + /* Add null terminator */ + for (i = 0; i < terminatorlen && copy_len + i < cbValueMax; i++) + rgbValueBindRow[copy_len + i] = '\0'; + } + /* Adjust data_left for next time */ + if (current_col >= 0) + esdc->data_left -= copy_len; + } + + /* + * Finally, check for truncation so that proper status can + * be returned + */ + if (cbValueMax > 0 && needbuflen > cbValueMax) + result = COPY_RESULT_TRUNCATED; + else { + if (esdc->ttlbuf != NULL) { + free(esdc->ttlbuf); + esdc->ttlbuf = NULL; + } + } + +#ifdef UNICODE_SUPPORT + if (SQL_C_WCHAR == fCType) + MYLOG(ES_DEBUG, + " SQL_C_WCHAR, default: len = " FORMAT_LEN + ", cbValueMax = " FORMAT_LEN ", rgbValueBindRow = '%s'\n", + len, cbValueMax, rgbValueBindRow); + else +#endif /* UNICODE_SUPPORT */ + if (SQL_C_BINARY == fCType) + MYLOG(ES_DEBUG, + " SQL_C_BINARY, default: len = " FORMAT_LEN + ", cbValueMax = " FORMAT_LEN ", rgbValueBindRow = '%.*s'\n", + len, cbValueMax, copy_len, rgbValueBindRow); + else + MYLOG(ES_DEBUG, + " SQL_C_CHAR, default: len = " FORMAT_LEN + ", cbValueMax = " FORMAT_LEN ", rgbValueBindRow = '%s'\n", + len, cbValueMax, rgbValueBindRow); + +cleanup: + *length_return = len; + + return result; +} + +/* This is called by SQLGetData() */ +int copy_and_convert_field(StatementClass *stmt, OID field_type, int atttypmod, + void *valuei, SQLSMALLINT fCType, int precision, + PTR rgbValue, SQLLEN cbValueMax, SQLLEN *pcbValue, + SQLLEN *pIndicator) { + CSTR func = "copy_and_convert_field"; + const char *value = valuei; + ARDFields *opts = SC_get_ARDF(stmt); + GetDataInfo *gdata = SC_get_GDTI(stmt); + SQLLEN len = 0; + SIMPLE_TIME std_time; +#ifdef HAVE_LOCALTIME_R + struct tm tm; +#endif /* HAVE_LOCALTIME_R */ + SQLLEN pcbValueOffset, rgbValueOffset; + char 
*rgbValueBindRow = NULL; + SQLLEN *pcbValueBindRow = NULL, *pIndicatorBindRow = NULL; + SQLSETPOSIROW bind_row = stmt->bind_row; + int bind_size = opts->bind_size; + int result = COPY_OK; + const ConnectionClass *conn = SC_get_conn(stmt); + BOOL text_bin_handling; + const char *neut_str = value; + char booltemp[3]; + char midtemp[64]; + GetDataClass *esdc; + + if (stmt->current_col >= 0) { + if (stmt->current_col >= opts->allocated) { + return SQL_ERROR; + } + if (gdata->allocated != opts->allocated) + extend_getdata_info(gdata, opts->allocated, TRUE); + esdc = &gdata->gdata[stmt->current_col]; + if (esdc->data_left == -2) + esdc->data_left = (cbValueMax > 0) ? 0 : -1; /* This seems to be * + * needed by ADO ? */ + if (esdc->data_left == 0) { + if (esdc->ttlbuf != NULL) { + free(esdc->ttlbuf); + esdc->ttlbuf = NULL; + esdc->ttlbuflen = 0; + } + esdc->data_left = -2; /* needed by ADO ? */ + return COPY_NO_DATA_FOUND; + } + } + /*--------- + * rgbValueOffset is *ONLY* for character and binary data. + * pcbValueOffset is for computing any pcbValue location + *--------- + */ + + if (bind_size > 0) + pcbValueOffset = rgbValueOffset = (bind_size * bind_row); + else { + pcbValueOffset = bind_row * sizeof(SQLLEN); + rgbValueOffset = bind_row * cbValueMax; + } + /* + * The following is applicable in case bind_size > 0 + * or the fCType is of variable length. + */ + if (rgbValue) + rgbValueBindRow = (char *)rgbValue + rgbValueOffset; + if (pcbValue) + pcbValueBindRow = LENADDR_SHIFT(pcbValue, pcbValueOffset); + if (pIndicator) { + pIndicatorBindRow = (SQLLEN *)((char *)pIndicator + pcbValueOffset); + *pIndicatorBindRow = 0; + } + + memset(&std_time, 0, sizeof(SIMPLE_TIME)); + + MYLOG(ES_DEBUG, + "field_type = %d, fctype = %d, value = '%s', cbValueMax=" FORMAT_LEN + "\n", + field_type, fCType, (value == NULL) ? "" : value, cbValueMax); + + if (!value) { + /* + * handle a null just by returning SQL_NULL_DATA in pcbValue, and + * doing nothing to the buffer. 
+ */ + if (pIndicator) { + *pIndicatorBindRow = SQL_NULL_DATA; + return COPY_OK; + } else { + SC_set_error(stmt, STMT_RETURN_NULL_WITHOUT_INDICATOR, + "StrLen_or_IndPtr was a null pointer and NULL data " + "was retrieved", + func); + return SQL_ERROR; + } + } + + if (stmt->hdbc->DataSourceToDriver != NULL) { + size_t length = strlen(value); + + stmt->hdbc->DataSourceToDriver(stmt->hdbc->translation_option, SQL_CHAR, + valuei, (SDWORD)length, valuei, + (SDWORD)length, NULL, NULL, 0, NULL); + } + + /* + * First convert any specific elasticsearch types into more useable data. + * + * NOTE: Conversions from ES char/varchar of a date/time/timestamp value + * to SQL_C_DATE,SQL_C_TIME, SQL_C_TIMESTAMP not supported + */ + switch (field_type) { + /* + * $$$ need to add parsing for date/time/timestamp strings in + * ES_TYPE_CHAR,VARCHAR $$$ + */ + case ES_TYPE_DATE: + sscanf(value, "%4d-%2d-%2d", &std_time.y, &std_time.m, &std_time.d); + break; + + case ES_TYPE_TIME: { + BOOL bZone = FALSE; /* time zone stuff is unreliable */ + int zone; + timestamp2stime(value, &std_time, &bZone, &zone); + } break; + + case ES_TYPE_ABSTIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + case ES_TYPE_TIMESTAMP: + std_time.fr = 0; + std_time.infinity = 0; + if (strnicmp(value, INFINITY_STRING, 8) == 0) { + std_time.infinity = 1; + std_time.m = 12; + std_time.d = 31; + std_time.y = 9999; + std_time.hh = 23; + std_time.mm = 59; + std_time.ss = 59; + } + if (strnicmp(value, MINFINITY_STRING, 9) == 0) { + std_time.infinity = -1; + std_time.m = 1; + std_time.d = 1; + // std_time.y = -4713; + std_time.y = -9999; + std_time.hh = 0; + std_time.mm = 0; + std_time.ss = 0; + } + if (strnicmp(value, "invalid", 7) != 0) { + BOOL bZone = field_type != ES_TYPE_TIMESTAMP_NO_TMZONE; + int zone; + + /* + * sscanf(value, "%4d-%2d-%2d %2d:%2d:%2d", &std_time.y, + * &std_time.m, &std_time.d, &std_time.hh, &std_time.mm, + * &std_time.ss); + */ + bZone = FALSE; /* time zone stuff is unreliable */ + 
timestamp2stime(value, &std_time, &bZone, &zone); + MYLOG(ES_ALL, "2stime fr=%d\n", std_time.fr); + } else { + /* + * The timestamp is invalid so set something conspicuous, + * like the epoch + */ + struct tm *tim; + time_t t = 0; +#ifdef HAVE_LOCALTIME_R + tim = localtime_r(&t, &tm); +#else + tim = localtime(&t); +#endif /* HAVE_LOCALTIME_R */ + std_time.m = tim->tm_mon + 1; + std_time.d = tim->tm_mday; + std_time.y = tim->tm_year + 1900; + std_time.hh = tim->tm_hour; + std_time.mm = tim->tm_min; + std_time.ss = tim->tm_sec; + } + break; + + case ES_TYPE_BOOL: { /* change T/F to 1/0 */ + switch (((char *)value)[0]) { + case 'f': + case 'F': + case 'n': + case 'N': + case '0': + STRCPY_FIXED(booltemp, "0"); + break; + default: + STRCPY_FIXED(booltemp, "1"); + } + neut_str = booltemp; + } break; + + /* This is for internal use by SQLStatistics() */ + case ES_TYPE_INT2VECTOR: + if (SQL_C_DEFAULT == fCType) { + int i, nval, maxc; + const char *vp; + /* this is an array of eight integers */ + short *short_array = (short *)rgbValueBindRow, shortv; + + maxc = 0; + if (NULL != short_array) + maxc = (int)cbValueMax / sizeof(short); + vp = value; + nval = 0; + MYLOG(ES_DEBUG, "index=("); + for (i = 0;; i++) { + if (sscanf(vp, "%hi", &shortv) != 1) + break; + MYPRINTF(0, " %hi", shortv); + nval++; + if (nval < maxc) + short_array[i + 1] = shortv; + + /* skip the current token */ + while (IS_NOT_SPACE(*vp)) + vp++; + /* and skip the space to the next token */ + while ((*vp != '\0') && (isspace(*vp))) + vp++; + if (*vp == '\0') + break; + } + MYPRINTF(0, ") nval = %i\n", nval); + if (maxc > 0) + short_array[0] = (short)nval; + + /* There is no corresponding fCType for this. 
*/ + len = (nval + 1) * sizeof(short); + if (pcbValue) + *pcbValueBindRow = len; + + if (len <= cbValueMax) + return COPY_OK; /* dont go any further or the data will be + * trashed */ + else + return COPY_RESULT_TRUNCATED; + } + break; + + /* + * This is a large object OID, which is used to store + * LONGVARBINARY objects. + */ + case ES_TYPE_LO_UNDEFINED: + + return convert_lo(stmt, value, fCType, rgbValueBindRow, cbValueMax, + pcbValueBindRow); + + case 0: + break; + + default: + if (field_type + == (OID)stmt->hdbc + ->lobj_type /* hack until permanent type available */ + || (ES_TYPE_OID == field_type && SQL_C_BINARY == fCType + && conn->lo_is_domain)) + return convert_lo(stmt, value, fCType, rgbValueBindRow, + cbValueMax, pcbValueBindRow); + } + + /* Change default into something useable */ + if (fCType == SQL_C_DEFAULT) { + fCType = estype_attr_to_ctype(conn, field_type, atttypmod); +#ifdef UNICODE_SUPPORT + if (fCType == SQL_C_WCHAR && CC_default_is_c(conn)) + fCType = SQL_C_CHAR; +#endif + + MYLOG(ES_DEBUG, ", SQL_C_DEFAULT: fCType = %d\n", fCType); + } + + text_bin_handling = FALSE; + switch (fCType) { + case INTERNAL_ASIS_TYPE: +#ifdef UNICODE_SUPPORT + case SQL_C_WCHAR: +#endif /* UNICODE_SUPPORT */ + case SQL_C_CHAR: + text_bin_handling = TRUE; + break; + case SQL_C_BINARY: + switch (field_type) { + case ES_TYPE_UNKNOWN: + case ES_TYPE_BPCHAR: + case ES_TYPE_VARCHAR: + case ES_TYPE_TEXT: + case ES_TYPE_XML: + case ES_TYPE_BPCHARARRAY: + case ES_TYPE_VARCHARARRAY: + case ES_TYPE_TEXTARRAY: + case ES_TYPE_XMLARRAY: + case ES_TYPE_BYTEA: + text_bin_handling = TRUE; + break; + } + break; + } + + if (text_bin_handling) { + BOOL pre_convert = TRUE; + int midsize = sizeof(midtemp); + int i; + + /* Special character formatting as required */ + + /* + * These really should return error if cbValueMax is not big + * enough. 
+ */ + switch (field_type) { + case ES_TYPE_DATE: + len = SPRINTF_FIXED(midtemp, "%.4d-%.2d-%.2d", std_time.y, + std_time.m, std_time.d); + break; + + case ES_TYPE_TIME: + len = SPRINTF_FIXED(midtemp, "%.2d:%.2d:%.2d", std_time.hh, + std_time.mm, std_time.ss); + if (std_time.fr > 0) { + int wdt; + int fr = effective_fraction(std_time.fr, &wdt); + + char *fraction = NULL; + len = sprintf(fraction, ".%0*d", wdt, fr); + strcat(midtemp, fraction); + } + break; + + case ES_TYPE_ABSTIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + case ES_TYPE_TIMESTAMP: + len = stime2timestamp(&std_time, midtemp, midsize, FALSE, + (int)(midsize - 19 - 2)); + break; + + case ES_TYPE_UUID: + len = strlen(neut_str); + for (i = 0; i < len && i < midsize - 2; i++) + midtemp[i] = (char)toupper((UCHAR)neut_str[i]); + midtemp[i] = '\0'; + MYLOG(ES_DEBUG, "ES_TYPE_UUID: rgbValueBindRow = '%s'\n", + rgbValueBindRow); + break; + + /* + * Currently, data is SILENTLY TRUNCATED for BYTEA and + * character data types if there is not enough room in + * cbValueMax because the driver can't handle multiple + * calls to SQLGetData for these, yet. Most likely, the + * buffer passed in will be big enough to handle the + * maximum limit of elasticsearch, anyway. + * + * LongVarBinary types are handled correctly above, observing + * truncation and all that stuff since there is + * essentially no limit on the large object used to store + * those. + */ + case ES_TYPE_BYTEA: /* convert binary data to hex strings + * (i.e, 255 = "FF") */ + + default: + pre_convert = FALSE; + } + if (pre_convert) + neut_str = midtemp; + result = convert_text_field_to_sql_c( + gdata, stmt->current_col, neut_str, field_type, fCType, + rgbValueBindRow, cbValueMax, conn, &len); + } else { + SQLGUID g; + + /* + * for SQL_C_CHAR, it's probably ok to leave currency symbols in. + * But to convert to numeric types, it is necessary to get rid of + * those. 
+ */ + if (field_type == ES_TYPE_MONEY) { + if (convert_money(neut_str, midtemp, sizeof(midtemp))) + neut_str = midtemp; + else { + MYLOG(ES_DEBUG, "couldn't convert money type to %d\n", fCType); + return COPY_UNSUPPORTED_TYPE; + } + } + + switch (fCType) { + case SQL_C_DATE: + case SQL_C_TYPE_DATE: /* 91 */ + len = 6; + { + DATE_STRUCT *ds; + struct tm *tim; + + if (bind_size > 0) + ds = (DATE_STRUCT *)rgbValueBindRow; + else + ds = (DATE_STRUCT *)rgbValue + bind_row; + + /* + * Initialize date in case conversion destination + * expects date part from this source time data. + * A value may be partially set here, so do some + * sanity checks on the existing values before + * setting them. + */ + tim = SC_get_localtime(stmt); + if (std_time.m == 0) + std_time.m = tim->tm_mon + 1; + if (std_time.d == 0) + std_time.d = tim->tm_mday; + if (std_time.y == 0) + std_time.y = tim->tm_year + 1900; + ds->year = (SQLSMALLINT)std_time.y; + ds->month = (SQLUSMALLINT)std_time.m; + ds->day = (SQLUSMALLINT)std_time.d; + } + break; + + case SQL_C_TIME: + case SQL_C_TYPE_TIME: /* 92 */ + len = 6; + { + TIME_STRUCT *ts; + + if (bind_size > 0) + ts = (TIME_STRUCT *)rgbValueBindRow; + else + ts = (TIME_STRUCT *)rgbValue + bind_row; + ts->hour = (SQLUSMALLINT)std_time.hh; + ts->minute = (SQLUSMALLINT)std_time.mm; + ts->second = (SQLUSMALLINT)std_time.ss; + } + break; + + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_TIMESTAMP: /* 93 */ + len = 16; + { + struct tm *tim; + TIMESTAMP_STRUCT *ts; + + if (bind_size > 0) + ts = (TIMESTAMP_STRUCT *)rgbValueBindRow; + else + ts = (TIMESTAMP_STRUCT *)rgbValue + bind_row; + + /* + * Initialize date in case conversion destination + * expects date part from this source time data. + * A value may be partially set here, so do some + * sanity checks on the existing values before + * setting them. 
+ */ + tim = SC_get_localtime(stmt); + if (std_time.m == 0) + std_time.m = tim->tm_mon + 1; + if (std_time.d == 0) + std_time.d = tim->tm_mday; + if (std_time.y == 0) + std_time.y = tim->tm_year + 1900; + + ts->year = (SQLSMALLINT)std_time.y; + ts->month = (SQLUSMALLINT)std_time.m; + ts->day = (SQLUSMALLINT)std_time.d; + ts->hour = (SQLUSMALLINT)std_time.hh; + ts->minute = (SQLUSMALLINT)std_time.mm; + ts->second = (SQLUSMALLINT)std_time.ss; + ts->fraction = (SQLUINTEGER)std_time.fr; + } + break; + + case SQL_C_BIT: + len = 1; + if (bind_size > 0) + *((UCHAR *)rgbValueBindRow) = (UCHAR)atoi(neut_str); + else + *((UCHAR *)rgbValue + bind_row) = (UCHAR)atoi(neut_str); + + MYLOG(99, + "SQL_C_BIT: bind_row = " FORMAT_POSIROW + " val = %d, cb = " FORMAT_LEN ", rgb=%d\n", + bind_row, atoi(neut_str), cbValueMax, + *((UCHAR *)rgbValue)); + break; + + case SQL_C_STINYINT: + case SQL_C_TINYINT: + len = 1; + if (bind_size > 0) + *((SCHAR *)rgbValueBindRow) = (SCHAR)atoi(neut_str); + else + *((SCHAR *)rgbValue + bind_row) = (SCHAR)atoi(neut_str); + break; + + case SQL_C_UTINYINT: + len = 1; + if (bind_size > 0) + *((UCHAR *)rgbValueBindRow) = (UCHAR)atoi(neut_str); + else + *((UCHAR *)rgbValue + bind_row) = (UCHAR)atoi(neut_str); + break; + + case SQL_C_FLOAT: + set_client_decimal_point((char *)neut_str); + len = 4; + if (bind_size > 0) + *((SFLOAT *)rgbValueBindRow) = + (SFLOAT)get_double_value(neut_str); + else + *((SFLOAT *)rgbValue + bind_row) = + (SFLOAT)get_double_value(neut_str); + break; + + case SQL_C_DOUBLE: + set_client_decimal_point((char *)neut_str); + len = 8; + if (bind_size > 0) + *((SDOUBLE *)rgbValueBindRow) = + (SDOUBLE)get_double_value(neut_str); + else + *((SDOUBLE *)rgbValue + bind_row) = + (SDOUBLE)get_double_value(neut_str); + break; + + case SQL_C_NUMERIC: { + SQL_NUMERIC_STRUCT *ns; + BOOL overflowed; + + if (bind_size > 0) + ns = (SQL_NUMERIC_STRUCT *)rgbValueBindRow; + else + ns = (SQL_NUMERIC_STRUCT *)rgbValue + bind_row; + + 
parse_to_numeric_struct(neut_str, ns, &overflowed); + if (overflowed) + result = COPY_RESULT_TRUNCATED; + } break; + + case SQL_C_SSHORT: + case SQL_C_SHORT: + len = 2; + if (bind_size > 0) + *((SQLSMALLINT *)rgbValueBindRow) = + (SQLSMALLINT)atoi(neut_str); + else + *((SQLSMALLINT *)rgbValue + bind_row) = + (SQLSMALLINT)atoi(neut_str); + break; + + case SQL_C_USHORT: + len = 2; + if (bind_size > 0) + *((SQLUSMALLINT *)rgbValueBindRow) = + (SQLUSMALLINT)atoi(neut_str); + else + *((SQLUSMALLINT *)rgbValue + bind_row) = + (SQLUSMALLINT)atoi(neut_str); + break; + + case SQL_C_SLONG: + case SQL_C_LONG: + len = 4; + if (bind_size > 0) + *((SQLINTEGER *)rgbValueBindRow) = atol(neut_str); + else + *((SQLINTEGER *)rgbValue + bind_row) = atol(neut_str); + break; + + case SQL_C_ULONG: + len = 4; + if (bind_size > 0) + *((SQLUINTEGER *)rgbValueBindRow) = ATOI32U(neut_str); + else + *((SQLUINTEGER *)rgbValue + bind_row) = ATOI32U(neut_str); + break; + +#ifdef ODBCINT64 + case SQL_C_SBIGINT: + len = 8; + if (bind_size > 0) + *((SQLBIGINT *)rgbValueBindRow) = ATOI64(neut_str); + else + *((SQLBIGINT *)rgbValue + bind_row) = ATOI64(neut_str); + break; + + case SQL_C_UBIGINT: + len = 8; + if (bind_size > 0) + *((SQLUBIGINT *)rgbValueBindRow) = ATOI64U(neut_str); + else + *((SQLUBIGINT *)rgbValue + bind_row) = ATOI64U(neut_str); + break; + +#endif /* ODBCINT64 */ + case SQL_C_BINARY: + /* The following is for SQL_C_VARBOOKMARK */ + if (ES_TYPE_INT4 == field_type) { + UInt4 ival = ATOI32U(neut_str); + + MYLOG(ES_ALL, "SQL_C_VARBOOKMARK value=%d\n", ival); + if (pcbValue) + *pcbValueBindRow = sizeof(ival); + if (cbValueMax >= (SQLLEN)sizeof(ival)) { + memcpy(rgbValueBindRow, &ival, sizeof(ival)); + return COPY_OK; + } else + return COPY_RESULT_TRUNCATED; + } else if (ES_TYPE_UUID == field_type) { + int rtn = char2guid(neut_str, &g); + + if (COPY_OK != rtn) + return rtn; + if (pcbValue) + *pcbValueBindRow = sizeof(g); + if (cbValueMax >= (SQLLEN)sizeof(g)) { + memcpy(rgbValueBindRow, 
&g, sizeof(g)); + return COPY_OK; + } else + return COPY_RESULT_TRUNCATED; + } else { + MYLOG(ES_DEBUG, + "couldn't convert the type %d to SQL_C_BINARY\n", + field_type); + return COPY_UNSUPPORTED_TYPE; + } + break; + case SQL_C_GUID: + + result = char2guid(neut_str, &g); + if (COPY_OK != result) { + MYLOG(ES_DEBUG, "Could not convert to SQL_C_GUID\n"); + return COPY_UNSUPPORTED_TYPE; + } + len = sizeof(g); + if (bind_size > 0) + *((SQLGUID *)rgbValueBindRow) = g; + else + *((SQLGUID *)rgbValue + bind_row) = g; + break; + case SQL_C_INTERVAL_YEAR: + case SQL_C_INTERVAL_MONTH: + case SQL_C_INTERVAL_YEAR_TO_MONTH: + case SQL_C_INTERVAL_DAY: + case SQL_C_INTERVAL_HOUR: + case SQL_C_INTERVAL_DAY_TO_HOUR: + case SQL_C_INTERVAL_MINUTE: + case SQL_C_INTERVAL_HOUR_TO_MINUTE: + case SQL_C_INTERVAL_SECOND: + case SQL_C_INTERVAL_DAY_TO_SECOND: + case SQL_C_INTERVAL_HOUR_TO_SECOND: + case SQL_C_INTERVAL_MINUTE_TO_SECOND: + interval2istruct( + fCType, precision, neut_str, + bind_size > 0 ? (SQL_INTERVAL_STRUCT *)rgbValueBindRow + : (SQL_INTERVAL_STRUCT *)rgbValue + bind_row); + break; + + default: + MYLOG(ES_DEBUG, "conversion to the type %d isn't supported\n", + fCType); + return COPY_UNSUPPORTED_TYPE; + } + } + + /* store the length of what was copied, if there's a place for it */ + if (pcbValue) + *pcbValueBindRow = len; + + if (result == COPY_OK && stmt->current_col >= 0) + gdata->gdata[stmt->current_col].data_left = 0; + return result; +} + +/*-------------------------------------------------------------------- + * Functions/Macros to get rid of query size limit. + * + * I always used the follwoing macros to convert from + * old_statement to new_statement. Please improve it + * if you have a better way. 
Hiroshi 2001/05/22 + *-------------------------------------------------------------------- + */ + +#define FLGP_USING_CURSOR (1L << 1) +#define FLGP_SELECT_INTO (1L << 2) +#define FLGP_SELECT_FOR_UPDATE_OR_SHARE (1L << 3) +#define FLGP_MULTIPLE_STATEMENT (1L << 5) +#define FLGP_SELECT_FOR_READONLY (1L << 6) +typedef struct _QueryParse { + const char *statement; + int statement_type; + size_t opos; + ssize_t from_pos; + ssize_t where_pos; + ssize_t stmt_len; + int in_status; + char escape_in_literal, prev_token_end; + const char *dollar_tag; + ssize_t taglen; + char token_curr[64]; + int token_len; + size_t declare_pos; + UInt4 flags, comment_level; + encoded_str encstr; +} QueryParse; + +enum { + QP_IN_IDENT_KEYWORD = 1L /* identifier or keyword */ + , + QP_IN_DQUOTE_IDENTIFIER = (1L << 1) /* "" */ + , + QP_IN_LITERAL = (1L << 2) /* '' */ + , + QP_IN_ESCAPE = (1L << 3) /* \ in literal */ + , + QP_IN_DOLLAR_QUOTE = (1L << 4) /* $...$ $...$ */ + , + QP_IN_COMMENT_BLOCK = (1L << 5) /* slash asterisk */ + , + QP_IN_LINE_COMMENT = (1L << 6) /* -- */ +}; + +#define QP_in_idle_status(qp) ((qp)->in_status == 0) + +#define QP_is_in(qp, status) (((qp)->in_status & status) != 0) +#define QP_enter(qp, status) ((qp)->in_status |= status) +#define QP_exit(qp, status) ((qp)->in_status &= (~status)) + +typedef enum { + RPM_REPLACE_PARAMS, + RPM_FAKE_PARAMS, + RPM_BUILDING_PREPARE_STATEMENT, + RPM_BUILDING_BIND_REQUEST +} ResolveParamMode; + +#define FLGB_INACCURATE_RESULT (1L << 4) +#define FLGB_CREATE_KEYSET (1L << 5) +#define FLGB_KEYSET_DRIVEN (1L << 6) +#define FLGB_CONVERT_LF (1L << 7) +#define FLGB_DISCARD_OUTPUT (1L << 8) +#define FLGB_BINARY_AS_POSSIBLE (1L << 9) +#define FLGB_LITERAL_EXTENSION (1L << 10) +#define FLGB_HEX_BIN_FORMAT (1L << 11) +#define FLGB_PARAM_CAST (1L << 12) +typedef struct _QueryBuild { + char *query_statement; + size_t str_alsize; + size_t npos; + SQLLEN current_row; + Int2 param_number; + Int2 dollar_number; + Int2 num_io_params; + Int2 
num_output_params; + Int2 num_discard_params; + Int2 proc_return; + Int2 brace_level; + char parenthesize_the_first; + APDFields *apdopts; + IPDFields *ipdopts; + PutDataInfo *pdata; + size_t load_stmt_len; + size_t load_from_pos; + ResolveParamMode param_mode; + UInt4 flags; + int ccsc; + int errornumber; + const char *errormsg; + + ConnectionClass *conn; /* mainly needed for LO handling */ + StatementClass *stmt; /* needed to set error info in ENLARGE_.. */ +} QueryBuild; + +#define INIT_MIN_ALLOC 4096 + +/* + * New macros (Aceto) + *-------------------- + */ + +#define F_OldChar(qp) ((qp)->statement[(qp)->opos]) + +#define F_OldPtr(qp) ((qp)->statement + (qp)->opos) + +#define F_OldNext(qp) (++(qp)->opos) + +#define F_OldPrior(qp) (--(qp)->opos) + +#define F_OldPos(qp) (qp)->opos + +#define F_ExtractOldTo(qp, buf, ch, maxsize) \ + do { \ + size_t c = 0; \ + while ((qp)->statement[qp->opos] != '\0' \ + && (qp)->statement[qp->opos] != ch) { \ + if (c >= maxsize) \ + break; \ + buf[c++] = (qp)->statement[qp->opos++]; \ + } \ + if ((qp)->statement[qp->opos] == '\0') { \ + retval = SQL_ERROR; \ + goto cleanup; \ + } \ + buf[c] = '\0'; \ + } while (0) + +#define F_NewChar(qb) (qb->query_statement[(qb)->npos]) + +#define F_NewPtr(qb) ((qb)->query_statement + (qb)->npos) + +#define F_NewNext(qb) (++(qb)->npos) + +#define F_NewPos(qb) ((qb)->npos) + +/*---------- + * Terminate the stmt_with_params string with NULL. + *---------- + */ +#define CVT_TERMINATE(qb) \ + do { \ + if (NULL == (qb)->query_statement) { \ + retval = SQL_ERROR; \ + goto cleanup; \ + } \ + (qb)->query_statement[(qb)->npos] = '\0'; \ + } while (0) + +/*---------- + * Append a data. + *---------- + */ +#define CVT_APPEND_DATA(qb, s, len) \ + do { \ + size_t newpos = (qb)->npos + len; \ + ENLARGE_NEWSTATEMENT((qb), newpos); \ + memcpy(&(qb)->query_statement[(qb)->npos], s, len); \ + (qb)->npos = newpos; \ + (qb)->query_statement[newpos] = '\0'; \ + } while (0) + +/*---------- + * Append a string. 
+ *---------- + */ +#define CVT_APPEND_STR(qb, s) \ + do { \ + size_t len = strlen(s); \ + CVT_APPEND_DATA(qb, s, len); \ + } while (0) + +/*---------- + * Append a char. + *---------- + */ +#define CVT_APPEND_CHAR(qb, c) \ + do { \ + ENLARGE_NEWSTATEMENT(qb, (qb)->npos + 1); \ + (qb)->query_statement[(qb)->npos++] = c; \ + } while (0) + +int findIdentifier(const UCHAR *str, int ccsc, const UCHAR **next_token) { + int slen = -1; + encoded_str encstr; + UCHAR tchar; + BOOL dquote = FALSE; + + *next_token = NULL; + encoded_str_constr(&encstr, ccsc, (const char *)str); + for (tchar = (UCHAR)encoded_nextchar(&encstr); tchar; + tchar = (UCHAR)encoded_nextchar(&encstr)) { + if (MBCS_NON_ASCII(encstr)) + continue; + if (encstr.pos == 0) /* the first character */ + { + if (dquote = (IDENTIFIER_QUOTE == tchar), dquote) + continue; + if (!isalpha(tchar)) { + slen = 0; + if (IS_NOT_SPACE(tchar)) + *next_token = ENCODE_PTR(encstr); + break; + } + } + if (dquote) { + if (IDENTIFIER_QUOTE == tchar) { + tchar = (UCHAR)encoded_nextchar(&encstr); + if (IDENTIFIER_QUOTE == tchar) + continue; + slen = (int)encstr.pos; + break; + } + } else { + if (isalnum(tchar)) + continue; + switch (tchar) { + case '_': + case DOLLAR_QUOTE: + continue; + } + slen = (int)encstr.pos; + if (IS_NOT_SPACE(tchar)) + *next_token = ENCODE_PTR(encstr); + break; + } + } + if (slen < 0 && !dquote) + slen = (int)encstr.pos; + if (NULL == *next_token) { + for (; tchar; tchar = (UCHAR)encoded_nextchar(&encstr)) { + if (IS_NOT_SPACE((UCHAR)tchar)) { + *next_token = ENCODE_PTR(encstr); + break; + } + } + } + return slen; +} + +static esNAME lower_or_remove_dquote(esNAME nm, const UCHAR *src, int srclen, + int ccsc) { + int i, outlen; + char *tc; + UCHAR tchar; + BOOL idQuote; + encoded_str encstr; + + if (nm.name) + tc = realloc(nm.name, srclen + 1); + else + tc = malloc(srclen + 1); + if (!tc) { + NULL_THE_NAME(nm); + return nm; + } + nm.name = tc; + idQuote = (src[0] == IDENTIFIER_QUOTE); + 
encoded_str_constr(&encstr, ccsc, (const char *)src); + for (i = 0, tchar = (UCHAR)encoded_nextchar(&encstr), outlen = 0; + i < srclen; i++, tchar = (UCHAR)encoded_nextchar(&encstr)) { + if (MBCS_NON_ASCII(encstr)) { + tc[outlen++] = tchar; + continue; + } + if (idQuote) { + if (IDENTIFIER_QUOTE == tchar) { + if (0 == i) + continue; + if (i == srclen - 1) + continue; + i++; + tchar = (UCHAR)encoded_nextchar(&encstr); + } + tc[outlen++] = tchar; + } else { + tc[outlen++] = (char)tolower(tchar); + } + } + tc[outlen] = '\0'; + return nm; +} + +int eatTableIdentifiers(const UCHAR *str, int ccsc, esNAME *table, + esNAME *schema) { + int len; + const UCHAR *next_token; + const UCHAR *tstr = str; + + while (isspace(*tstr)) + tstr++; + + if ((len = findIdentifier(tstr, ccsc, &next_token)) <= 0) + return len; /* table name doesn't exist */ + if (table) { + if (IDENTIFIER_QUOTE == *tstr) + *table = lower_or_remove_dquote(*table, tstr, len, ccsc); + else + STRN_TO_NAME(*table, tstr, len); + } + if (!next_token || '.' != *next_token || (int)(next_token - tstr) != len) + return (int)(next_token - str); /* table only */ + tstr = next_token + 1; + if ((len = findIdentifier(tstr, ccsc, &next_token)) <= 0) + return -1; + if (table) { + if (schema) + MOVE_NAME(*schema, *table); + *table = lower_or_remove_dquote(*table, tstr, len, ccsc); + } + if (!next_token || '.' != *next_token || (int)(next_token - tstr) != len) + return (int)(next_token - str); /* schema.table */ + tstr = next_token + 1; + if ((len = findIdentifier(tstr, ccsc, &next_token)) <= 0) + return -1; + if (table) { + if (schema) + MOVE_NAME(*schema, *table); + *table = lower_or_remove_dquote(*table, tstr, len, ccsc); + } + return (int)(next_token - str); /* catalog.schema.table */ +} + +#define PT_TOKEN_IGNORE(pt) ((pt)->curchar_processed = TRUE) + +#define MIN_ALC_SIZE 128 + +/* + * With SQL_MAX_NUMERIC_LEN = 16, the highest representable number is + * 2^128 - 1, which fits in 39 digits. 
+ */ +#define MAX_NUMERIC_DIGITS 39 + +/* + * Convert a string representation of a numeric into SQL_NUMERIC_STRUCT. + */ +static void parse_to_numeric_struct(const char *wv, SQL_NUMERIC_STRUCT *ns, + BOOL *overflow) { + int i, nlen, dig; + char calv[SQL_MAX_NUMERIC_LEN * 3]; + BOOL dot_exist; + + *overflow = FALSE; + + /* skip leading space */ + while (*wv && isspace((unsigned char)*wv)) + wv++; + + /* sign */ + ns->sign = 1; + if (*wv == '-') { + ns->sign = 0; + wv++; + } else if (*wv == '+') + wv++; + + /* skip leading zeros */ + while (*wv == '0') + wv++; + + /* read the digits into calv */ + ns->precision = 0; + ns->scale = 0; + for (nlen = 0, dot_exist = FALSE;; wv++) { + if (*wv == '.') { + if (dot_exist) + break; + dot_exist = TRUE; + } else if (*wv == '\0' || !isdigit((unsigned char)*wv)) + break; + else { + if (nlen >= (int)sizeof(calv)) { + if (dot_exist) + break; + else { + ns->scale--; + *overflow = TRUE; + continue; + } + } + if (dot_exist) + ns->scale++; + calv[nlen++] = *wv; + } + } + ns->precision = (SQLCHAR)nlen; + + /* Convert the decimal digits to binary */ + memset(ns->val, 0, sizeof(ns->val)); + for (dig = 0; dig < nlen; dig++) { + UInt4 carry; + + /* multiply the current value by 10, and add the next digit */ + carry = calv[dig] - '0'; + for (i = 0; i < (int)sizeof(ns->val); i++) { + UInt4 t; + + t = ((UInt4)ns->val[i]) * 10 + carry; + ns->val[i] = (unsigned char)(t & 0xFF); + carry = (t >> 8); + } + + if (carry != 0) + *overflow = TRUE; + } +} + +static BOOL convert_money(const char *s, char *sout, size_t soutmax) { + char in, decp = 0; + size_t i = 0, out = 0; + int num_in = -1, period_in = -1, comma_in = -1; + + for (i = 0; s[i]; i++) { + switch (in = s[i]) { + case '.': + if (period_in < 0) + period_in = (int)i; + break; + case ',': + if (comma_in < 0) + comma_in = (int)i; + break; + default: + if ('0' <= in && '9' >= in) + num_in = (int)i; + break; + } + } + if (period_in > comma_in) { + if (period_in >= num_in - 2) + decp = '.'; + } else 
if (comma_in >= 0 && comma_in >= num_in - 2) + decp = ','; + for (i = 0; s[i] && out + 1 < soutmax; i++) { + switch (in = s[i]) { + case '(': + case '-': + sout[out++] = '-'; + break; + default: + if (in >= '0' && in <= '9') + sout[out++] = in; + else if (in == decp) + sout[out++] = '.'; + } + } + sout[out] = '\0'; + return TRUE; +} + +/* Change linefeed to carriage-return/linefeed */ +size_t convert_linefeeds(const char *si, char *dst, size_t max, BOOL convlf, + BOOL *changed) { + size_t i = 0, out = 0; + + if (max == 0) + max = 0xffffffff; + *changed = FALSE; + for (i = 0; si[i] && out < max - 1; i++) { + if (convlf && si[i] == '\n') { + /* Only add the carriage-return if needed */ + if (i > 0 && ES_CARRIAGE_RETURN == si[i - 1]) { + if (dst) + dst[out++] = si[i]; + else + out++; + continue; + } + *changed = TRUE; + + if (dst) { + dst[out++] = ES_CARRIAGE_RETURN; + dst[out++] = '\n'; + } else + out += 2; + } else { + if (dst) + dst[out++] = si[i]; + else + out++; + } + } + if (dst) + dst[out] = '\0'; + return out; +} + +static int conv_from_octal(const char *s) { + ssize_t i; + int y = 0; + + for (i = 1; i <= 3; i++) + y += (s[i] - '0') << (3 * (3 - i)); + + return y; +} + +/* convert octal escapes to bytes */ +static size_t convert_from_esbinary(const char *value, char *rgbValue, + SQLLEN cbValueMax) { + UNUSED(cbValueMax); + size_t i, ilen = strlen(value); + size_t o = 0; + + for (i = 0; i < ilen;) { + if (value[i] == BYTEA_ESCAPE_CHAR) { + if (value[i + 1] == BYTEA_ESCAPE_CHAR) { + if (rgbValue) + rgbValue[o] = value[i]; + o++; + i += 2; + } else if (value[i + 1] == 'x') { + i += 2; + if (i < ilen) { + ilen -= i; + if (rgbValue) + es_hex2bin(value + i, rgbValue + o, ilen); + o += ilen / 2; + } + break; + } else { + if (rgbValue) + rgbValue[o] = (char)conv_from_octal(&value[i]); + o++; + i += 4; + } + } else { + if (rgbValue) + rgbValue[o] = value[i]; + o++; + i++; + } + /** if (rgbValue) + MYLOG(ES_DEBUG, "i=%d, rgbValue[%d] = %d, %c\n", i, o, rgbValue[o], + 
rgbValue[o]); ***/ + } + + if (rgbValue) + rgbValue[o] = '\0'; /* extra protection */ + + MYLOG(ES_DEBUG, "in=" FORMAT_SIZE_T ", out = " FORMAT_SIZE_T "\n", ilen, o); + + return o; +} + +static const char *hextbl = "0123456789ABCDEF"; + +#define def_bin2hex(type) \ + (const char *src, type *dst, SQLLEN length) { \ + const char *src_wk; \ + UCHAR chr; \ + type *dst_wk; \ + BOOL backwards; \ + int i; \ + \ + backwards = FALSE; \ + if ((char *)dst < src) { \ + if ((char *)(dst + 2 * (length - 1)) > src + length - 1) \ + return -1; \ + } else if ((char *)dst < src + length) \ + backwards = TRUE; \ + if (backwards) { \ + for (i = 0, src_wk = src + length - 1, \ + dst_wk = dst + 2 * length - 1; \ + i < length; i++, src_wk--) { \ + chr = *src_wk; \ + *dst_wk-- = hextbl[chr % 16]; \ + *dst_wk-- = hextbl[chr >> 4]; \ + } \ + } else { \ + for (i = 0, src_wk = src, dst_wk = dst; i < length; \ + i++, src_wk++) { \ + chr = *src_wk; \ + *dst_wk++ = hextbl[chr >> 4]; \ + *dst_wk++ = hextbl[chr % 16]; \ + } \ + } \ + dst[2 * length] = '\0'; \ + return 2 * length * sizeof(type); \ + } +#ifdef UNICODE_SUPPORT +static SQLLEN es_bin2whex def_bin2hex(SQLWCHAR) +#endif /* UNICODE_SUPPORT */ + + static SQLLEN es_bin2hex def_bin2hex(char) + + SQLLEN es_hex2bin(const char *src, char *dst, SQLLEN length) { + UCHAR chr; + const char *src_wk; + char *dst_wk; + SQLLEN i; + int val; + BOOL HByte = TRUE; + + for (i = 0, src_wk = src, dst_wk = dst; i < length; i++, src_wk++) { + chr = *src_wk; + if (!chr) + break; + if (chr >= 'a' && chr <= 'f') + val = chr - 'a' + 10; + else if (chr >= 'A' && chr <= 'F') + val = chr - 'A' + 10; + else + val = chr - '0'; + if (HByte) + *dst_wk = (char)(val << 4); + else { + *dst_wk += (char)val; + dst_wk++; + } + HByte = !HByte; + } + *dst_wk = '\0'; + return length; +} + +static int convert_lo(StatementClass *stmt, const void *value, + SQLSMALLINT fCType, PTR rgbValue, SQLLEN cbValueMax, + SQLLEN *pcbValue) { + UNUSED(cbValueMax, pcbValue, rgbValue, fCType, 
value); + SC_set_error(stmt, STMT_EXEC_ERROR, + "Could not convert large object to c-type (large objects are " + "not supported).", + "convert_lo"); + return COPY_GENERAL_ERROR; +} diff --git a/sql-odbc/src/odfesqlodbc/convert.h b/sql-odbc/src/odfesqlodbc/convert.h new file mode 100644 index 0000000000..31f112e67c --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/convert.h @@ -0,0 +1,46 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __CONVERT_H__ +#define __CONVERT_H__ + +#include "es_odbc.h" + +#ifdef __cplusplus +extern "C" { +#endif +/* copy_and_convert results */ +#define COPY_OK 0 +#define COPY_UNSUPPORTED_TYPE 1 +#define COPY_UNSUPPORTED_CONVERSION 2 +#define COPY_RESULT_TRUNCATED 3 +#define COPY_GENERAL_ERROR 4 +#define COPY_NO_DATA_FOUND 5 +#define COPY_INVALID_STRING_CONVERSION 6 + +int copy_and_convert_field_bindinfo(StatementClass *stmt, OID field_type, + int atttypmod, void *value, int col); +int copy_and_convert_field(StatementClass *stmt, OID field_type, int atttypmod, + void *value, SQLSMALLINT fCType, int precision, + PTR rgbValue, SQLLEN cbValueMax, SQLLEN *pcbValue, + SQLLEN *pIndicator); + +SQLLEN es_hex2bin(const char *in, char *out, SQLLEN len); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/sql-odbc/src/odfesqlodbc/descriptor.c b/sql-odbc/src/odfesqlodbc/descriptor.c new file mode 100644 index 0000000000..7f3902473d --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/descriptor.c @@ 
-0,0 +1,590 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "descriptor.h" + +#include +#include +#include + +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "misc.h" +#include "qresult.h" +#include "statement.h" + +void TI_Destructor(TABLE_INFO **ti, int count) { + int i; + + MYLOG(ES_TRACE, "entering count=%d\n", count); + if (ti) { + for (i = 0; i < count; i++) { + if (ti[i]) { + COL_INFO *coli = ti[i]->col_info; + if (coli) { + MYLOG(ES_ALL, "!!!refcnt %p:%d -> %d\n", coli, coli->refcnt, + coli->refcnt - 1); + coli->refcnt--; + if (coli->refcnt <= 0 + && 0 == coli->acc_time) /* acc_time == 0 means the table + is dropped */ + free_col_info_contents(coli); + } + NULL_THE_NAME(ti[i]->schema_name); + NULL_THE_NAME(ti[i]->table_name); + NULL_THE_NAME(ti[i]->table_alias); + NULL_THE_NAME(ti[i]->bestitem); + NULL_THE_NAME(ti[i]->bestqual); + TI_Destroy_IH(ti[i]); + free(ti[i]); + ti[i] = NULL; + } + } + } +} + +void FI_Destructor(FIELD_INFO **fi, int count, BOOL freeFI) { + int i; + + MYLOG(ES_TRACE, "entering count=%d\n", count); + if (fi) { + for (i = 0; i < count; i++) { + if (fi[i]) { + NULL_THE_NAME(fi[i]->column_name); + NULL_THE_NAME(fi[i]->column_alias); + NULL_THE_NAME(fi[i]->schema_name); + NULL_THE_NAME(fi[i]->before_dot); + if (freeFI) { + free(fi[i]); + fi[i] = NULL; + } + } + } + if (freeFI) + free(fi); + } +} + +#define INIT_IH 32 + +void 
TI_Destroy_IH(TABLE_INFO *ti) { + InheritanceClass *ih; + unsigned int i; + + if (NULL == (ih = ti->ih)) + return; + for (i = 0; i < ih->count; i++) { + NULL_THE_NAME(ih->inf[i].fullTable); + } + free(ih); + ti->ih = NULL; +} + +void DC_Constructor(DescriptorClass *self, BOOL embedded, + StatementClass *stmt) { + UNUSED(stmt); + memset(self, 0, sizeof(DescriptorClass)); + self->deschd.embedded = (char)embedded; +} + +static void ARDFields_free(ARDFields *self) { + MYLOG(ES_TRACE, "entering %p bookmark=%p\n", self, self->bookmark); + if (self->bookmark) { + free(self->bookmark); + self->bookmark = NULL; + } + /* + * the memory pointed to by the bindings is not deallocated by the + * driver but by the application that uses that driver, so we don't + * have to care + */ + ARD_unbind_cols(self, TRUE); +} + +static void APDFields_free(APDFields *self) { + if (self->bookmark) { + free(self->bookmark); + self->bookmark = NULL; + } + /* param bindings */ + APD_free_params(self, STMT_FREE_PARAMS_ALL); +} + +static void IRDFields_free(IRDFields *self) { + /* Free the parsed field information */ + if (self->fi) { + FI_Destructor(self->fi, self->allocated, TRUE); + self->fi = NULL; + } + self->allocated = 0; + self->nfields = 0; +} + +static void IPDFields_free(IPDFields *self) { + /* param bindings */ + IPD_free_params(self, STMT_FREE_PARAMS_ALL); +} + +void DC_Destructor(DescriptorClass *self) { + DescriptorHeader *deschd = &(self->deschd); + if (deschd->__error_message) { + free(deschd->__error_message); + deschd->__error_message = NULL; + } + if (deschd->eserror) { + ER_Destructor(deschd->eserror); + deschd->eserror = NULL; + } + if (deschd->type_defined) { + switch (deschd->desc_type) { + case SQL_ATTR_APP_ROW_DESC: + ARDFields_free(&(self->ardf)); + break; + case SQL_ATTR_APP_PARAM_DESC: + APDFields_free(&(self->apdf)); + break; + case SQL_ATTR_IMP_ROW_DESC: + IRDFields_free(&(self->irdf)); + break; + case SQL_ATTR_IMP_PARAM_DESC: + IPDFields_free(&(self->ipdf)); + 
break; + } + } +} + +void InitializeEmbeddedDescriptor(DescriptorClass *self, StatementClass *stmt, + UInt4 desc_type) { + DescriptorHeader *deschd = &(self->deschd); + DC_Constructor(self, TRUE, stmt); + DC_get_conn(self) = SC_get_conn(stmt); + deschd->type_defined = TRUE; + deschd->desc_type = desc_type; + switch (desc_type) { + case SQL_ATTR_APP_ROW_DESC: + memset(&(self->ardf), 0, sizeof(ARDFields)); + stmt->ard = self; + break; + case SQL_ATTR_APP_PARAM_DESC: + memset(&(self->apdf), 0, sizeof(APDFields)); + stmt->apd = self; + break; + case SQL_ATTR_IMP_ROW_DESC: + memset(&(self->irdf), 0, sizeof(IRDFields)); + stmt->ird = self; + stmt->ird->irdf.stmt = stmt; + break; + case SQL_ATTR_IMP_PARAM_DESC: + memset(&(self->ipdf), 0, sizeof(IPDFields)); + stmt->ipd = self; + break; + } +} + +/* + * ARDFields initialize + */ +void InitializeARDFields(ARDFields *opt) { + memset(opt, 0, sizeof(ARDFields)); + opt->size_of_rowset = 1; + opt->bind_size = 0; /* default is to bind by column */ + opt->size_of_rowset_odbc2 = 1; +} +/* + * APDFields initialize + */ +void InitializeAPDFields(APDFields *opt) { + memset(opt, 0, sizeof(APDFields)); + opt->paramset_size = 1; + opt->param_bind_type = 0; /* default is to bind by column */ + opt->paramset_size_dummy = 1; /* dummy setting */ +} + +BindInfoClass *ARD_AllocBookmark(ARDFields *ardopts) { + if (!ardopts->bookmark) { + ardopts->bookmark = (BindInfoClass *)malloc(sizeof(BindInfoClass)); + memset(ardopts->bookmark, 0, sizeof(BindInfoClass)); + } + return ardopts->bookmark; +} + +#define DESC_INCREMENT 10 +char CC_add_descriptor(ConnectionClass *self, DescriptorClass *desc) { + int i; + int new_num_descs; + DescriptorClass **descs; + + MYLOG(ES_TRACE, "entering self=%p, desc=%p\n", self, desc); + + for (i = 0; i < self->num_descs; i++) { + if (!self->descs[i]) { + DC_get_conn(desc) = self; + self->descs[i] = desc; + return TRUE; + } + } + /* no more room -- allocate more memory */ + new_num_descs = DESC_INCREMENT + 
self->num_descs; + descs = (DescriptorClass **)realloc( + self->descs, sizeof(DescriptorClass *) * new_num_descs); + if (!descs) + return FALSE; + self->descs = descs; + + memset(&self->descs[self->num_descs], 0, + sizeof(DescriptorClass *) * DESC_INCREMENT); + DC_get_conn(desc) = self; + self->descs[self->num_descs] = desc; + self->num_descs = new_num_descs; + + return TRUE; +} + +/* + * This API allocates a Application descriptor. + */ +RETCODE SQL_API ESAPI_AllocDesc(HDBC ConnectionHandle, + SQLHDESC *DescriptorHandle) { + CSTR func = "ESAPI_AllocDesc"; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + RETCODE ret = SQL_SUCCESS; + DescriptorClass *desc; + + MYLOG(ES_TRACE, "entering...\n"); + + desc = (DescriptorClass *)malloc(sizeof(DescriptorClass)); + if (desc) { + memset(desc, 0, sizeof(DescriptorClass)); + DC_get_conn(desc) = conn; + if (CC_add_descriptor(conn, desc)) + *DescriptorHandle = desc; + else { + free(desc); + CC_set_error(conn, CONN_STMT_ALLOC_ERROR, + "Maximum number of descriptors exceeded", func); + ret = SQL_ERROR; + } + } else { + CC_set_error(conn, CONN_STMT_ALLOC_ERROR, + "No more memory ti allocate a further descriptor", func); + ret = SQL_ERROR; + } + return ret; +} + +RETCODE SQL_API ESAPI_FreeDesc(SQLHDESC DescriptorHandle) { + DescriptorClass *desc = (DescriptorClass *)DescriptorHandle; + RETCODE ret = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering...\n"); + DC_Destructor(desc); + if (!desc->deschd.embedded) { + int i; + ConnectionClass *conn = DC_get_conn(desc); + + for (i = 0; i < conn->num_descs; i++) { + if (conn->descs[i] == desc) { + conn->descs[i] = NULL; + break; + } + } + free(desc); + } + return ret; +} + +static void BindInfoClass_copy(const BindInfoClass *src, + BindInfoClass *target) { + memcpy(target, src, sizeof(BindInfoClass)); +} +static void ARDFields_copy(const ARDFields *src, ARDFields *target) { + memcpy(target, src, sizeof(ARDFields)); + target->bookmark = NULL; + if (src->bookmark) { + BindInfoClass 
*bookmark = ARD_AllocBookmark(target); + if (bookmark) + BindInfoClass_copy(src->bookmark, bookmark); + } + if (src->allocated <= 0) { + target->allocated = 0; + target->bindings = NULL; + } else { + int i; + + target->bindings = malloc(target->allocated * sizeof(BindInfoClass)); + if (!target->bindings) + target->allocated = 0; + for (i = 0; i < target->allocated; i++) + BindInfoClass_copy(&src->bindings[i], &target->bindings[i]); + } +} + +static void ParameterInfoClass_copy(const ParameterInfoClass *src, + ParameterInfoClass *target) { + memcpy(target, src, sizeof(ParameterInfoClass)); +} +static void APDFields_copy(const APDFields *src, APDFields *target) { + memcpy(target, src, sizeof(APDFields)); + if (src->bookmark) { + target->bookmark = malloc(sizeof(ParameterInfoClass)); + if (target->bookmark) + ParameterInfoClass_copy(src->bookmark, target->bookmark); + } + if (src->allocated <= 0) { + target->allocated = 0; + target->parameters = NULL; + } else { + int i; + + target->parameters = + malloc(target->allocated * sizeof(ParameterInfoClass)); + if (!target->parameters) + target->allocated = 0; + for (i = 0; i < target->allocated; i++) + ParameterInfoClass_copy(&src->parameters[i], + &target->parameters[i]); + } +} + +static void ParameterImplClass_copy(const ParameterImplClass *src, + ParameterImplClass *target) { + memcpy(target, src, sizeof(ParameterImplClass)); +} +static void IPDFields_copy(const IPDFields *src, IPDFields *target) { + memcpy(target, src, sizeof(IPDFields)); + if (src->allocated <= 0) { + target->allocated = 0; + target->parameters = NULL; + } else { + int i; + + target->parameters = (ParameterImplClass *)malloc( + target->allocated * sizeof(ParameterImplClass)); + if (!target->parameters) + target->allocated = 0; + for (i = 0; i < target->allocated; i++) + ParameterImplClass_copy(&src->parameters[i], + &target->parameters[i]); + } +} + +RETCODE SQL_API ESAPI_CopyDesc(SQLHDESC SourceDescHandle, + SQLHDESC TargetDescHandle) { + RETCODE ret 
= SQL_ERROR; + DescriptorClass *src, *target; + DescriptorHeader *srchd, *targethd; + ARDFields *ard_src, *ard_tgt; + APDFields *apd_src, *apd_tgt; + IPDFields *ipd_src, *ipd_tgt; + + MYLOG(ES_TRACE, "entering...\n"); + src = (DescriptorClass *)SourceDescHandle; + target = (DescriptorClass *)TargetDescHandle; + srchd = &(src->deschd); + targethd = &(target->deschd); + if (!srchd->type_defined) { + MYLOG(ES_ERROR, "source type undefined\n"); + DC_set_error(target, DESC_EXEC_ERROR, "source handle type undefined"); + return ret; + } + if (targethd->type_defined) { + MYLOG(ES_DEBUG, "source type=%d -> target type=%d\n", srchd->desc_type, + targethd->desc_type); + if (SQL_ATTR_IMP_ROW_DESC == targethd->desc_type) { + MYLOG(ES_DEBUG, "can't modify IRD\n"); + DC_set_error(target, DESC_EXEC_ERROR, "can't copy to IRD"); + return ret; + } else if (targethd->desc_type != srchd->desc_type) { + if (targethd->embedded) { + MYLOG(ES_DEBUG, "src type != target type\n"); + DC_set_error( + target, DESC_EXEC_ERROR, + "copying different type descriptor to embedded one"); + return ret; + } + } + DC_Destructor(target); + } + ret = SQL_SUCCESS; + switch (srchd->desc_type) { + case SQL_ATTR_APP_ROW_DESC: + MYLOG(ES_DEBUG, "src=%p target=%p type=%d", src, target, + srchd->desc_type); + if (!targethd->type_defined) { + targethd->desc_type = srchd->desc_type; + } + ard_src = &(src->ardf); + MYPRINTF(ES_DEBUG, + " rowset_size=" FORMAT_LEN " bind_size=" FORMAT_UINTEGER + " ope_ptr=%p off_ptr=%p\n", + ard_src->size_of_rowset, ard_src->bind_size, + ard_src->row_operation_ptr, ard_src->row_offset_ptr); + ard_tgt = &(target->ardf); + MYPRINTF(ES_DEBUG, " target=%p", ard_tgt); + ARDFields_copy(ard_src, ard_tgt); + MYPRINTF(ES_DEBUG, " offset_ptr=%p\n", ard_tgt->row_offset_ptr); + break; + case SQL_ATTR_APP_PARAM_DESC: + if (!targethd->type_defined) { + targethd->desc_type = srchd->desc_type; + } + apd_src = &(src->apdf); + apd_tgt = &(target->apdf); + APDFields_copy(apd_src, apd_tgt); + break; + 
case SQL_ATTR_IMP_PARAM_DESC: + if (!targethd->type_defined) { + targethd->desc_type = srchd->desc_type; + } + ipd_src = &(src->ipdf); + ipd_tgt = &(target->ipdf); + IPDFields_copy(ipd_src, ipd_tgt); + break; + default: + MYLOG(ES_DEBUG, "invalid descriptor handle type=%d\n", + srchd->desc_type); + DC_set_error(target, DESC_EXEC_ERROR, "invalid descriptor type"); + ret = SQL_ERROR; + } + + if (SQL_SUCCESS == ret) + targethd->type_defined = TRUE; + return ret; +} + +void DC_set_error(DescriptorClass *self, int errornumber, + const char *errormsg) { + DescriptorHeader *deschd = &(self->deschd); + if (deschd->__error_message) + free(deschd->__error_message); + deschd->__error_number = errornumber; + deschd->__error_message = errormsg ? strdup(errormsg) : NULL; +} +void DC_set_errormsg(DescriptorClass *self, const char *errormsg) { + DescriptorHeader *deschd = &(self->deschd); + if (deschd->__error_message) + free(deschd->__error_message); + deschd->__error_message = errormsg ? strdup(errormsg) : NULL; +} +const char *DC_get_errormsg(const DescriptorClass *desc) { + return desc->deschd.__error_message; +} +int DC_get_errornumber(const DescriptorClass *desc) { + return desc->deschd.__error_number; +} + +/* Map sql commands to statement types */ +static const struct { + int number; + const char ver3str[6]; + const char ver2str[6]; +} Descriptor_sqlstate[] = + + { + {DESC_ERROR_IN_ROW, "01S01", "01S01"}, + {DESC_OPTION_VALUE_CHANGED, "01S02", "01S02"}, + {DESC_OK, "00000", "00000"}, /* OK */ + {DESC_EXEC_ERROR, "HY000", "S1000"}, /* also a general error */ + {DESC_STATUS_ERROR, "HY010", "S1010"}, + {DESC_SEQUENCE_ERROR, "HY010", "S1010"}, /* Function sequence error */ + {DESC_NO_MEMORY_ERROR, "HY001", + "S1001"}, /* memory allocation failure */ + {DESC_COLNUM_ERROR, "07009", "S1002"}, /* invalid column number */ + {DESC_NO_STMTSTRING, "HY001", + "S1001"}, /* having no stmtstring is also a malloc problem */ + {DESC_ERROR_TAKEN_FROM_BACKEND, "HY000", "S1000"}, /* general 
error */ + {DESC_INTERNAL_ERROR, "HY000", "S1000"}, /* general error */ + {DESC_STILL_EXECUTING, "HY010", "S1010"}, + {DESC_NOT_IMPLEMENTED_ERROR, "HYC00", "S1C00"}, /* == 'driver not + * capable' */ + {DESC_BAD_PARAMETER_NUMBER_ERROR, "07009", "S1093"}, + {DESC_OPTION_OUT_OF_RANGE_ERROR, "HY092", "S1092"}, + {DESC_INVALID_COLUMN_NUMBER_ERROR, "07009", "S1002"}, + {DESC_RESTRICTED_DATA_TYPE_ERROR, "07006", "07006"}, + {DESC_INVALID_CURSOR_STATE_ERROR, "07005", "24000"}, + {DESC_CREATE_TABLE_ERROR, "42S01", "S0001"}, /* table already exists */ + {DESC_NO_CURSOR_NAME, "S1015", "S1015"}, + {DESC_INVALID_CURSOR_NAME, "34000", "34000"}, + {DESC_INVALID_ARGUMENT_NO, "HY024", + "S1009"}, /* invalid argument value */ + {DESC_ROW_OUT_OF_RANGE, "HY107", "S1107"}, + {DESC_OPERATION_CANCELLED, "HY008", "S1008"}, + {DESC_INVALID_CURSOR_POSITION, "HY109", "S1109"}, + {DESC_VALUE_OUT_OF_RANGE, "HY019", "22003"}, + {DESC_OPERATION_INVALID, "HY011", "S1011"}, + {DESC_PROGRAM_TYPE_OUT_OF_RANGE, "?????", "?????"}, + {DESC_BAD_ERROR, "08S01", "08S01"}, /* communication link failure */ + {DESC_INVALID_OPTION_IDENTIFIER, "HY092", "HY092"}, + {DESC_RETURN_NULL_WITHOUT_INDICATOR, "22002", "22002"}, + {DESC_INVALID_DESCRIPTOR_IDENTIFIER, "HY091", "HY091"}, + {DESC_OPTION_NOT_FOR_THE_DRIVER, "HYC00", "HYC00"}, + {DESC_FETCH_OUT_OF_RANGE, "HY106", "S1106"}, + {DESC_COUNT_FIELD_INCORRECT, "07002", "07002"}, +}; + +static ES_ErrorInfo *DC_create_errorinfo(const DescriptorClass *self) { + const DescriptorHeader *deschd = &(self->deschd); + ES_ErrorInfo *error; + ConnectionClass *conn; + EnvironmentClass *env; + Int4 errornum; + BOOL env_is_odbc3 = TRUE; + + if (deschd->eserror) + return deschd->eserror; + errornum = deschd->__error_number; + error = ER_Constructor(errornum, deschd->__error_message); + if (!error) + return error; + conn = DC_get_conn(self); + if (conn && (env = (EnvironmentClass *)conn->henv, env)) + env_is_odbc3 = EN_is_odbc3(env); + errornum -= LOWEST_DESC_ERROR; + if 
(errornum < 0 + || errornum >= (int)(sizeof(Descriptor_sqlstate) + / sizeof(Descriptor_sqlstate[0]))) + errornum = 1 - LOWEST_DESC_ERROR; + STRCPY_FIXED(error->sqlstate, env_is_odbc3 + ? Descriptor_sqlstate[errornum].ver3str + : Descriptor_sqlstate[errornum].ver2str); + return error; +} +void DC_log_error(const char *func, const char *desc, + const DescriptorClass *self) { +#define nullcheck(a) (a ? a : "(NULL)") + if (self) { + MYLOG(ES_DEBUG, + "DESCRIPTOR ERROR: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", + func, desc, self->deschd.__error_number, + nullcheck(self->deschd.__error_message)); + } +} + +/* Returns the next SQL error information. */ +RETCODE SQL_API ESAPI_DescError(SQLHDESC hdesc, SQLSMALLINT RecNumber, + SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, + SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, + SQLSMALLINT *pcbErrorMsg, UWORD flag) { + /* CC: return an error of a hdesc */ + DescriptorClass *desc = (DescriptorClass *)hdesc; + DescriptorHeader *deschd = &(desc->deschd); + + MYLOG(ES_TRACE, "entering RecN=%hd\n", RecNumber); + deschd->eserror = DC_create_errorinfo(desc); + return ER_ReturnError(deschd->eserror, RecNumber, szSqlState, pfNativeError, + szErrorMsg, cbErrorMsgMax, pcbErrorMsg, flag); +} diff --git a/sql-odbc/src/odfesqlodbc/descriptor.h b/sql-odbc/src/odfesqlodbc/descriptor.h new file mode 100644 index 0000000000..6ff4f4bfec --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/descriptor.h @@ -0,0 +1,270 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __DESCRIPTOR_H__ +#define __DESCRIPTOR_H__ + +#include "es_odbc.h" + +#ifdef WIN32 +#pragma warning(push) +#pragma warning(disable : 4201) // nonstandard extension used: nameless + // struct/union warning +#endif // WIN32 + +typedef struct InheritanceClass { + UInt4 allocated; + UInt4 count; + OID cur_tableoid; + esNAME cur_fullTable; + struct { + OID tableoid; + esNAME fullTable; + } inf[1]; +} InheritanceClass; + +enum { + TI_UPDATABLE = 1L, + TI_HASOIDS_CHECKED = (1L << 1), + TI_HASOIDS = (1L << 2), + TI_COLATTRIBUTE = (1L << 3), + TI_HASSUBCLASS = (1L << 4) +}; +typedef struct { + OID table_oid; + COL_INFO *col_info; /* cached SQLColumns info for this table */ + esNAME schema_name; + esNAME table_name; + esNAME table_alias; + esNAME bestitem; + esNAME bestqual; + UInt4 flags; + InheritanceClass *ih; +} TABLE_INFO; +#define TI_set_updatable(ti) (ti->flags |= TI_UPDATABLE) +#define TI_is_updatable(ti) (0 != (ti->flags & TI_UPDATABLE)) +#define TI_no_updatable(ti) (ti->flags &= (~TI_UPDATABLE)) +#define TI_set_hasoids_checked(ti) (ti->flags |= TI_HASOIDS_CHECKED) +#define TI_checked_hasoids(ti) (0 != (ti->flags & TI_HASOIDS)) +#define TI_set_hasoids(ti) (ti->flags |= TI_HASOIDS) +#define TI_has_oids(ti) (0 != (ti->flags & TI_HASOIDS)) +#define TI_set_has_no_oids(ti) (ti->flags &= (~TI_HASOIDS)) +#define TI_set_hassubclass(ti) (ti->flags |= TI_HASSUBCLASS) +#define TI_has_subclass(ti) (0 != (ti->flags & TI_HASSUBCLASS)) +#define TI_set_has_no_subclass(ti) (ti->flags &= (~TI_HASSUBCLASS)) +void TI_Destructor(TABLE_INFO **, int); +void TI_Destroy_IH(TABLE_INFO *ti); + +enum { + FIELD_INITIALIZED = 0, + FIELD_PARSING = 1L, + FIELD_TEMP_SET = (1L << 1), + FIELD_COL_ATTRIBUTE = (1L << 2), + FIELD_PARSED_OK = (1L << 3), + FIELD_PARSED_INCOMPLETE = (1L << 4) +}; +typedef struct { + char flag; + char updatable; + Int2 attnum; + esNAME schema_name; + 
TABLE_INFO *ti; /* to resolve explicit table names */ + esNAME column_name; + esNAME column_alias; + char nullable; + char auto_increment; + char func; + char columnkey; + int column_size; /* precision in 2.x */ + int decimal_digits; /* scale in 2.x */ + int display_size; + SQLLEN length; + OID columntype; + OID basetype; /* may be the basetype when the column type is a domain */ + int typmod; + char expr; + char quote; + char dquote; + char numeric; + esNAME before_dot; +} FIELD_INFO; +Int4 FI_precision(const FIELD_INFO *); +void FI_Destructor(FIELD_INFO **, int, BOOL freeFI); +#define FI_is_applicable(fi) \ + (NULL != fi && (fi->flag & (FIELD_PARSED_OK | FIELD_COL_ATTRIBUTE)) != 0) +#define FI_type(fi) (0 == (fi)->basetype ? (fi)->columntype : (fi)->basetype) + +typedef struct DescriptorHeader_ { + ConnectionClass *conn_conn; + char embedded; + char type_defined; + UInt4 desc_type; + UInt4 error_row; /* 1-based row */ + UInt4 error_index; /* 1-based index */ + Int4 __error_number; + char *__error_message; + ES_ErrorInfo *eserror; +} DescriptorHeader; + +/* + * ARD and APD are(must be) of the same format + */ +struct ARDFields_ { + SQLLEN size_of_rowset; /* for ODBC3 fetch operation */ + SQLUINTEGER bind_size; /* size of each structure if using + * Row-wise Binding */ + SQLUSMALLINT *row_operation_ptr; + SQLULEN *row_offset_ptr; + BindInfoClass *bookmark; + BindInfoClass *bindings; + SQLSMALLINT allocated; + SQLLEN size_of_rowset_odbc2; /* for SQLExtendedFetch */ +}; + +/* + * APD must be of the same format as ARD + */ +struct APDFields_ { + SQLLEN paramset_size; /* really an SQLINTEGER type */ + SQLUINTEGER param_bind_type; /* size of each structure if using + * Row-wise Parameter Binding */ + SQLUSMALLINT *param_operation_ptr; + SQLULEN *param_offset_ptr; + ParameterInfoClass *bookmark; /* dummy item to fit APD to ARD */ + ParameterInfoClass *parameters; + SQLSMALLINT allocated; + SQLLEN paramset_size_dummy; /* dummy item to fit APD to ARD */ +}; + +struct 
IRDFields_ { + StatementClass *stmt; + SQLULEN *rowsFetched; + SQLUSMALLINT *rowStatusArray; + UInt4 nfields; + SQLSMALLINT allocated; + FIELD_INFO **fi; +}; + +struct IPDFields_ { + SQLULEN *param_processed_ptr; + SQLUSMALLINT *param_status_ptr; + SQLSMALLINT allocated; + ParameterImplClass *parameters; +}; + +/*** +typedef struct +{ + DescriptorHeader deschd; + ARDFields ardopts; +} ARDClass; +typedef struct +{ + DescriptorHeader deschd; + APDFields apdopts; +} APDClass; +typedef struct +{ + DescriptorHeader deschd; + IRDFields irdopts; +} IRDClass; +typedef struct +{ + DescriptorHeader deschd; + IPDFields ipdopts; +} IPDClass; +***/ +typedef struct { + DescriptorHeader deschd; + union { + ARDFields ardf; + APDFields apdf; + IRDFields irdf; + IPDFields ipdf; + }; +} DescriptorClass; + +#define DC_get_conn(a) ((a)->deschd.conn_conn) +#define DC_get_desc_type(a) ((a)->deschd.desc_type) +#define DC_get_embedded(a) ((a)->deschd.embedded) + +void InitializeEmbeddedDescriptor(DescriptorClass *, StatementClass *stmt, + UInt4 desc_type); +void DC_Destructor(DescriptorClass *desc); +void InitializeARDFields(ARDFields *self); +void InitializeAPDFields(APDFields *self); +/* void InitializeIRDFields(IRDFields *self); +void InitializeIPDFiedls(IPDFields *self); */ +BindInfoClass *ARD_AllocBookmark(ARDFields *self); +void ARD_unbind_cols(ARDFields *self, BOOL freeall); +void APD_free_params(APDFields *self, char option); +void IPD_free_params(IPDFields *self, char option); +RETCODE DC_set_stmt(DescriptorClass *desc, StatementClass *stmt); +void DC_set_error(DescriptorClass *desc, int errornumber, const char *errormsg); +void DC_set_errormsg(DescriptorClass *desc, const char *errormsg); +ES_ErrorInfo *DC_get_error(DescriptorClass *self); +int DC_get_errornumber(const DescriptorClass *self); +const char *DC_get_errormsg(const DescriptorClass *self); +void DC_log_error(const char *func, const char *desc, + const DescriptorClass *self); + +/* Error numbers about descriptor handle 
*/ +enum { + LOWEST_DESC_ERROR = -2 + /* minus means warning/notice message */ + , + DESC_ERROR_IN_ROW = -2, + DESC_OPTION_VALUE_CHANGED = -1, + DESC_OK = 0, + DESC_EXEC_ERROR, + DESC_STATUS_ERROR, + DESC_SEQUENCE_ERROR, + DESC_NO_MEMORY_ERROR, + DESC_COLNUM_ERROR, + DESC_NO_STMTSTRING, + DESC_ERROR_TAKEN_FROM_BACKEND, + DESC_INTERNAL_ERROR, + DESC_STILL_EXECUTING, + DESC_NOT_IMPLEMENTED_ERROR, + DESC_BAD_PARAMETER_NUMBER_ERROR, + DESC_OPTION_OUT_OF_RANGE_ERROR, + DESC_INVALID_COLUMN_NUMBER_ERROR, + DESC_RESTRICTED_DATA_TYPE_ERROR, + DESC_INVALID_CURSOR_STATE_ERROR, + DESC_CREATE_TABLE_ERROR, + DESC_NO_CURSOR_NAME, + DESC_INVALID_CURSOR_NAME, + DESC_INVALID_ARGUMENT_NO, + DESC_ROW_OUT_OF_RANGE, + DESC_OPERATION_CANCELLED, + DESC_INVALID_CURSOR_POSITION, + DESC_VALUE_OUT_OF_RANGE, + DESC_OPERATION_INVALID, + DESC_PROGRAM_TYPE_OUT_OF_RANGE, + DESC_BAD_ERROR, + DESC_INVALID_OPTION_IDENTIFIER, + DESC_RETURN_NULL_WITHOUT_INDICATOR, + DESC_INVALID_DESCRIPTOR_IDENTIFIER, + DESC_OPTION_NOT_FOR_THE_DRIVER, + DESC_FETCH_OUT_OF_RANGE, + DESC_COUNT_FIELD_INCORRECT +}; + +#ifdef WIN32 +#pragma warning(pop) +#endif // WIN32 + +#endif /* __DESCRIPTOR_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/dlg_specific.c b/sql-odbc/src/odfesqlodbc/dlg_specific.c new file mode 100644 index 0000000000..5578bb64ef --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/dlg_specific.c @@ -0,0 +1,522 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "dlg_specific.h" + +#include + +#include "es_apifunc.h" +#include "misc.h" + +#define NULL_IF_NULL(a) ((a) ? ((const char *)(a)) : "(null)") + +static void encode(const esNAME, char *out, int outlen); +static esNAME decode(const char *in); +static esNAME decode_or_remove_braces(const char *in); + +#define OVR_EXTRA_BITS \ + (BIT_FORCEABBREVCONNSTR | BIT_FAKE_MSS | BIT_BDE_ENVIRONMENT \ + | BIT_CVT_NULL_DATE | BIT_ACCESSIBLE_ONLY | BIT_IGNORE_ROUND_TRIP_TIME \ + | BIT_DISABLE_KEEPALIVE) + +#define OPENING_BRACKET '{' +#define CLOSING_BRACKET '}' + +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wembedded-directive" +#endif // __APPLE__ +void makeConnectString(char *connect_string, const ConnInfo *ci, UWORD len) { + UNUSED(len); + char got_dsn = (ci->dsn[0] != '\0'); + char encoded_item[LARGE_REGISTRY_LEN]; + char *connsetStr = NULL; + char *esoptStr = NULL; +#ifdef _HANDLE_ENLIST_IN_DTC_ + char xaOptStr[16]; +#endif + ssize_t hlen, nlen, olen; + + encode(ci->password, encoded_item, sizeof(encoded_item)); + /* fundamental info */ + nlen = MAX_CONNECT_STRING; + olen = snprintf( + connect_string, nlen, + "%s=%s;" INI_SERVER + "=%s;" + "database=elasticsearch;" INI_PORT "=%s;" INI_USERNAME_ABBR + "=%s;" INI_PASSWORD_ABBR "=%s;" INI_AUTH_MODE "=%s;" INI_REGION + "=%s;" INI_SSL_USE "=%d;" INI_SSL_HOST_VERIFY "=%d;" INI_LOG_LEVEL + "=%d;" INI_LOG_OUTPUT "=%s;" INI_TIMEOUT "=%s;" INI_FETCH_SIZE "=%s;", + got_dsn ? "DSN" : "DRIVER", got_dsn ? 
ci->dsn : ci->drivername, + ci->server, ci->port, ci->username, encoded_item, ci->authtype, + ci->region, (int)ci->use_ssl, (int)ci->verify_server, + (int)ci->drivers.loglevel, ci->drivers.output_dir, + ci->response_timeout, ci->fetch_size); + if (olen < 0 || olen >= nlen) { + connect_string[0] = '\0'; + return; + } + + /* extra info */ + hlen = strlen(connect_string); + nlen = MAX_CONNECT_STRING - hlen; + if (olen < 0 || olen >= nlen) /* failed */ + connect_string[0] = '\0'; + + if (NULL != connsetStr) + free(connsetStr); + if (NULL != esoptStr) + free(esoptStr); +} +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + +BOOL get_DSN_or_Driver(ConnInfo *ci, const char *attribute, const char *value) { + BOOL found = TRUE; + + if (stricmp(attribute, "DSN") == 0) + STRCPY_FIXED(ci->dsn, value); + else if (stricmp(attribute, "driver") == 0) + STRCPY_FIXED(ci->drivername, value); + else + found = FALSE; + + return found; +} + +BOOL copyConnAttributes(ConnInfo *ci, const char *attribute, + const char *value) { + BOOL found = TRUE, printed = FALSE; + if (stricmp(attribute, "DSN") == 0) + STRCPY_FIXED(ci->dsn, value); + else if (stricmp(attribute, "driver") == 0) + STRCPY_FIXED(ci->drivername, value); + else if ((stricmp(attribute, INI_HOST) == 0) + || (stricmp(attribute, INI_SERVER) == 0)) + STRCPY_FIXED(ci->server, value); + else if (stricmp(attribute, INI_PORT) == 0) + STRCPY_FIXED(ci->port, value); + else if ((stricmp(attribute, INI_USERNAME) == 0) + || (stricmp(attribute, INI_USERNAME_ABBR) == 0)) + STRCPY_FIXED(ci->username, value); + else if ((stricmp(attribute, INI_PASSWORD) == 0) + || (stricmp(attribute, INI_PASSWORD_ABBR) == 0)) { + ci->password = decode_or_remove_braces(value); +#ifndef FORCE_PASSWORDE_DISPLAY + MYLOG(ES_DEBUG, "key='%s' value='xxxxxxxx'\n", attribute); + printed = TRUE; +#endif + } else if (stricmp(attribute, INI_AUTH_MODE) == 0) + STRCPY_FIXED(ci->authtype, value); + else if (stricmp(attribute, INI_REGION) == 0) + 
STRCPY_FIXED(ci->region, value); + else if (stricmp(attribute, INI_SSL_USE) == 0) + ci->use_ssl = (char)atoi(value); + else if (stricmp(attribute, INI_SSL_HOST_VERIFY) == 0) + ci->verify_server = (char)atoi(value); + else if (stricmp(attribute, INI_LOG_LEVEL) == 0) + ci->drivers.loglevel = (char)atoi(value); + else if (stricmp(attribute, INI_LOG_OUTPUT) == 0) + STRCPY_FIXED(ci->drivers.output_dir, value); + else if (stricmp(attribute, INI_TIMEOUT) == 0) + STRCPY_FIXED(ci->response_timeout, value); + else if (stricmp(attribute, INI_FETCH_SIZE) == 0) + STRCPY_FIXED(ci->fetch_size, value); + else + found = FALSE; + + if (!printed) + MYLOG(ES_DEBUG, "key='%s' value='%s'%s\n", attribute, value, + found ? NULL_STRING : " not found"); + + return found; +} + +static void getCiDefaults(ConnInfo *ci) { + strncpy(ci->desc, DEFAULT_DESC, MEDIUM_REGISTRY_LEN); + strncpy(ci->drivername, DEFAULT_DRIVERNAME, MEDIUM_REGISTRY_LEN); + strncpy(ci->server, DEFAULT_HOST, MEDIUM_REGISTRY_LEN); + strncpy(ci->port, DEFAULT_PORT, SMALL_REGISTRY_LEN); + strncpy(ci->response_timeout, DEFAULT_RESPONSE_TIMEOUT_STR, + SMALL_REGISTRY_LEN); + strncpy(ci->fetch_size, DEFAULT_FETCH_SIZE_STR, + SMALL_REGISTRY_LEN); + strncpy(ci->authtype, DEFAULT_AUTHTYPE, MEDIUM_REGISTRY_LEN); + if (ci->password.name != NULL) + free(ci->password.name); + ci->password.name = NULL; + strncpy(ci->username, DEFAULT_USERNAME, MEDIUM_REGISTRY_LEN); + strncpy(ci->region, DEFAULT_REGION, MEDIUM_REGISTRY_LEN); + ci->use_ssl = DEFAULT_USE_SSL; + ci->verify_server = DEFAULT_VERIFY_SERVER; + strcpy(ci->drivers.output_dir, "C:\\"); +} + +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wembedded-directive" +#endif // __APPLE__ +int getDriverNameFromDSN(const char *dsn, char *driver_name, int namelen) { +#ifdef WIN32 + return SQLGetPrivateProfileString(ODBC_DATASOURCES, dsn, NULL_STRING, + driver_name, namelen, ODBC_INI); +#else /* WIN32 */ + int cnt; + + cnt = SQLGetPrivateProfileString(dsn, 
"Driver", NULL_STRING, driver_name, + namelen, ODBC_INI); + if (!driver_name[0]) + return cnt; + if (strchr(driver_name, '/') || /* path to the driver */ + strchr(driver_name, '.')) { + driver_name[0] = '\0'; + return 0; + } + return cnt; +#endif /* WIN32 */ +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ +} + +void getDriversDefaults(const char *drivername, GLOBAL_VALUES *comval) { + if (NULL != drivername) + STR_TO_NAME(comval->drivername, drivername); +} + +void getDSNinfo(ConnInfo *ci, const char *configDrvrname) { + char *DSN = ci->dsn; + char temp[LARGE_REGISTRY_LEN]; + const char *drivername; + getCiDefaults(ci); + drivername = ci->drivername; + if (DSN[0] == '\0') { + if (drivername[0] == '\0') /* adding new DSN via configDSN */ + { + if (configDrvrname) + drivername = configDrvrname; + strncpy_null(DSN, INI_DSN, sizeof(ci->dsn)); + } + /* else dns-less connections */ + } + + /* brute-force chop off trailing blanks... */ + while (*(DSN + strlen(DSN) - 1) == ' ') + *(DSN + strlen(DSN) - 1) = '\0'; + + if (!drivername[0] && DSN[0]) + getDriverNameFromDSN(DSN, (char *)drivername, sizeof(ci->drivername)); + MYLOG(ES_DEBUG, "drivername=%s\n", drivername); + if (!drivername[0]) + drivername = INVALID_DRIVER; + getDriversDefaults(drivername, &(ci->drivers)); + + if (DSN[0] == '\0') + return; + + /* Proceed with getting info for the given DSN. 
*/ + if (SQLGetPrivateProfileString(DSN, INI_SERVER, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->server, temp); + if (SQLGetPrivateProfileString(DSN, INI_HOST, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->server, temp); + if (SQLGetPrivateProfileString(DSN, INI_PORT, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->port, temp); + if (SQLGetPrivateProfileString(DSN, INI_USERNAME, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->username, temp); + if (SQLGetPrivateProfileString(DSN, INI_USERNAME_ABBR, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->username, temp); + if (SQLGetPrivateProfileString(DSN, INI_PASSWORD, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + ci->password = decode(temp); + if (SQLGetPrivateProfileString(DSN, INI_PASSWORD_ABBR, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + ci->password = decode(temp); + if (SQLGetPrivateProfileString(DSN, INI_AUTH_MODE, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->authtype, temp); + if (SQLGetPrivateProfileString(DSN, INI_REGION, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->region, temp); + if (SQLGetPrivateProfileString(DSN, INI_SSL_USE, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + ci->use_ssl = (char)atoi(temp); + if (SQLGetPrivateProfileString(DSN, INI_SSL_HOST_VERIFY, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + ci->verify_server = (char)atoi(temp); + if (SQLGetPrivateProfileString(DSN, INI_LOG_LEVEL, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + ci->drivers.loglevel = (char)atoi(temp); + if (SQLGetPrivateProfileString(DSN, INI_LOG_OUTPUT, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->drivers.output_dir, temp); + if (SQLGetPrivateProfileString(DSN, INI_TIMEOUT, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->response_timeout, temp); + if 
(SQLGetPrivateProfileString(DSN, INI_FETCH_SIZE, NULL_STRING, temp, + sizeof(temp), ODBC_INI) + > 0) + STRCPY_FIXED(ci->fetch_size, temp); + STR_TO_NAME(ci->drivers.drivername, drivername); +} +/* + * This function writes any global parameters (that can be manipulated) + * to the ODBCINST.INI portion of the registry + */ +int write_Ci_Drivers(const char *fileName, const char *sectionName, + const GLOBAL_VALUES *comval) { + UNUSED(comval, fileName, sectionName); + + // We don't need anything here + return 0; +} + +int writeDriversDefaults(const char *drivername, const GLOBAL_VALUES *comval) { + return write_Ci_Drivers(ODBCINST_INI, drivername, comval); +} + +/* This is for datasource based options only */ +void writeDSNinfo(const ConnInfo *ci) { + const char *DSN = ci->dsn; + char encoded_item[MEDIUM_REGISTRY_LEN], temp[SMALL_REGISTRY_LEN]; + + SQLWritePrivateProfileString(DSN, INI_HOST, ci->server, ODBC_INI); + SQLWritePrivateProfileString(DSN, INI_PORT, ci->port, ODBC_INI); + SQLWritePrivateProfileString(DSN, INI_USERNAME, ci->username, ODBC_INI); + encode(ci->password, encoded_item, sizeof(encoded_item)); + SQLWritePrivateProfileString(DSN, INI_PASSWORD, encoded_item, ODBC_INI); + SQLWritePrivateProfileString(DSN, INI_AUTH_MODE, ci->authtype, ODBC_INI); + SQLWritePrivateProfileString(DSN, INI_REGION, ci->region, ODBC_INI); + ITOA_FIXED(temp, ci->use_ssl); + SQLWritePrivateProfileString(DSN, INI_SSL_USE, temp, ODBC_INI); + ITOA_FIXED(temp, ci->verify_server); + SQLWritePrivateProfileString(DSN, INI_SSL_HOST_VERIFY, temp, ODBC_INI); + ITOA_FIXED(temp, ci->drivers.loglevel); + SQLWritePrivateProfileString(DSN, INI_LOG_LEVEL, temp, ODBC_INI); + SQLWritePrivateProfileString(DSN, INI_LOG_OUTPUT, ci->drivers.output_dir, + ODBC_INI); + SQLWritePrivateProfileString(DSN, INI_TIMEOUT, ci->response_timeout, + ODBC_INI); + SQLWritePrivateProfileString(DSN, INI_FETCH_SIZE, ci->fetch_size, + ODBC_INI); + +} + +static void encode(const esNAME in, char *out, int outlen) { + 
size_t i, ilen = 0; + int o = 0; + char inc, *ins; + + if (NAME_IS_NULL(in)) { + out[0] = '\0'; + return; + } + ins = GET_NAME(in); + ilen = strlen(ins); + for (i = 0; i < ilen && o < outlen - 1; i++) { + inc = ins[i]; + if (inc == '+') { + if (o + 2 >= outlen) + break; + snprintf(&out[o], outlen - o, "%%2B"); + o += 3; + } else if (isspace((unsigned char)inc)) + out[o++] = '+'; + else if (!isalnum((unsigned char)inc)) { + if (o + 2 >= outlen) + break; + snprintf(&out[o], outlen - o, "%%%02x", inc); + o += 3; + } else + out[o++] = inc; + } + out[o++] = '\0'; +} + +static unsigned int conv_from_hex(const char *s) { + int i, y = 0, val; + + for (i = 1; i <= 2; i++) { + if (s[i] >= 'a' && s[i] <= 'f') + val = s[i] - 'a' + 10; + else if (s[i] >= 'A' && s[i] <= 'F') + val = s[i] - 'A' + 10; + else + val = s[i] - '0'; + + y += val << (4 * (2 - i)); + } + + return y; +} + +static esNAME decode(const char *in) { + size_t i, ilen = strlen(in), o = 0; + char inc, *outs; + esNAME out; + + INIT_NAME(out); + if (0 == ilen) { + return out; + } + outs = (char *)malloc(ilen + 1); + if (!outs) + return out; + for (i = 0; i < ilen; i++) { + inc = in[i]; + if (inc == '+') + outs[o++] = ' '; + else if (inc == '%') { + snprintf(&outs[o], ilen + 1 - o, "%c", conv_from_hex(&in[i])); + o++; + i += 2; + } else + outs[o++] = inc; + } + outs[o++] = '\0'; + STR_TO_NAME(out, outs); + free(outs); + return out; +} + +/* + * Remove braces if the input value is enclosed by braces({}). + * Othewise decode the input value. 
+ */ +static esNAME decode_or_remove_braces(const char *in) { + if (OPENING_BRACKET == in[0]) { + size_t inlen = strlen(in); + if (CLOSING_BRACKET == in[inlen - 1]) /* enclosed with braces */ + { + int i; + const char *istr, *eptr; + char *ostr; + esNAME out; + + INIT_NAME(out); + if (NULL == (ostr = (char *)malloc(inlen))) + return out; + eptr = in + inlen - 1; + for (istr = in + 1, i = 0; *istr && istr < eptr; i++) { + if (CLOSING_BRACKET == istr[0] && CLOSING_BRACKET == istr[1]) + istr++; + ostr[i] = *(istr++); + } + ostr[i] = '\0'; + SET_NAME_DIRECTLY(out, ostr); + return out; + } + } + return decode(in); +} + +void CC_conninfo_release(ConnInfo *conninfo) { + NULL_THE_NAME(conninfo->password); + finalize_globals(&conninfo->drivers); +} + +void CC_conninfo_init(ConnInfo *conninfo, UInt4 option) { + MYLOG(ES_TRACE, "entering opt=%d\n", option); + + if (0 != (CLEANUP_FOR_REUSE & option)) + CC_conninfo_release(conninfo); + memset(conninfo, 0, sizeof(ConnInfo)); + + strncpy(conninfo->dsn, DEFAULT_DSN, MEDIUM_REGISTRY_LEN); + strncpy(conninfo->desc, DEFAULT_DESC, MEDIUM_REGISTRY_LEN); + strncpy(conninfo->drivername, DEFAULT_DRIVERNAME, MEDIUM_REGISTRY_LEN); + strncpy(conninfo->server, DEFAULT_HOST, MEDIUM_REGISTRY_LEN); + strncpy(conninfo->port, DEFAULT_PORT, SMALL_REGISTRY_LEN); + strncpy(conninfo->response_timeout, DEFAULT_RESPONSE_TIMEOUT_STR, + SMALL_REGISTRY_LEN); + strncpy(conninfo->fetch_size, DEFAULT_FETCH_SIZE_STR, + SMALL_REGISTRY_LEN); + strncpy(conninfo->authtype, DEFAULT_AUTHTYPE, MEDIUM_REGISTRY_LEN); + if (conninfo->password.name != NULL) + free(conninfo->password.name); + conninfo->password.name = NULL; + strncpy(conninfo->username, DEFAULT_USERNAME, MEDIUM_REGISTRY_LEN); + strncpy(conninfo->region, DEFAULT_REGION, MEDIUM_REGISTRY_LEN); + conninfo->use_ssl = DEFAULT_USE_SSL; + conninfo->verify_server = DEFAULT_VERIFY_SERVER; + + if (0 != (INIT_GLOBALS & option)) + init_globals(&(conninfo->drivers)); +} + +void init_globals(GLOBAL_VALUES *glbv) { + 
memset(glbv, 0, sizeof(*glbv)); + glbv->loglevel = DEFAULT_LOGLEVEL; + glbv->output_dir[0] = '\0'; +} + +#define CORR_STRCPY(item) strncpy_null(to->item, from->item, sizeof(to->item)) +#define CORR_VALCPY(item) (to->item = from->item) + +void copy_globals(GLOBAL_VALUES *to, const GLOBAL_VALUES *from) { + memset(to, 0, sizeof(*to)); + NAME_TO_NAME(to->drivername, from->drivername); + CORR_VALCPY(loglevel); +} + +void finalize_globals(GLOBAL_VALUES *glbv) { + NULL_THE_NAME(glbv->drivername); +} + +#undef CORR_STRCPY +#undef CORR_VALCPY +#define CORR_STRCPY(item) strncpy_null(ci->item, sci->item, sizeof(ci->item)) +#define CORR_VALCPY(item) (ci->item = sci->item) + +void CC_copy_conninfo(ConnInfo *ci, const ConnInfo *sci) { + memset(ci, 0, sizeof(ConnInfo)); + CORR_STRCPY(dsn); + CORR_STRCPY(desc); + CORR_STRCPY(drivername); + CORR_STRCPY(server); + CORR_STRCPY(username); + CORR_STRCPY(authtype); + CORR_STRCPY(region); + NAME_TO_NAME(ci->password, sci->password); + CORR_VALCPY(use_ssl); + CORR_VALCPY(verify_server); + CORR_STRCPY(port); + CORR_STRCPY(response_timeout); + CORR_STRCPY(fetch_size); + copy_globals(&(ci->drivers), &(sci->drivers)); +} +#undef CORR_STRCPY +#undef CORR_VALCPY diff --git a/sql-odbc/src/odfesqlodbc/dlg_specific.h b/sql-odbc/src/odfesqlodbc/dlg_specific.h new file mode 100644 index 0000000000..02af631ae2 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/dlg_specific.h @@ -0,0 +1,214 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __DLG_SPECIFIC_H__ +#define __DLG_SPECIFIC_H__ + +#include "es_odbc.h" + +#ifdef WIN32 +#include + +#include "resource.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +/* Unknown data type sizes */ +#define UNKNOWNS_AS_MAX 0 +#define UNKNOWNS_AS_DONTKNOW 1 +#define UNKNOWNS_AS_LONGEST 2 + +/* ODBC initialization files */ +#ifndef WIN32 +#define ODBC_INI ".odbc.ini" +#define ODBCINST_INI "odbcinst.ini" +#else +#define ODBC_INI "ODBC.INI" +#define ODBCINST_INI "ODBCINST.INI" +#endif + +#define ODBC_DATASOURCES "ODBC Data Sources" +#define INVALID_DRIVER " @@driver not exist@@ " + +#ifdef UNICODE_SUPPORT +#define INI_DSN "Elasticsearch35W" +#else +#define INI_DSN "Elasticsearch30" +#endif /* UNICODE_SUPPORT */ + +#define INI_HOST "host" +#define INI_SERVER "server" +#define INI_PORT "port" +#define INI_USERNAME "user" +#define INI_USERNAME_ABBR "UID" +#define INI_PASSWORD "password" +#define INI_PASSWORD_ABBR "PWD" +#define INI_AUTH_MODE "auth" +#define INI_REGION "region" +#define INI_SSL_USE "useSSL" +#define INI_SSL_HOST_VERIFY "hostnameVerification" +#define INI_LOG_LEVEL "logLevel" +#define INI_LOG_OUTPUT "logOutput" +#define INI_TIMEOUT "responseTimeout" +#define INI_FETCH_SIZE "fetchSize" + +#define DEFAULT_FETCH_SIZE -1 +#define DEFAULT_FETCH_SIZE_STR "-1" +#define DEFAULT_RESPONSE_TIMEOUT 10 // Seconds +#define DEFAULT_RESPONSE_TIMEOUT_STR "10" +#define DEFAULT_AUTHTYPE "NONE" +#define DEFAULT_HOST "" +#define DEFAULT_PORT "" +#define DEFAULT_USERNAME "" +#define DEFAULT_PASSWORD "" +#define DEFAULT_DRIVERNAME "elasticsearchodbc" +#define DEFAULT_DESC "" +#define DEFAULT_DSN "" +#define DEFAULT_REGION "" +#define DEFAULT_VERIFY_SERVER 1 + +#define AUTHTYPE_NONE "NONE" +#define AUTHTYPE_BASIC "BASIC" +#define AUTHTYPE_IAM "AWS_SIGV4" + +#ifdef _HANDLE_ENLIST_IN_DTC_ +#define INI_XAOPT "XaOpt" +#endif /* 
_HANDLE_ENLIST_IN_DTC_ */ +/* Bit representation for abbreviated connection strings */ +#define BIT_LFCONVERSION (1L) +#define BIT_UPDATABLECURSORS (1L << 1) +/* #define BIT_DISALLOWPREMATURE (1L<<2) */ +#define BIT_UNIQUEINDEX (1L << 3) +#define BIT_UNKNOWN_DONTKNOW (1L << 6) +#define BIT_UNKNOWN_ASMAX (1L << 7) +#define BIT_COMMLOG (1L << 10) +#define BIT_DEBUG (1L << 11) +#define BIT_PARSE (1L << 12) +#define BIT_CANCELASFREESTMT (1L << 13) +#define BIT_USEDECLAREFETCH (1L << 14) +#define BIT_READONLY (1L << 15) +#define BIT_TEXTASLONGVARCHAR (1L << 16) +#define BIT_UNKNOWNSASLONGVARCHAR (1L << 17) +#define BIT_BOOLSASCHAR (1L << 18) +#define BIT_ROWVERSIONING (1L << 19) +#define BIT_SHOWSYSTEMTABLES (1L << 20) +#define BIT_SHOWOIDCOLUMN (1L << 21) +#define BIT_FAKEOIDINDEX (1L << 22) +#define BIT_TRUEISMINUS1 (1L << 23) +#define BIT_BYTEAASLONGVARBINARY (1L << 24) +#define BIT_USESERVERSIDEPREPARE (1L << 25) +#define BIT_LOWERCASEIDENTIFIER (1L << 26) + +#define EFFECTIVE_BIT_COUNT 28 + +/* Mask for extra options */ +#define BIT_FORCEABBREVCONNSTR 1L +#define BIT_FAKE_MSS (1L << 1) +#define BIT_BDE_ENVIRONMENT (1L << 2) +#define BIT_CVT_NULL_DATE (1L << 3) +#define BIT_ACCESSIBLE_ONLY (1L << 4) +#define BIT_IGNORE_ROUND_TRIP_TIME (1L << 5) +#define BIT_DISABLE_KEEPALIVE (1L << 6) + +/* Connection Defaults */ +#define DEFAULT_READONLY 1 +#define DEFAULT_PROTOCOL \ + "7.4" /* the latest protocol is \ \ + * the default */ +#define DEFAULT_USEDECLAREFETCH 0 +#define DEFAULT_TEXTASLONGVARCHAR 0 +#define DEFAULT_UNKNOWNSASLONGVARCHAR 0 +#define DEFAULT_BOOLSASCHAR 0 +#define DEFAULT_UNIQUEINDEX 1 /* dont recognize */ +#define DEFAULT_LOGLEVEL ES_WARNING +#define DEFAULT_USE_SSL 0 +#define DEFAULT_TRUST_SELF_SIGNED 0 +#define DEFAULT_AUTH_MODE "NONE" +#define DEFAULT_REGION "" +#define DEFAULT_CERTIFICATE "" +#define DEFAULT_KEY "" +#define DEFAULT_UNKNOWNSIZES UNKNOWNS_AS_MAX + +#define DEFAULT_FAKEOIDINDEX 0 +#define DEFAULT_SHOWOIDCOLUMN 0 +#define 
DEFAULT_ROWVERSIONING 0 +#define DEFAULT_SHOWSYSTEMTABLES 0 /* dont show system tables */ +#define DEFAULT_LIE 0 +#define DEFAULT_PARSE 0 + +#define DEFAULT_CANCELASFREESTMT 0 + +#define DEFAULT_EXTRASYSTABLEPREFIXES "" + +#define DEFAULT_TRUEISMINUS1 0 +#define DEFAULT_UPDATABLECURSORS 1 +#ifdef WIN32 +#define DEFAULT_LFCONVERSION 1 +#else +#define DEFAULT_LFCONVERSION 0 +#endif /* WIN32 */ +#define DEFAULT_INT8AS 0 +#define DEFAULT_BYTEAASLONGVARBINARY 0 +#define DEFAULT_USESERVERSIDEPREPARE 1 +#define DEFAULT_LOWERCASEIDENTIFIER 0 +#define DEFAULT_NUMERIC_AS (-101) + +#ifdef _HANDLE_ENLIST_IN_DTC_ +#define DEFAULT_XAOPT 1 +#endif /* _HANDLE_ENLIST_IN_DTC_ */ + +/* for CC_DSN_info */ +#define CONN_DONT_OVERWRITE 0 +#define CONN_OVERWRITE 1 + +struct authmode { + int authtype_id; + const char *authtype_str; +}; +const struct authmode *GetAuthModes(); + +/* prototypes */ + +#ifdef WIN32 +void SetDlgStuff(HWND hdlg, const ConnInfo *ci); +void GetDlgStuff(HWND hdlg, ConnInfo *ci); +INT_PTR CALLBACK advancedOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, + LPARAM lParam); +INT_PTR CALLBACK logOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, LPARAM lParam); +#endif /* WIN32 */ + +int write_Ci_Drivers(const char *fileName, const char *sectionName, + const GLOBAL_VALUES *); +int writeDriversDefaults(const char *drivername, const GLOBAL_VALUES *); +void writeDSNinfo(const ConnInfo *ci); +void getDriversDefaults(const char *drivername, GLOBAL_VALUES *); +void getDSNinfo(ConnInfo *ci, const char *configDrvrname); +void makeConnectString(char *connect_string, const ConnInfo *ci, UWORD); +BOOL get_DSN_or_Driver(ConnInfo *ci, const char *attribute, const char *value); +BOOL copyConnAttributes(ConnInfo *ci, const char *attribute, const char *value); +int getDriverNameFromDSN(const char *dsn, char *driver_name, int namelen); +UInt4 getExtraOptions(const ConnInfo *); +void SetAuthenticationVisibility(HWND hdlg, const struct authmode *am); +const struct authmode 
*GetCurrentAuthMode(HWND hdlg); +int *GetLogLevels(); +int GetCurrentLogLevel(HWND hdlg); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __DLG_SPECIFIC_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/dlg_wingui.c b/sql-odbc/src/odfesqlodbc/dlg_wingui.c new file mode 100644 index 0000000000..62f76abe0a --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/dlg_wingui.c @@ -0,0 +1,268 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifdef WIN32 + +#include "dlg_specific.h" +#include "es_apifunc.h" +#include "loadlib.h" +#include "misc.h" // strncpy_null +#include "win_setup.h" +#ifdef _HANDLE_ENLIST_IN_DTC_ +#include "connexp.h" +#include "xalibname.h" +#endif /* _HANDLE_ENLIST_IN_DTC_ */ + +#define AUTHMODE_CNT 3 +#define LOGLEVEL_CNT 8 +extern HINSTANCE s_hModule; + +int loglevels[LOGLEVEL_CNT] = { + {IDS_LOGTYPE_OFF}, + {IDS_LOGTYPE_FATAL}, + {IDS_LOGTYPE_ERROR}, + {IDS_LOGTYPE_WARNING}, + {IDS_LOGTYPE_INFO}, + {IDS_LOGTYPE_DEBUG}, + {IDS_LOGTYPE_TRACE}, + {IDS_LOGTYPE_ALL}}; + +static const struct authmode authmodes[AUTHMODE_CNT] = { + {IDS_AUTHTYPE_NONE, AUTHTYPE_IAM}, + {IDS_AUTHTYPE_BASIC, AUTHTYPE_BASIC}, + {IDS_AUTHTYPE_IAM, AUTHTYPE_NONE}}; + +const struct authmode *GetCurrentAuthMode(HWND hdlg) { + unsigned int ams_cnt = 0; + const struct authmode *ams = GetAuthModes(&ams_cnt); + unsigned int authtype_selection_idx = (unsigned int)(DWORD)SendMessage( + GetDlgItem(hdlg, IDC_AUTHTYPE), CB_GETCURSEL, 
0L, 0L); + if (authtype_selection_idx >= ams_cnt) + authtype_selection_idx = 0; + return &ams[authtype_selection_idx]; +} + +int *GetLogLevels(unsigned int *count) { + *count = LOGLEVEL_CNT; + return loglevels; +} + +int GetCurrentLogLevel(HWND hdlg) { + unsigned int log_cnt = 0; + int *log = GetLogLevels(&log_cnt); + unsigned int loglevel_selection_idx = (unsigned int)(DWORD)SendMessage( + GetDlgItem(hdlg, IDC_LOG_LEVEL), CB_GETCURSEL, 0L, 0L); + if (loglevel_selection_idx >= log_cnt) + loglevel_selection_idx = 0; + return log[loglevel_selection_idx]; +} + + +void SetAuthenticationVisibility(HWND hdlg, const struct authmode *am) { + if (strcmp(am->authtype_str, AUTHTYPE_BASIC) == 0) { + EnableWindow(GetDlgItem(hdlg, IDC_USER), TRUE); + EnableWindow(GetDlgItem(hdlg, IDC_PASSWORD), TRUE); + EnableWindow(GetDlgItem(hdlg, IDC_REGION), FALSE); + } else if (strcmp(am->authtype_str, AUTHTYPE_IAM) == 0) { + EnableWindow(GetDlgItem(hdlg, IDC_USER), FALSE); + EnableWindow(GetDlgItem(hdlg, IDC_PASSWORD), FALSE); + EnableWindow(GetDlgItem(hdlg, IDC_REGION), TRUE); + } else { + EnableWindow(GetDlgItem(hdlg, IDC_USER), FALSE); + EnableWindow(GetDlgItem(hdlg, IDC_PASSWORD), FALSE); + EnableWindow(GetDlgItem(hdlg, IDC_REGION), FALSE); + } +} + +void SetDlgStuff(HWND hdlg, const ConnInfo *ci) { + // Connection + SetDlgItemText(hdlg, IDC_DRIVER_VERSION, "V."ELASTICSEARCHDRIVERVERSION); + SetDlgItemText(hdlg, IDC_DSNAME, ci->dsn); + SetDlgItemText(hdlg, IDC_SERVER, ci->server); + SetDlgItemText(hdlg, IDC_PORT, ci->port); + + // Authentication + int authtype_selection_idx = 0; + unsigned int ams_cnt = 0; + const struct authmode *ams = GetAuthModes(&ams_cnt); + char buff[MEDIUM_REGISTRY_LEN + 1]; + for (unsigned int i = 0; i < ams_cnt; i++) { + LoadString(GetWindowInstance(hdlg), ams[i].authtype_id, buff, + MEDIUM_REGISTRY_LEN); + SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_ADDSTRING, 0, (WPARAM)buff); + if (!stricmp(ci->authtype, ams[i].authtype_str)) { + authtype_selection_idx = i; + 
} + } + SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, + ams[authtype_selection_idx].authtype_id, (WPARAM)0); + SetDlgItemText(hdlg, IDC_USER, ci->username); + SetDlgItemText(hdlg, IDC_PASSWORD, SAFE_NAME(ci->password)); + SetDlgItemText(hdlg, IDC_REGION, ci->region); +} + +static void GetNameField(HWND hdlg, int item, esNAME *name) { + char medium_buf[MEDIUM_REGISTRY_LEN + 1]; + GetDlgItemText(hdlg, item, medium_buf, sizeof(medium_buf)); + STR_TO_NAME((*name), medium_buf); +} + +void GetDlgStuff(HWND hdlg, ConnInfo *ci) { + // Connection + GetDlgItemText(hdlg, IDC_DESC, ci->desc, sizeof(ci->desc)); + GetDlgItemText(hdlg, IDC_SERVER, ci->server, sizeof(ci->server)); + GetDlgItemText(hdlg, IDC_PORT, ci->port, sizeof(ci->port)); + + // Authentication + GetDlgItemText(hdlg, IDC_USER, ci->username, sizeof(ci->username)); + GetNameField(hdlg, IDC_PASSWORD, &ci->password); + GetDlgItemText(hdlg, IDC_REGION, ci->region, sizeof(ci->region)); + const struct authmode *am = GetCurrentAuthMode(hdlg); + SetAuthenticationVisibility(hdlg, am); + STRCPY_FIXED(ci->authtype, am->authtype_str); + +} + +const struct authmode *GetAuthModes(unsigned int *count) { + *count = AUTHMODE_CNT; + return authmodes; +} +static void getDriversDefaultsOfCi(const ConnInfo *ci, GLOBAL_VALUES *glbv) { + const char *drivername = NULL; + + if (ci->drivername[0]) + drivername = ci->drivername; + else if (NAME_IS_VALID(ci->drivers.drivername)) + drivername = SAFE_NAME(ci->drivers.drivername); + if (drivername && drivername[0]) + getDriversDefaults(drivername, glbv); + else + getDriversDefaults(INVALID_DRIVER, glbv); +} + +INT_PTR CALLBACK advancedOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, + LPARAM lParam) { + switch (wMsg) { + case WM_INITDIALOG: { + SetWindowLongPtr(hdlg, DWLP_USER, lParam); + ConnInfo *ci = (ConnInfo *)lParam; + CheckDlgButton(hdlg, IDC_USESSL, ci->use_ssl); + CheckDlgButton(hdlg, IDC_HOST_VER, ci->verify_server); + SetDlgItemText(hdlg, IDC_CONNTIMEOUT, 
ci->response_timeout); + SetDlgItemText(hdlg, IDC_FETCH_SIZE, ci->fetch_size); + break; + } + + case WM_COMMAND: { + ConnInfo *ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); + switch (GET_WM_COMMAND_ID(wParam, lParam)) { + case IDOK: + // Get Dialog Values + ci->use_ssl = (IsDlgButtonChecked(hdlg, IDC_USESSL) ? 1 : 0); + ci->verify_server = (IsDlgButtonChecked(hdlg, IDC_HOST_VER) ? 1 : 0); + GetDlgItemText(hdlg, IDC_CONNTIMEOUT, ci->response_timeout, + sizeof(ci->response_timeout)); + GetDlgItemText(hdlg, IDC_FETCH_SIZE, ci->fetch_size, + sizeof(ci->fetch_size)); + case IDCANCEL: + EndDialog(hdlg, FALSE); + return TRUE; + } + } + } + return FALSE; +} + +INT_PTR CALLBACK logOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, + LPARAM lParam) { + switch (wMsg) { + case WM_INITDIALOG: { + ConnInfo *ci = (ConnInfo *)lParam; + SetWindowLongPtr(hdlg, DWLP_USER, lParam); + + // Logging + int loglevel_selection_idx = 0; + unsigned int log_cnt = 0; + int *log = GetLogLevels(&log_cnt); + char buff[MEDIUM_REGISTRY_LEN + 1]; + for (unsigned int i = 0; i < log_cnt; i++) { + LoadString(GetWindowInstance(hdlg), log[i], buff, + MEDIUM_REGISTRY_LEN); + SendDlgItemMessage(hdlg, IDC_LOG_LEVEL, CB_ADDSTRING, 0, + (WPARAM)buff); + if ((unsigned int)ci->drivers.loglevel == i) { + loglevel_selection_idx = i; + } + } + SendDlgItemMessage(hdlg, IDC_LOG_LEVEL, CB_SETCURSEL, + loglevel_selection_idx, (WPARAM)0); + SetDlgItemText(hdlg, IDC_LOG_PATH, ci->drivers.output_dir); + break; + } + + case WM_COMMAND: { + ConnInfo *ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); + switch (GET_WM_COMMAND_ID(wParam, lParam)) { + case IDOK: { + // Get Dialog Values + int log = GetCurrentLogLevel(hdlg); + switch (log) { + case IDS_LOGTYPE_OFF: + ci->drivers.loglevel = (char)ES_OFF; + break; + case IDS_LOGTYPE_FATAL: + ci->drivers.loglevel = (char)ES_FATAL; + break; + case IDS_LOGTYPE_ERROR: + ci->drivers.loglevel = (char)ES_ERROR; + break; + case IDS_LOGTYPE_WARNING: + ci->drivers.loglevel = 
(char)ES_WARNING; + break; + case IDS_LOGTYPE_INFO: + ci->drivers.loglevel = (char)ES_INFO; + break; + case IDS_LOGTYPE_DEBUG: + ci->drivers.loglevel = (char)ES_DEBUG; + break; + case IDS_LOGTYPE_TRACE: + ci->drivers.loglevel = (char)ES_TRACE; + break; + case IDS_LOGTYPE_ALL: + ci->drivers.loglevel = (char)ES_ALL; + break; + default: + ci->drivers.loglevel = (char)ES_OFF; + break; + } + setGlobalCommlog(ci->drivers.loglevel); + setGlobalDebug(ci->drivers.loglevel); + writeGlobalLogs(); + GetDlgItemText(hdlg, IDC_LOG_PATH, ci->drivers.output_dir, + sizeof(ci->drivers.output_dir)); + setLogDir(ci->drivers.output_dir); + } + + case IDCANCEL: + EndDialog(hdlg, FALSE); + return TRUE; + } + } + } + return FALSE; +} + +#endif /* WIN32 */ diff --git a/sql-odbc/src/odfesqlodbc/drvconn.c b/sql-odbc/src/odfesqlodbc/drvconn.c new file mode 100644 index 0000000000..b000845902 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/drvconn.c @@ -0,0 +1,336 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "drvconn.h" + +#include +#include + +#include "es_connection.h" +#include "es_odbc.h" +#include "misc.h" + +#ifndef WIN32 +#include +#include +#else +#include +#endif + +#include + +#ifdef WIN32 +#include + +#include "resource.h" +#include "win_setup.h" +#endif +#include "dlg_specific.h" +#include "es_apifunc.h" + +#ifdef WIN32 +INT_PTR CALLBACK dconn_FDriverConnectProc(HWND hdlg, UINT wMsg, WPARAM wParam, + LPARAM lParam); +extern HINSTANCE s_hModule; /* Saved module handle. */ +#endif + +char *hide_password(const char *str) { + char *outstr, *pwdp; + + if (!str) + return NULL; + outstr = strdup(str); + if (!outstr) + return NULL; + if (pwdp = strstr(outstr, "PWD="), !pwdp) + pwdp = strstr(outstr, "pwd="); + if (pwdp) { + char *p; + + for (p = pwdp + 4; *p && *p != ';'; p++) + *p = 'x'; + } + return outstr; +} + +int paramRequired(const ConnInfo *ci, int reqs) { + int required = 0; + const char *pw = SAFE_NAME(ci->password); + + /* Password is not necessarily a required parameter. 
*/ + if ((reqs & PASSWORD_IS_REQUIRED) != 0) + if ('\0' == pw[0]) + required |= PASSWORD_IS_REQUIRED; + + return required; +} + +#ifdef WIN32 +RETCODE +dconn_DoDialog(HWND hwnd, ConnInfo *ci) { + INT_PTR dialog_result; + + MYLOG(ES_TRACE, "entering ci = %p\n", ci); + + if (hwnd) { + dialog_result = + DialogBoxParam(s_hModule, MAKEINTRESOURCE(DLG_CONFIG), hwnd, + dconn_FDriverConnectProc, (LPARAM)ci); + if (-1 == dialog_result) { + int errc = GetLastError(); + MYLOG(ES_DEBUG, " LastError=%d\n", errc); + } + if (!dialog_result || (dialog_result == -1)) + return SQL_NO_DATA_FOUND; + else + return SQL_SUCCESS; + } + + MYLOG(ES_DEBUG, " No window specified\n"); + return SQL_ERROR; +} + +INT_PTR CALLBACK dconn_FDriverConnectProc(HWND hdlg, UINT wMsg, WPARAM wParam, + LPARAM lParam) { + MYLOG(ES_DEBUG, "dconn_FDriverConnectProc\n"); + ConnInfo *ci; + + switch (wMsg) { + case WM_INITDIALOG: + ci = (ConnInfo *)lParam; + + /* Change the caption for the setup dialog */ + SetWindowText(hdlg, "Elasticsearch Connection"); + + /* Hide the DSN and description fields */ + ShowWindow(GetDlgItem(hdlg, IDC_DSNAMETEXT), SW_HIDE); + ShowWindow(GetDlgItem(hdlg, IDC_DSNAME), SW_HIDE); + + SetWindowLongPtr(hdlg, DWLP_USER, + lParam); /* Save the ConnInfo for the "OK" */ + SetDlgStuff(hdlg, ci); + + if (ci->server[0] == '\0') + SetFocus(GetDlgItem(hdlg, IDC_SERVER)); + else if (ci->port[0] == '\0') + SetFocus(GetDlgItem(hdlg, IDC_PORT)); + else if (ci->username[0] == '\0') + SetFocus(GetDlgItem(hdlg, IDC_USER)); + else if (ci->region[0] == '\0') + SetFocus(GetDlgItem(hdlg, IDC_REGION)); + + SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 2, (WPARAM)0); + + // Encryption + ci->use_ssl = (IsDlgButtonChecked(hdlg, IDC_USESSL) ? 
1 : 0); + break; + + case WM_COMMAND: + switch (GET_WM_COMMAND_ID(wParam, lParam)) { + case IDOK: + ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); + GetDlgStuff(hdlg, ci); + case IDCANCEL: + EndDialog(hdlg, GET_WM_COMMAND_ID(wParam, lParam) == IDOK); + return TRUE; + + case IDOK2: // <== TEST button + { + ConnInfo tmp_info; + ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); + GetDlgStuff(hdlg, ci); + CC_copy_conninfo(&tmp_info, ci); + test_connection(hdlg, &tmp_info, FALSE); + CC_conninfo_release(&tmp_info); + break; + } + case ID_ADVANCED_OPTIONS: { + ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); + DialogBoxParam(s_hModule, + MAKEINTRESOURCE(DLG_ADVANCED_OPTIONS), hdlg, + advancedOptionsProc, (LPARAM)ci); + break; + } + case ID_LOG_OPTIONS: { + ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); + DialogBoxParam(s_hModule, MAKEINTRESOURCE(DLG_LOG_OPTIONS), + hdlg, logOptionsProc, (LPARAM)ci); + break; + } + case IDC_AUTHTYPE: { + ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); + const struct authmode *am = GetCurrentAuthMode(hdlg); + SetAuthenticationVisibility(hdlg, am); + break; + } + } + break; + case WM_CTLCOLORSTATIC: + if (lParam == (LPARAM)GetDlgItem(hdlg, IDC_NOTICE_USER)) { + HBRUSH hBrush = (HBRUSH)GetStockObject(WHITE_BRUSH); + SetTextColor((HDC)wParam, RGB(255, 0, 0)); + return (INT_PTR)hBrush; + } + break; + } + + return FALSE; +} +#endif /* WIN32 */ + +#define ATTRIBUTE_DELIMITER ';' +#define OPENING_BRACKET '{' +#define CLOSING_BRACKET '}' + +typedef BOOL (*copyfunc)(ConnInfo *, const char *attribute, const char *value); +BOOL dconn_get_attributes(copyfunc func, const char *connect_string, + ConnInfo *ci) { + BOOL ret = TRUE; + char *our_connect_string; + const char *pair, *attribute, *value, *termp; + BOOL eoftok; + char *equals, *delp; + char *strtok_arg; +#ifdef HAVE_STRTOK_R + char *last = NULL; +#endif /* HAVE_STRTOK_R */ + + if (our_connect_string = strdup(connect_string), NULL == our_connect_string) { + ret = FALSE; + goto 
cleanup; + } + strtok_arg = our_connect_string; + +#ifdef FORCE_PASSWORD_DISPLAY + MYLOG(ES_DEBUG, "our_connect_string = '%s'\n", our_connect_string); +#else + if (get_mylog()) { + char *hide_str = hide_password(our_connect_string); + + MYLOG(ES_DEBUG, "our_connect_string = '%s'\n", hide_str); + free(hide_str); + } +#endif /* FORCE_PASSWORD_DISPLAY */ + + termp = strchr(our_connect_string, '\0'); + eoftok = FALSE; + while (!eoftok) { + if (strtok_arg != NULL && strtok_arg >= termp) /* for safety */ + break; +#ifdef HAVE_STRTOK_R + pair = strtok_r(strtok_arg, ";", &last); +#else + pair = strtok(strtok_arg, ";"); +#endif /* HAVE_STRTOK_R */ + if (strtok_arg) + strtok_arg = NULL; + if (!pair) + break; + + equals = strchr(pair, '='); + if (!equals) + continue; + + *equals = '\0'; + attribute = pair; /* ex. DSN */ + value = equals + 1; /* ex. 'CEO co1' */ + /* + * Values enclosed with braces({}) can contain ; etc + * We don't remove the braces here because + * decode_or_remove_braces() in dlg_specifi.c + * would remove them later. + * Just correct the misdetected delimter(;). + */ + switch (*value) { + const char *valuen, *closep; + + case OPENING_BRACKET: + delp = strchr(value, '\0'); + if (delp >= termp) { + eoftok = TRUE; + break; + } + /* Where's a corresponding closing bracket? 
*/ + closep = strchr(value, CLOSING_BRACKET); + if (NULL != closep && closep[1] == '\0') + break; + + for (valuen = value; valuen < termp; + closep = strchr(valuen, CLOSING_BRACKET)) { + if (NULL == closep) { + if (!delp) /* error */ + { + MYLOG(ES_DEBUG, + "closing bracket doesn't exist 1\n"); + ret = FALSE; + goto cleanup; + } + closep = strchr(delp + 1, CLOSING_BRACKET); + if (!closep) /* error */ + { + MYLOG(ES_DEBUG, + "closing bracket doesn't exist 2\n"); + ret = FALSE; + goto cleanup; + } + *delp = ATTRIBUTE_DELIMITER; /* restore delimiter */ + delp = NULL; + } + if (CLOSING_BRACKET == closep[1]) { + valuen = closep + 2; + if (valuen >= termp) + break; + else if (valuen == delp) { + *delp = ATTRIBUTE_DELIMITER; + delp = NULL; + } + continue; + } else if (ATTRIBUTE_DELIMITER == closep[1] + || '\0' == closep[1] || delp == closep + 1) { + delp = (char *)(closep + 1); + *delp = '\0'; + strtok_arg = delp + 1; + if (strtok_arg + 1 >= termp) + eoftok = TRUE; + break; + } + MYLOG(ES_DEBUG, + "subsequent char to the closing bracket is %c " + "value=%s\n", + closep[1], value); + ret = FALSE; + goto cleanup; + } + } + + /* Copy the appropriate value to the conninfo */ + (*func)(ci, attribute, value); + } + +cleanup: + free(our_connect_string); + + return ret; +} + +BOOL dconn_get_DSN_or_Driver(const char *connect_string, ConnInfo *ci) { + CC_conninfo_init(ci, INIT_GLOBALS); + return dconn_get_attributes(get_DSN_or_Driver, connect_string, ci); +} + +BOOL dconn_get_connect_attributes(const char *connect_string, ConnInfo *ci) { + return dconn_get_attributes(copyConnAttributes, connect_string, ci); +} diff --git a/sql-odbc/src/odfesqlodbc/drvconn.h b/sql-odbc/src/odfesqlodbc/drvconn.h new file mode 100644 index 0000000000..f85ebe6f59 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/drvconn.h @@ -0,0 +1,60 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef _DRVCONN_H_ +#define _DRVCONN_H_ + +#include +#include + +#include "es_connection.h" +#include "es_odbc.h" +#include "misc.h" + +#ifndef WIN32 +#include +#include +#else +#include +#endif + +#include + +#ifdef WIN32 +#include + +#include "resource.h" +#endif +#include "dlg_specific.h" +#include "es_apifunc.h" + +#define PASSWORD_IS_REQUIRED 1 + +#ifdef __cplusplus +extern "C" { +#endif +char *hide_password(const char *str); +BOOL dconn_get_connect_attributes(const char *connect_string, ConnInfo *ci); +BOOL dconn_get_DSN_or_Driver(const char *connect_string, ConnInfo *ci); +int paramRequired(const ConnInfo *ci, int reqs); +#ifdef WIN32 +RETCODE dconn_DoDialog(HWND hwnd, ConnInfo *ci); +#endif +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sql-odbc/src/odfesqlodbc/environ.c b/sql-odbc/src/odfesqlodbc/environ.c new file mode 100644 index 0000000000..f5e28df0d9 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/environ.c @@ -0,0 +1,563 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "environ.h" + +#include +#include + +#include "dlg_specific.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "misc.h" +#include "statement.h" +#ifdef WIN32 +#include +#endif /* WIN32 */ +#include "loadlib.h" + +/* The one instance of the handles */ +static int conns_count = 0; +static ConnectionClass **conns = NULL; + +void *conns_cs = NULL; +void *common_cs = NULL; +void *common_lcs = NULL; + +RETCODE SQL_API ESAPI_AllocEnv(HENV *phenv) { + CSTR func = "ESAPI_AllocEnv"; + SQLRETURN ret = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering\n"); + + /* + * For systems on which none of the constructor-making + * techniques in elasticodbc.c work: + * It's ok to call initialize_global_cs() twice. + */ + { initialize_global_cs(); } + + *phenv = (HENV)EN_Constructor(); + if (!*phenv) { + *phenv = SQL_NULL_HENV; + EN_log_error(func, "Error allocating environment", NULL); + ret = SQL_ERROR; + } + + MYLOG(ES_TRACE, "leaving phenv=%p\n", *phenv); + return ret; +} + +RETCODE SQL_API ESAPI_FreeEnv(HENV henv) { + CSTR func = "ESAPI_FreeEnv"; + SQLRETURN ret = SQL_SUCCESS; + EnvironmentClass *env = (EnvironmentClass *)henv; + + MYLOG(ES_TRACE, "entering env=%p\n", env); + + if (env && EN_Destructor(env)) { + MYLOG(ES_DEBUG, " ok\n"); + goto cleanup; + } + + ret = SQL_ERROR; + EN_log_error(func, "Error freeing environment", NULL); +cleanup: + return ret; +} + +#define SIZEOF_SQLSTATE 6 + +static void es_sqlstate_set(const EnvironmentClass *env, UCHAR *szSqlState, + const char *ver3str, const char *ver2str) { + strncpy_null((char *)szSqlState, EN_is_odbc3(env) ? 
ver3str : ver2str, + SIZEOF_SQLSTATE); +} + +ES_ErrorInfo *ER_Constructor(SDWORD errnumber, const char *msg) { + ES_ErrorInfo *error; + ssize_t aladd, errsize; + + if (DESC_OK == errnumber) + return NULL; + if (msg) { + errsize = strlen(msg); + aladd = errsize - sizeof(error->__error_message) + 1; + if (aladd < 0) + aladd = 0; + } else { + errsize = -1; + aladd = 0; + } + error = (ES_ErrorInfo *)malloc(sizeof(ES_ErrorInfo) + aladd); + if (error) { + memset(error, 0, sizeof(ES_ErrorInfo)); + error->status = errnumber; + error->errorsize = (Int2)errsize; + if (errsize > 0) + memcpy(error->__error_message, msg, errsize); + error->__error_message[errsize] = '\0'; + error->recsize = -1; + } + return error; +} + +void ER_Destructor(ES_ErrorInfo *self) { + free(self); +} + +ES_ErrorInfo *ER_Dup(const ES_ErrorInfo *self) { + ES_ErrorInfo *new; + Int4 alsize; + + if (!self) + return NULL; + alsize = sizeof(ES_ErrorInfo); + if (self->errorsize > 0) + alsize += self->errorsize; + new = (ES_ErrorInfo *)malloc(alsize); + if (new) + memcpy(new, self, alsize); + + return new; +} + +#define DRVMNGRDIV 511 +/* Returns the next SQL error information. */ +RETCODE SQL_API ER_ReturnError(ES_ErrorInfo *eserror, SQLSMALLINT RecNumber, + SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, + SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, + SQLSMALLINT *pcbErrorMsg, UWORD flag) { + /* CC: return an error of a hstmt */ + ES_ErrorInfo *error; + BOOL partial_ok = ((flag & PODBC_ALLOW_PARTIAL_EXTRACT) != 0); + const char *msg; + SWORD msglen, stapos, wrtlen, pcblen; + + if (!eserror) + return SQL_NO_DATA_FOUND; + error = eserror; + msg = error->__error_message; + MYLOG(ES_TRACE, "entering status = %d, msg = #%s#\n", error->status, msg); + msglen = (SQLSMALLINT)strlen(msg); + /* + * Even though an application specifies a larger error message + * buffer, the driver manager changes it silently. + * Therefore we divide the error message into ... 
+ */ + if (error->recsize < 0) { + if (cbErrorMsgMax > 0) + error->recsize = cbErrorMsgMax - 1; /* apply the first request */ + else + error->recsize = DRVMNGRDIV; + } else if (1 == RecNumber && cbErrorMsgMax > 0) + error->recsize = cbErrorMsgMax - 1; + if (RecNumber < 0) { + if (0 == error->errorpos) + RecNumber = 1; + else + RecNumber = 2 + (error->errorpos - 1) / error->recsize; + } + stapos = (RecNumber - 1) * error->recsize; + if (stapos > msglen) + return SQL_NO_DATA_FOUND; + pcblen = wrtlen = msglen - stapos; + if (pcblen > error->recsize) + pcblen = error->recsize; + if (0 == cbErrorMsgMax) + wrtlen = 0; + else if (wrtlen >= cbErrorMsgMax) { + if (partial_ok) + wrtlen = cbErrorMsgMax - 1; + else if (cbErrorMsgMax <= error->recsize) + wrtlen = cbErrorMsgMax - 1; + else + wrtlen = error->recsize; + } + if (wrtlen > pcblen) + wrtlen = pcblen; + if (NULL != pcbErrorMsg) + *pcbErrorMsg = pcblen; + + if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) { + memcpy(szErrorMsg, msg + stapos, wrtlen); + szErrorMsg[wrtlen] = '\0'; + } + + if (NULL != pfNativeError) + *pfNativeError = error->status; + + if (NULL != szSqlState) + strncpy_null((char *)szSqlState, error->sqlstate, 6); + + MYLOG(ES_DEBUG, " szSqlState = '%s',len=%d, szError='%s'\n", + szSqlState, pcblen, szErrorMsg); + if (wrtlen < pcblen) + return SQL_SUCCESS_WITH_INFO; + else + return SQL_SUCCESS; +} + +RETCODE SQL_API ESAPI_ConnectError(HDBC hdbc, SQLSMALLINT RecNumber, + SQLCHAR *szSqlState, + SQLINTEGER *pfNativeError, + SQLCHAR *szErrorMsg, + SQLSMALLINT cbErrorMsgMax, + SQLSMALLINT *pcbErrorMsg, UWORD flag) { + UNUSED(flag); + ConnectionClass *conn = (ConnectionClass *)hdbc; + EnvironmentClass *env = (EnvironmentClass *)conn->henv; + char *msg; + int status; + BOOL once_again = FALSE; + ssize_t msglen; + + MYLOG(ES_ERROR, "entering hdbc=%p <%d>\n", hdbc, cbErrorMsgMax); + if (RecNumber != 1 && RecNumber != -1) + return SQL_NO_DATA_FOUND; + if (cbErrorMsgMax < 0) + return SQL_ERROR; + if (CONN_EXECUTING 
== conn->status || !CC_get_error(conn, &status, &msg) + || NULL == msg) { + MYLOG(ES_ERROR, "CC_Get_error returned nothing.\n"); + if (NULL != szSqlState) + strncpy_null((char *)szSqlState, "00000", SIZEOF_SQLSTATE); + if (NULL != pcbErrorMsg) + *pcbErrorMsg = 0; + if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) + szErrorMsg[0] = '\0'; + + return SQL_NO_DATA_FOUND; + } + MYLOG(ES_ERROR, "CC_get_error: status = %d, msg = #%s#\n", status, msg); + + msglen = strlen(msg); + if (NULL != pcbErrorMsg) { + *pcbErrorMsg = (SQLSMALLINT)msglen; + if (cbErrorMsgMax == 0) + once_again = TRUE; + else if (msglen >= cbErrorMsgMax) + *pcbErrorMsg = cbErrorMsgMax - 1; + } + if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) + strncpy_null((char *)szErrorMsg, msg, cbErrorMsgMax); + if (NULL != pfNativeError) + *pfNativeError = status; + + if (NULL != szSqlState) { + if (conn->sqlstate[0]) + strncpy_null((char *)szSqlState, conn->sqlstate, SIZEOF_SQLSTATE); + else + switch (status) { + case CONN_OPTION_VALUE_CHANGED: + es_sqlstate_set(env, szSqlState, "01S02", "01S02"); + break; + case CONN_TRUNCATED: + es_sqlstate_set(env, szSqlState, "01004", "01004"); + /* data truncated */ + break; + case CONN_INIREAD_ERROR: + es_sqlstate_set(env, szSqlState, "IM002", "IM002"); + /* data source not found */ + break; + case CONNECTION_SERVER_NOT_REACHED: + case CONN_OPENDB_ERROR: + es_sqlstate_set(env, szSqlState, "08001", "08001"); + /* unable to connect to data source */ + break; + case CONN_INVALID_AUTHENTICATION: + case CONN_AUTH_TYPE_UNSUPPORTED: + es_sqlstate_set(env, szSqlState, "28000", "28000"); + break; + case CONN_STMT_ALLOC_ERROR: + es_sqlstate_set(env, szSqlState, "HY001", "S1001"); + /* memory allocation failure */ + break; + case CONN_IN_USE: + es_sqlstate_set(env, szSqlState, "HY000", "S1000"); + /* general error */ + break; + case CONN_UNSUPPORTED_OPTION: + es_sqlstate_set(env, szSqlState, "HYC00", "IM001"); + /* driver does not support this function */ + break; + case 
CONN_INVALID_ARGUMENT_NO: + es_sqlstate_set(env, szSqlState, "HY009", "S1009"); + /* invalid argument value */ + break; + case CONN_TRANSACT_IN_PROGRES: + es_sqlstate_set(env, szSqlState, "HY011", "S1011"); + break; + case CONN_NO_MEMORY_ERROR: + es_sqlstate_set(env, szSqlState, "HY001", "S1001"); + break; + case CONN_NOT_IMPLEMENTED_ERROR: + es_sqlstate_set(env, szSqlState, "HYC00", "S1C00"); + break; + case CONN_ILLEGAL_TRANSACT_STATE: + es_sqlstate_set(env, szSqlState, "25000", "S1010"); + break; + case CONN_VALUE_OUT_OF_RANGE: + es_sqlstate_set(env, szSqlState, "HY019", "22003"); + break; + case CONNECTION_COULD_NOT_SEND: + case CONNECTION_COULD_NOT_RECEIVE: + case CONNECTION_COMMUNICATION_ERROR: + case CONNECTION_NO_RESPONSE: + es_sqlstate_set(env, szSqlState, "08S01", "08S01"); + break; + default: + es_sqlstate_set(env, szSqlState, "HY000", "S1000"); + /* general error */ + break; + } + } + + MYLOG(ES_DEBUG, + " szSqlState = '%s',len=" FORMAT_SSIZE_T ", szError='%s'\n", + szSqlState ? (char *)szSqlState : PRINT_NULL, msglen, + szErrorMsg ? 
(char *)szErrorMsg : PRINT_NULL); + if (once_again) { + CC_set_errornumber(conn, status); + return SQL_SUCCESS_WITH_INFO; + } else + return SQL_SUCCESS; +} + +RETCODE SQL_API ESAPI_EnvError(HENV henv, SQLSMALLINT RecNumber, + SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, + SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, + SQLSMALLINT *pcbErrorMsg, UWORD flag) { + UNUSED(flag); + EnvironmentClass *env = (EnvironmentClass *)henv; + char *msg = NULL; + int status; + + MYLOG(ES_ERROR, "entering henv=%p <%d>\n", henv, cbErrorMsgMax); + if (RecNumber != 1 && RecNumber != -1) + return SQL_NO_DATA_FOUND; + if (cbErrorMsgMax < 0) + return SQL_ERROR; + if (!EN_get_error(env, &status, &msg) || NULL == msg) { + MYLOG(ES_ERROR, "EN_get_error: msg = #%s#\n", msg); + + if (NULL != szSqlState) + es_sqlstate_set(env, szSqlState, "00000", "00000"); + if (NULL != pcbErrorMsg) + *pcbErrorMsg = 0; + if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) + szErrorMsg[0] = '\0'; + + return SQL_NO_DATA_FOUND; + } + MYLOG(ES_ERROR, "EN_get_error: status = %d, msg = #%s#\n", status, msg); + + if (NULL != pcbErrorMsg) + *pcbErrorMsg = (SQLSMALLINT)strlen(msg); + if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) + strncpy_null((char *)szErrorMsg, msg, cbErrorMsgMax); + if (NULL != pfNativeError) + *pfNativeError = status; + + if (szSqlState) { + switch (status) { + case ENV_ALLOC_ERROR: + /* memory allocation failure */ + es_sqlstate_set(env, szSqlState, "HY001", "S1001"); + break; + default: + es_sqlstate_set(env, szSqlState, "HY000", "S1000"); + /* general error */ + break; + } + } + + return SQL_SUCCESS; +} + +/* + * EnvironmentClass implementation + */ +EnvironmentClass *EN_Constructor(void) { + EnvironmentClass *rv = NULL; +#ifdef WIN32 + WORD wVersionRequested; + WSADATA wsaData; + const int major = 2, minor = 2; + + /* Load the WinSock Library */ + wVersionRequested = MAKEWORD(major, minor); + + if (WSAStartup(wVersionRequested, &wsaData)) { + MYLOG(ES_ERROR, " WSAStartup error\n"); + 
return rv; + } + /* Verify that this is the minimum version of WinSock */ + if (LOBYTE(wsaData.wVersion) >= 1 + && (LOBYTE(wsaData.wVersion) >= 2 || HIBYTE(wsaData.wVersion) >= 1)) + ; + else { + MYLOG(ES_DEBUG, " WSAStartup version=(%d,%d)\n", + LOBYTE(wsaData.wVersion), HIBYTE(wsaData.wVersion)); + goto cleanup; + } +#endif /* WIN32 */ + + rv = (EnvironmentClass *)malloc(sizeof(EnvironmentClass)); + if (NULL == rv) { + MYLOG(ES_ERROR, " malloc error\n"); + goto cleanup; + } + rv->errormsg = 0; + rv->errornumber = 0; + rv->flag = 0; + INIT_ENV_CS(rv); +cleanup: +#ifdef WIN32 + if (NULL == rv) { + WSACleanup(); + } +#endif /* WIN32 */ + + return rv; +} + +char EN_Destructor(EnvironmentClass *self) { + int lf, nullcnt; + char rv = 1; + + MYLOG(ES_TRACE, "entering self=%p\n", self); + if (!self) + return 0; + + /* + * the error messages are static strings distributed throughout the + * source--they should not be freed + */ + + /* Free any connections belonging to this environment */ + ENTER_CONNS_CS; + for (lf = 0, nullcnt = 0; lf < conns_count; lf++) { + if (NULL == conns[lf]) + nullcnt++; + else if (conns[lf]->henv == self) { + if (CC_Destructor(conns[lf])) + conns[lf] = NULL; + else + rv = 0; + nullcnt++; + } + } + if (conns && nullcnt >= conns_count) { + MYLOG(ES_DEBUG, "clearing conns count=%d\n", conns_count); + free(conns); + conns = NULL; + conns_count = 0; + } + LEAVE_CONNS_CS; + DELETE_ENV_CS(self); + free(self); + +#ifdef WIN32 + WSACleanup(); +#endif + MYLOG(ES_TRACE, "leaving rv=%d\n", rv); +#ifdef _MEMORY_DEBUG_ + debug_memory_check(); +#endif /* _MEMORY_DEBUG_ */ + return rv; +} + +char EN_get_error(EnvironmentClass *self, int *number, char **message) { + if (self && self->errormsg && self->errornumber) { + *message = self->errormsg; + *number = self->errornumber; + self->errormsg = 0; + self->errornumber = 0; + return 1; + } else + return 0; +} + +#define INIT_CONN_COUNT 128 + +char EN_add_connection(EnvironmentClass *self, ConnectionClass *conn) { + 
int i, alloc; + ConnectionClass **newa; + char ret = FALSE; + + MYLOG(ES_TRACE, "entering self = %p, conn = %p\n", self, conn); + + ENTER_CONNS_CS; + for (i = 0; i < conns_count; i++) { + if (!conns[i]) { + conn->henv = self; + conns[i] = conn; + ret = TRUE; + MYLOG( + 0, + " added at i=%d, conn->henv = %p, conns[i]->henv = %p\n", + i, conn->henv, conns[i]->henv); + goto cleanup; + } + } + if (conns_count > 0) + alloc = 2 * conns_count; + else + alloc = INIT_CONN_COUNT; + if (newa = (ConnectionClass **)realloc(conns, + alloc * sizeof(ConnectionClass *)), + NULL == newa) + goto cleanup; + conn->henv = self; + newa[conns_count] = conn; + conns = newa; + ret = TRUE; + MYLOG(ES_DEBUG, + " added at %d, conn->henv = %p, conns[%d]->henv = %p\n", + conns_count, conn->henv, conns_count, conns[conns_count]->henv); + for (i = conns_count + 1; i < alloc; i++) + conns[i] = NULL; + conns_count = alloc; +cleanup: + LEAVE_CONNS_CS; + return ret; +} + +char EN_remove_connection(EnvironmentClass *self, ConnectionClass *conn) { + UNUSED(self); + int i; + + for (i = 0; i < conns_count; i++) + if (conns[i] == conn && conns[i]->status != CONN_EXECUTING) { + ENTER_CONNS_CS; + conns[i] = NULL; + LEAVE_CONNS_CS; + return TRUE; + } + + return FALSE; +} + +void EN_log_error(const char *func, char *desc, EnvironmentClass *self) { + if (self) + MYLOG(ES_ERROR, + "ENVIRON ERROR: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", + func, desc, self->errornumber, self->errormsg); + else + MYLOG(ES_ERROR, "INVALID ENVIRON HANDLE ERROR: func=%s, desc='%s'\n", + func, desc); +} diff --git a/sql-odbc/src/odfesqlodbc/environ.h b/sql-odbc/src/odfesqlodbc/environ.h new file mode 100644 index 0000000000..c4dc26a53d --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/environ.h @@ -0,0 +1,72 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __ENVIRON_H__ +#define __ENVIRON_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "es_helper.h" +#include "es_odbc.h" + +#define ENV_ALLOC_ERROR 1 + +/********** Environment Handle *************/ +struct EnvironmentClass_ { + char *errormsg; + int errornumber; + Int4 flag; + void *cs; +}; + +/* Environment prototypes */ +EnvironmentClass *EN_Constructor(void); +char EN_Destructor(EnvironmentClass *self); +char EN_get_error(EnvironmentClass *self, int *number, char **message); +char EN_add_connection(EnvironmentClass *self, ConnectionClass *conn); +char EN_remove_connection(EnvironmentClass *self, ConnectionClass *conn); +void EN_log_error(const char *func, char *desc, EnvironmentClass *self); + +#define EN_OV_ODBC2 1L +#define EN_CONN_POOLING (1L << 1) +#define EN_is_odbc2(env) ((env->flag & EN_OV_ODBC2) != 0) +#define EN_is_odbc3(env) (env && (env->flag & EN_OV_ODBC2) == 0) +#define EN_set_odbc2(env) (env->flag |= EN_OV_ODBC2) +#define EN_set_odbc3(env) (env->flag &= ~EN_OV_ODBC2) +#define EN_is_pooling(env) (env && (env->flag & EN_CONN_POOLING) != 0) +#define EN_set_pooling(env) (env->flag |= EN_CONN_POOLING) +#define EN_unset_pooling(env) (env->flag &= ~EN_CONN_POOLING) + +/* For Multi-thread */ +#define INIT_CONNS_CS XPlatformInitializeCriticalSection(&conns_cs) +#define ENTER_CONNS_CS XPlatformEnterCriticalSection(conns_cs) +#define LEAVE_CONNS_CS XPlatformLeaveCriticalSection(conns_cs) +#define DELETE_CONNS_CS XPlatformDeleteCriticalSection(&conns_cs) +#define INIT_ENV_CS(x) XPlatformInitializeCriticalSection(&((x)->cs)) +#define ENTER_ENV_CS(x) 
XPlatformEnterCriticalSection(((x)->cs)) +#define LEAVE_ENV_CS(x) XPlatformLeaveCriticalSection(((x)->cs)) +#define DELETE_ENV_CS(x) XPlatformDeleteCriticalSection(&((x)->cs)) +#define INIT_COMMON_CS XPlatformInitializeCriticalSection(&common_cs) +#define ENTER_COMMON_CS XPlatformEnterCriticalSection(common_cs) +#define LEAVE_COMMON_CS XPlatformLeaveCriticalSection(common_cs) +#define DELETE_COMMON_CS XPlatformDeleteCriticalSection(&common_cs) + +#ifdef __cplusplus +} +#endif +#endif /* __ENVIRON_H_ */ diff --git a/sql-odbc/src/odfesqlodbc/es_api30.c b/sql-odbc/src/odfesqlodbc/es_api30.c new file mode 100644 index 0000000000..cd2571506d --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_api30.c @@ -0,0 +1,1858 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include +#include + +#include "descriptor.h" +#include "dlg_specific.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_odbc.h" +#include "loadlib.h" +#include "misc.h" +#include "qresult.h" +#include "statement.h" + +/* SQLError -> SQLDiagRec */ +RETCODE SQL_API ESAPI_GetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, + SQLINTEGER *NativeError, SQLCHAR *MessageText, + SQLSMALLINT BufferLength, + SQLSMALLINT *TextLength) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering type=%d rec=%d\n", HandleType, RecNumber); + switch (HandleType) { + case SQL_HANDLE_ENV: + ret = ESAPI_EnvError(Handle, RecNumber, Sqlstate, NativeError, + MessageText, BufferLength, TextLength, 0); + break; + case SQL_HANDLE_DBC: + ret = ESAPI_ConnectError(Handle, RecNumber, Sqlstate, NativeError, + MessageText, BufferLength, TextLength, 0); + break; + case SQL_HANDLE_STMT: + ret = ESAPI_StmtError(Handle, RecNumber, Sqlstate, NativeError, + MessageText, BufferLength, TextLength, 0); + break; + case SQL_HANDLE_DESC: + ret = ESAPI_DescError(Handle, RecNumber, Sqlstate, NativeError, + MessageText, BufferLength, TextLength, 0); + break; + default: + ret = SQL_ERROR; + } + MYLOG(ES_TRACE, "leaving %d\n", ret); + return ret; +} + +/* + * Minimal implementation. 
+ * + */ +RETCODE SQL_API ESAPI_GetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, + SQLSMALLINT DiagIdentifier, PTR DiagInfoPtr, + SQLSMALLINT BufferLength, + SQLSMALLINT *StringLengthPtr) { + RETCODE ret = SQL_ERROR, rtn; + ConnectionClass *conn; + StatementClass *stmt; + SQLLEN rc; + SQLSMALLINT pcbErrm; + ssize_t rtnlen = -1; + int rtnctype = SQL_C_CHAR; + + MYLOG(ES_TRACE, "entering rec=%d\n", RecNumber); + switch (HandleType) { + case SQL_HANDLE_ENV: + switch (DiagIdentifier) { + case SQL_DIAG_CLASS_ORIGIN: + case SQL_DIAG_SUBCLASS_ORIGIN: + case SQL_DIAG_CONNECTION_NAME: + case SQL_DIAG_SERVER_NAME: + rtnlen = 0; + if (DiagInfoPtr && BufferLength > rtnlen) { + ret = SQL_SUCCESS; + *((char *)DiagInfoPtr) = '\0'; + } else + ret = SQL_SUCCESS_WITH_INFO; + break; + case SQL_DIAG_MESSAGE_TEXT: + ret = ESAPI_EnvError(Handle, RecNumber, NULL, NULL, + DiagInfoPtr, BufferLength, + StringLengthPtr, 0); + break; + case SQL_DIAG_NATIVE: + rtnctype = SQL_C_LONG; + ret = ESAPI_EnvError(Handle, RecNumber, NULL, + (SQLINTEGER *)DiagInfoPtr, NULL, 0, + NULL, 0); + break; + case SQL_DIAG_NUMBER: + rtnctype = SQL_C_LONG; + ret = ESAPI_EnvError(Handle, RecNumber, NULL, NULL, NULL, 0, + NULL, 0); + if (SQL_SUCCEEDED(ret)) { + *((SQLINTEGER *)DiagInfoPtr) = 1; + } + break; + case SQL_DIAG_SQLSTATE: + rtnlen = 5; + ret = ESAPI_EnvError(Handle, RecNumber, DiagInfoPtr, NULL, + NULL, 0, NULL, 0); + if (SQL_SUCCESS_WITH_INFO == ret) + ret = SQL_SUCCESS; + break; + case SQL_DIAG_RETURNCODE: /* driver manager returns */ + break; + case SQL_DIAG_CURSOR_ROW_COUNT: + case SQL_DIAG_ROW_COUNT: + case SQL_DIAG_DYNAMIC_FUNCTION: + case SQL_DIAG_DYNAMIC_FUNCTION_CODE: + /* options for statement type only */ + break; + } + break; + case SQL_HANDLE_DBC: + conn = (ConnectionClass *)Handle; + switch (DiagIdentifier) { + case SQL_DIAG_CLASS_ORIGIN: + case SQL_DIAG_SUBCLASS_ORIGIN: + case SQL_DIAG_CONNECTION_NAME: + rtnlen = 0; + if (DiagInfoPtr && BufferLength > rtnlen) 
{ + ret = SQL_SUCCESS; + *((char *)DiagInfoPtr) = '\0'; + } else + ret = SQL_SUCCESS_WITH_INFO; + break; + case SQL_DIAG_SERVER_NAME: + rtnlen = strlen(CC_get_DSN(conn)); + if (DiagInfoPtr) { + strncpy_null(DiagInfoPtr, CC_get_DSN(conn), + BufferLength); + ret = (BufferLength > rtnlen ? SQL_SUCCESS + : SQL_SUCCESS_WITH_INFO); + } else + ret = SQL_SUCCESS_WITH_INFO; + break; + case SQL_DIAG_MESSAGE_TEXT: + ret = ESAPI_ConnectError(Handle, RecNumber, NULL, NULL, + DiagInfoPtr, BufferLength, + StringLengthPtr, 0); + break; + case SQL_DIAG_NATIVE: + rtnctype = SQL_C_LONG; + ret = ESAPI_ConnectError(Handle, RecNumber, NULL, + (SQLINTEGER *)DiagInfoPtr, NULL, 0, + NULL, 0); + break; + case SQL_DIAG_NUMBER: + rtnctype = SQL_C_LONG; + ret = ESAPI_ConnectError(Handle, RecNumber, NULL, NULL, + NULL, 0, NULL, 0); + if (SQL_SUCCEEDED(ret)) { + *((SQLINTEGER *)DiagInfoPtr) = 1; + } + break; + case SQL_DIAG_SQLSTATE: + rtnlen = 5; + ret = ESAPI_ConnectError(Handle, RecNumber, DiagInfoPtr, + NULL, NULL, 0, NULL, 0); + if (SQL_SUCCESS_WITH_INFO == ret) + ret = SQL_SUCCESS; + break; + case SQL_DIAG_RETURNCODE: /* driver manager returns */ + break; + case SQL_DIAG_CURSOR_ROW_COUNT: + case SQL_DIAG_ROW_COUNT: + case SQL_DIAG_DYNAMIC_FUNCTION: + case SQL_DIAG_DYNAMIC_FUNCTION_CODE: + /* options for statement type only */ + break; + } + break; + case SQL_HANDLE_STMT: + conn = (ConnectionClass *)SC_get_conn(((StatementClass *)Handle)); + switch (DiagIdentifier) { + case SQL_DIAG_CLASS_ORIGIN: + case SQL_DIAG_SUBCLASS_ORIGIN: + case SQL_DIAG_CONNECTION_NAME: + rtnlen = 0; + if (DiagInfoPtr && BufferLength > rtnlen) { + ret = SQL_SUCCESS; + *((char *)DiagInfoPtr) = '\0'; + } else + ret = SQL_SUCCESS_WITH_INFO; + break; + case SQL_DIAG_SERVER_NAME: + rtnlen = strlen(CC_get_DSN(conn)); + if (DiagInfoPtr) { + strncpy_null(DiagInfoPtr, CC_get_DSN(conn), + BufferLength); + ret = (BufferLength > rtnlen ? 
SQL_SUCCESS + : SQL_SUCCESS_WITH_INFO); + } else + ret = SQL_SUCCESS_WITH_INFO; + break; + case SQL_DIAG_MESSAGE_TEXT: + ret = ESAPI_StmtError(Handle, RecNumber, NULL, NULL, + DiagInfoPtr, BufferLength, + StringLengthPtr, 0); + break; + case SQL_DIAG_NATIVE: + rtnctype = SQL_C_LONG; + ret = ESAPI_StmtError(Handle, RecNumber, NULL, + (SQLINTEGER *)DiagInfoPtr, NULL, 0, + NULL, 0); + break; + case SQL_DIAG_NUMBER: + rtnctype = SQL_C_LONG; + *((SQLINTEGER *)DiagInfoPtr) = 0; + ret = SQL_NO_DATA_FOUND; + stmt = (StatementClass *)Handle; + rtn = ESAPI_StmtError(Handle, -1, NULL, NULL, NULL, 0, + &pcbErrm, 0); + switch (rtn) { + case SQL_SUCCESS: + case SQL_SUCCESS_WITH_INFO: + ret = SQL_SUCCESS; + if (pcbErrm > 0 && stmt->eserror) + + *((SQLINTEGER *)DiagInfoPtr) = + (pcbErrm - 1) / stmt->eserror->recsize + 1; + break; + default: + break; + } + break; + case SQL_DIAG_SQLSTATE: + rtnlen = 5; + ret = ESAPI_StmtError(Handle, RecNumber, DiagInfoPtr, NULL, + NULL, 0, NULL, 0); + if (SQL_SUCCESS_WITH_INFO == ret) + ret = SQL_SUCCESS; + break; + case SQL_DIAG_CURSOR_ROW_COUNT: + rtnctype = SQL_C_LONG; + stmt = (StatementClass *)Handle; + rc = -1; + if (stmt->status == STMT_FINISHED) { + QResultClass *res = SC_get_Curres(stmt); + + /*if (!res) + return SQL_ERROR;*/ + if (stmt->proc_return > 0) + rc = 0; + else if (res && QR_NumResultCols(res) > 0 + && !SC_is_fetchcursor(stmt)) + rc = QR_get_num_total_tuples(res) - res->dl_count; + } + *((SQLLEN *)DiagInfoPtr) = rc; + MYLOG(ES_ALL, "rc=" FORMAT_LEN "\n", rc); + ret = SQL_SUCCESS; + break; + case SQL_DIAG_ROW_COUNT: + rtnctype = SQL_C_LONG; + stmt = (StatementClass *)Handle; + *((SQLLEN *)DiagInfoPtr) = stmt->diag_row_count; + ret = SQL_SUCCESS; + break; + case SQL_DIAG_ROW_NUMBER: + rtnctype = SQL_C_LONG; + *((SQLLEN *)DiagInfoPtr) = SQL_ROW_NUMBER_UNKNOWN; + ret = SQL_SUCCESS; + break; + case SQL_DIAG_COLUMN_NUMBER: + rtnctype = SQL_C_LONG; + *((SQLINTEGER *)DiagInfoPtr) = SQL_COLUMN_NUMBER_UNKNOWN; + ret = SQL_SUCCESS; + 
break; + case SQL_DIAG_RETURNCODE: /* driver manager returns */ + break; + } + break; + case SQL_HANDLE_DESC: + conn = DC_get_conn(((DescriptorClass *)Handle)); + switch (DiagIdentifier) { + case SQL_DIAG_CLASS_ORIGIN: + case SQL_DIAG_SUBCLASS_ORIGIN: + case SQL_DIAG_CONNECTION_NAME: + rtnlen = 0; + if (DiagInfoPtr && BufferLength > rtnlen) { + ret = SQL_SUCCESS; + *((char *)DiagInfoPtr) = '\0'; + } else + ret = SQL_SUCCESS_WITH_INFO; + break; + case SQL_DIAG_SERVER_NAME: + rtnlen = strlen(CC_get_DSN(conn)); + if (DiagInfoPtr) { + strncpy_null(DiagInfoPtr, CC_get_DSN(conn), + BufferLength); + ret = (BufferLength > rtnlen ? SQL_SUCCESS + : SQL_SUCCESS_WITH_INFO); + } else + ret = SQL_SUCCESS_WITH_INFO; + break; + case SQL_DIAG_MESSAGE_TEXT: + case SQL_DIAG_NATIVE: + case SQL_DIAG_NUMBER: + break; + case SQL_DIAG_SQLSTATE: + rtnlen = 5; + ret = ESAPI_DescError(Handle, RecNumber, DiagInfoPtr, NULL, + NULL, 0, NULL, 0); + if (SQL_SUCCESS_WITH_INFO == ret) + ret = SQL_SUCCESS; + break; + case SQL_DIAG_RETURNCODE: /* driver manager returns */ + break; + case SQL_DIAG_CURSOR_ROW_COUNT: + case SQL_DIAG_ROW_COUNT: + case SQL_DIAG_DYNAMIC_FUNCTION: + case SQL_DIAG_DYNAMIC_FUNCTION_CODE: + rtnctype = SQL_C_LONG; + /* options for statement type only */ + break; + } + break; + default: + ret = SQL_ERROR; + } + if (SQL_C_LONG == rtnctype) { + if (SQL_SUCCESS_WITH_INFO == ret) + ret = SQL_SUCCESS; + if (StringLengthPtr) + *StringLengthPtr = sizeof(SQLINTEGER); + } else if (rtnlen >= 0) { + if (rtnlen >= BufferLength) { + if (SQL_SUCCESS == ret) + ret = SQL_SUCCESS_WITH_INFO; + if (BufferLength > 0) + ((char *)DiagInfoPtr)[BufferLength - 1] = '\0'; + } + if (StringLengthPtr) + *StringLengthPtr = (SQLSMALLINT)rtnlen; + } + MYLOG(ES_TRACE, "leaving %d\n", ret); + return ret; +} + +/* SQLGetConnectOption -> SQLGetconnectAttr */ +RETCODE SQL_API ESAPI_GetConnectAttr(HDBC ConnectionHandle, + SQLINTEGER Attribute, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + 
ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + RETCODE ret = SQL_SUCCESS; + SQLINTEGER len = 4; + + MYLOG(ES_TRACE, "entering " FORMAT_INTEGER "\n", Attribute); + switch (Attribute) { + case SQL_ATTR_ASYNC_ENABLE: + *((SQLINTEGER *)Value) = SQL_ASYNC_ENABLE_OFF; + break; + case SQL_ATTR_AUTO_IPD: + *((SQLINTEGER *)Value) = SQL_FALSE; + break; + case SQL_ATTR_CONNECTION_DEAD: + *((SQLUINTEGER *)Value) = CC_not_connected(conn); + break; + case SQL_ATTR_CONNECTION_TIMEOUT: + *((SQLUINTEGER *)Value) = 0; + break; + case SQL_ATTR_METADATA_ID: + *((SQLUINTEGER *)Value) = conn->stmtOptions.metadata_id; + break; + case SQL_ATTR_ESOPT_DEBUG: + *((SQLINTEGER *)Value) = conn->connInfo.drivers.loglevel; + break; + case SQL_ATTR_ESOPT_COMMLOG: + *((SQLINTEGER *)Value) = conn->connInfo.drivers.loglevel; + break; + default: + ret = ESAPI_GetConnectOption(ConnectionHandle, (UWORD)Attribute, + Value, &len, BufferLength); + } + if (StringLength) + *StringLength = len; + return ret; +} + +static SQLHDESC descHandleFromStatementHandle(HSTMT StatementHandle, + SQLINTEGER descType) { + StatementClass *stmt = (StatementClass *)StatementHandle; + + switch (descType) { + case SQL_ATTR_APP_ROW_DESC: /* 10010 */ + return (HSTMT)stmt->ard; + case SQL_ATTR_APP_PARAM_DESC: /* 10011 */ + return (HSTMT)stmt->apd; + case SQL_ATTR_IMP_ROW_DESC: /* 10012 */ + return (HSTMT)stmt->ird; + case SQL_ATTR_IMP_PARAM_DESC: /* 10013 */ + return (HSTMT)stmt->ipd; + } + return (HSTMT)0; +} + +static void column_bindings_set(ARDFields *opts, SQLSMALLINT cols, + BOOL maxset) { + int i; + + if (cols == opts->allocated) + return; + if (cols > opts->allocated) { + extend_column_bindings(opts, cols); + return; + } + if (maxset) + return; + + for (i = opts->allocated; i > cols; i--) + reset_a_column_binding(opts, i); + opts->allocated = cols; + if (0 == cols) { + free(opts->bindings); + opts->bindings = NULL; + } +} + +static RETCODE SQL_API ARDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + 
SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength) { + UNUSED(BufferLength); + RETCODE ret = SQL_SUCCESS; + ARDFields *opts = &(desc->ardf); + SQLSMALLINT row_idx; + BOOL unbind = TRUE; + + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_SIZE: + opts->size_of_rowset = CAST_UPTR(SQLULEN, Value); + return ret; + case SQL_DESC_ARRAY_STATUS_PTR: + opts->row_operation_ptr = Value; + return ret; + case SQL_DESC_BIND_OFFSET_PTR: + opts->row_offset_ptr = Value; + return ret; + case SQL_DESC_BIND_TYPE: + opts->bind_size = CAST_UPTR(SQLUINTEGER, Value); + return ret; + case SQL_DESC_COUNT: + column_bindings_set(opts, CAST_PTR(SQLSMALLINT, Value), FALSE); + return ret; + + case SQL_DESC_TYPE: + case SQL_DESC_DATETIME_INTERVAL_CODE: + case SQL_DESC_CONCISE_TYPE: + column_bindings_set(opts, RecNumber, TRUE); + break; + } + if (RecNumber < 0 || RecNumber > opts->allocated) { + DC_set_error(desc, DESC_INVALID_COLUMN_NUMBER_ERROR, + "invalid column number"); + return SQL_ERROR; + } + if (0 == RecNumber) /* bookmark column */ + { + BindInfoClass *bookmark = ARD_AllocBookmark(opts); + + switch (FieldIdentifier) { + case SQL_DESC_DATA_PTR: + bookmark->buffer = Value; + break; + case SQL_DESC_INDICATOR_PTR: + bookmark->indicator = Value; + break; + case SQL_DESC_OCTET_LENGTH_PTR: + bookmark->used = Value; + break; + default: + DC_set_error(desc, DESC_INVALID_COLUMN_NUMBER_ERROR, + "invalid column number"); + ret = SQL_ERROR; + } + return ret; + } + row_idx = RecNumber - 1; + switch (FieldIdentifier) { + case SQL_DESC_TYPE: + opts->bindings[row_idx].returntype = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_DATETIME_INTERVAL_CODE: + switch (opts->bindings[row_idx].returntype) { + case SQL_DATETIME: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + switch ((LONG_PTR)Value) { + case SQL_CODE_DATE: + opts->bindings[row_idx].returntype = + SQL_C_TYPE_DATE; + break; + case SQL_CODE_TIME: + opts->bindings[row_idx].returntype = + 
SQL_C_TYPE_TIME; + break; + case SQL_CODE_TIMESTAMP: + opts->bindings[row_idx].returntype = + SQL_C_TYPE_TIMESTAMP; + break; + } + break; + } + break; + case SQL_DESC_CONCISE_TYPE: + opts->bindings[row_idx].returntype = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_DATA_PTR: + unbind = FALSE; + opts->bindings[row_idx].buffer = Value; + break; + case SQL_DESC_INDICATOR_PTR: + unbind = FALSE; + opts->bindings[row_idx].indicator = Value; + break; + case SQL_DESC_OCTET_LENGTH_PTR: + unbind = FALSE; + opts->bindings[row_idx].used = Value; + break; + case SQL_DESC_OCTET_LENGTH: + opts->bindings[row_idx].buflen = CAST_PTR(SQLLEN, Value); + break; + case SQL_DESC_PRECISION: + opts->bindings[row_idx].precision = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_SCALE: + opts->bindings[row_idx].scale = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_PRECISION: + case SQL_DESC_LENGTH: + case SQL_DESC_NUM_PREC_RADIX: + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + } + if (unbind) + opts->bindings[row_idx].buffer = NULL; + return ret; +} + +static void parameter_bindings_set(APDFields *opts, SQLSMALLINT params, + BOOL maxset) { + int i; + + if (params == opts->allocated) + return; + if (params > opts->allocated) { + extend_parameter_bindings(opts, params); + return; + } + if (maxset) + return; + + for (i = opts->allocated; i > params; i--) + reset_a_parameter_binding(opts, i); + opts->allocated = params; + if (0 == params) { + free(opts->parameters); + opts->parameters = NULL; + } +} + +static void parameter_ibindings_set(IPDFields *opts, SQLSMALLINT params, + BOOL maxset) { + int i; + + if (params == opts->allocated) + return; + if (params > opts->allocated) { + extend_iparameter_bindings(opts, params); + return; + } + if (maxset) + return; + + for (i = opts->allocated; i > params; i--) + 
reset_a_iparameter_binding(opts, i); + opts->allocated = params; + if (0 == params) { + free(opts->parameters); + opts->parameters = NULL; + } +} + +static RETCODE SQL_API APDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength) { + UNUSED(BufferLength); + RETCODE ret = SQL_SUCCESS; + APDFields *opts = &(desc->apdf); + SQLSMALLINT para_idx; + BOOL unbind = TRUE; + + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_SIZE: + opts->paramset_size = CAST_UPTR(SQLUINTEGER, Value); + return ret; + case SQL_DESC_ARRAY_STATUS_PTR: + opts->param_operation_ptr = Value; + return ret; + case SQL_DESC_BIND_OFFSET_PTR: + opts->param_offset_ptr = Value; + return ret; + case SQL_DESC_BIND_TYPE: + opts->param_bind_type = CAST_UPTR(SQLUINTEGER, Value); + return ret; + case SQL_DESC_COUNT: + parameter_bindings_set(opts, CAST_PTR(SQLSMALLINT, Value), FALSE); + return ret; + + case SQL_DESC_TYPE: + case SQL_DESC_DATETIME_INTERVAL_CODE: + case SQL_DESC_CONCISE_TYPE: + parameter_bindings_set(opts, RecNumber, TRUE); + break; + } + if (RecNumber <= 0) { + MYLOG(ES_ALL, "RecN=%d allocated=%d\n", RecNumber, opts->allocated); + DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, + "bad parameter number"); + return SQL_ERROR; + } + if (RecNumber > opts->allocated) { + MYLOG(ES_ALL, "RecN=%d allocated=%d\n", RecNumber, opts->allocated); + parameter_bindings_set(opts, RecNumber, TRUE); + /* DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, + "bad parameter number"); + return SQL_ERROR;*/ + } + para_idx = RecNumber - 1; + switch (FieldIdentifier) { + case SQL_DESC_TYPE: + opts->parameters[para_idx].CType = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_DATETIME_INTERVAL_CODE: + switch (opts->parameters[para_idx].CType) { + case SQL_DATETIME: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + switch ((LONG_PTR)Value) { + case SQL_CODE_DATE: + opts->parameters[para_idx].CType = SQL_C_TYPE_DATE; 
+ break; + case SQL_CODE_TIME: + opts->parameters[para_idx].CType = SQL_C_TYPE_TIME; + break; + case SQL_CODE_TIMESTAMP: + opts->parameters[para_idx].CType = + SQL_C_TYPE_TIMESTAMP; + break; + } + break; + } + break; + case SQL_DESC_CONCISE_TYPE: + opts->parameters[para_idx].CType = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_DATA_PTR: + unbind = FALSE; + opts->parameters[para_idx].buffer = Value; + break; + case SQL_DESC_INDICATOR_PTR: + unbind = FALSE; + opts->parameters[para_idx].indicator = Value; + break; + case SQL_DESC_OCTET_LENGTH: + opts->parameters[para_idx].buflen = CAST_PTR(Int4, Value); + break; + case SQL_DESC_OCTET_LENGTH_PTR: + unbind = FALSE; + opts->parameters[para_idx].used = Value; + break; + case SQL_DESC_PRECISION: + opts->parameters[para_idx].precision = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_SCALE: + opts->parameters[para_idx].scale = CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_PRECISION: + case SQL_DESC_LENGTH: + case SQL_DESC_NUM_PREC_RADIX: + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invaid descriptor identifier"); + } + if (unbind) + opts->parameters[para_idx].buffer = NULL; + + return ret; +} + +static RETCODE SQL_API IRDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength) { + UNUSED(BufferLength, RecNumber); + RETCODE ret = SQL_SUCCESS; + IRDFields *opts = &(desc->irdf); + + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_STATUS_PTR: + opts->rowStatusArray = (SQLUSMALLINT *)Value; + break; + case SQL_DESC_ROWS_PROCESSED_PTR: + opts->rowsFetched = (SQLULEN *)Value; + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + case SQL_DESC_COUNT: /* read-only */ + case SQL_DESC_AUTO_UNIQUE_VALUE: /* read-only */ + case SQL_DESC_BASE_COLUMN_NAME: /* read-only */ + case SQL_DESC_BASE_TABLE_NAME: /* read-only */ + case 
SQL_DESC_CASE_SENSITIVE: /* read-only */ + case SQL_DESC_CATALOG_NAME: /* read-only */ + case SQL_DESC_CONCISE_TYPE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_CODE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_PRECISION: /* read-only */ + case SQL_DESC_DISPLAY_SIZE: /* read-only */ + case SQL_DESC_FIXED_PREC_SCALE: /* read-only */ + case SQL_DESC_LABEL: /* read-only */ + case SQL_DESC_LENGTH: /* read-only */ + case SQL_DESC_LITERAL_PREFIX: /* read-only */ + case SQL_DESC_LITERAL_SUFFIX: /* read-only */ + case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */ + case SQL_DESC_NAME: /* read-only */ + case SQL_DESC_NULLABLE: /* read-only */ + case SQL_DESC_NUM_PREC_RADIX: /* read-only */ + case SQL_DESC_OCTET_LENGTH: /* read-only */ + case SQL_DESC_PRECISION: /* read-only */ + case SQL_DESC_ROWVER: /* read-only */ + case SQL_DESC_SCALE: /* read-only */ + case SQL_DESC_SCHEMA_NAME: /* read-only */ + case SQL_DESC_SEARCHABLE: /* read-only */ + case SQL_DESC_TABLE_NAME: /* read-only */ + case SQL_DESC_TYPE: /* read-only */ + case SQL_DESC_TYPE_NAME: /* read-only */ + case SQL_DESC_UNNAMED: /* read-only */ + case SQL_DESC_UNSIGNED: /* read-only */ + case SQL_DESC_UPDATABLE: /* read-only */ + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + } + return ret; +} + +static RETCODE SQL_API IPDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength) { + UNUSED(BufferLength); + RETCODE ret = SQL_SUCCESS; + IPDFields *ipdopts = &(desc->ipdf); + SQLSMALLINT para_idx; + + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_STATUS_PTR: + ipdopts->param_status_ptr = (SQLUSMALLINT *)Value; + return ret; + case SQL_DESC_ROWS_PROCESSED_PTR: + ipdopts->param_processed_ptr = (SQLULEN *)Value; + return ret; + case SQL_DESC_COUNT: + parameter_ibindings_set(ipdopts, CAST_PTR(SQLSMALLINT, Value), + FALSE); + return ret; + case SQL_DESC_UNNAMED: /* only 
SQL_UNNAMED is allowed */ + if (SQL_UNNAMED != CAST_PTR(SQLSMALLINT, Value)) { + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + return ret; + } + case SQL_DESC_NAME: + case SQL_DESC_TYPE: + case SQL_DESC_DATETIME_INTERVAL_CODE: + case SQL_DESC_CONCISE_TYPE: + parameter_ibindings_set(ipdopts, RecNumber, TRUE); + break; + } + if (RecNumber <= 0 || RecNumber > ipdopts->allocated) { + MYLOG(ES_ALL, "RecN=%d allocated=%d\n", RecNumber, ipdopts->allocated); + DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, + "bad parameter number"); + return SQL_ERROR; + } + para_idx = RecNumber - 1; + switch (FieldIdentifier) { + case SQL_DESC_TYPE: + if (ipdopts->parameters[para_idx].SQLType + != CAST_PTR(SQLSMALLINT, Value)) { + reset_a_iparameter_binding(ipdopts, RecNumber); + ipdopts->parameters[para_idx].SQLType = + CAST_PTR(SQLSMALLINT, Value); + } + break; + case SQL_DESC_DATETIME_INTERVAL_CODE: + switch (ipdopts->parameters[para_idx].SQLType) { + case SQL_DATETIME: + case SQL_TYPE_DATE: + case SQL_TYPE_TIME: + case SQL_TYPE_TIMESTAMP: + switch ((LONG_PTR)Value) { + case SQL_CODE_DATE: + ipdopts->parameters[para_idx].SQLType = + SQL_TYPE_DATE; + break; + case SQL_CODE_TIME: + ipdopts->parameters[para_idx].SQLType = + SQL_TYPE_TIME; + break; + case SQL_CODE_TIMESTAMP: + ipdopts->parameters[para_idx].SQLType = + SQL_TYPE_TIMESTAMP; + break; + } + break; + } + break; + case SQL_DESC_CONCISE_TYPE: + ipdopts->parameters[para_idx].SQLType = + CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_NAME: + if (Value) + STR_TO_NAME(ipdopts->parameters[para_idx].paramName, Value); + else + NULL_THE_NAME(ipdopts->parameters[para_idx].paramName); + break; + case SQL_DESC_PARAMETER_TYPE: + ipdopts->parameters[para_idx].paramType = + CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_SCALE: + ipdopts->parameters[para_idx].decimal_digits = + CAST_PTR(SQLSMALLINT, Value); + break; + case SQL_DESC_UNNAMED: /* only 
SQL_UNNAMED is allowed */ + if (SQL_UNNAMED != CAST_PTR(SQLSMALLINT, Value)) { + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + } else + NULL_THE_NAME(ipdopts->parameters[para_idx].paramName); + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + case SQL_DESC_CASE_SENSITIVE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_PRECISION: + case SQL_DESC_FIXED_PREC_SCALE: /* read-only */ + case SQL_DESC_LENGTH: + case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */ + case SQL_DESC_NULLABLE: /* read-only */ + case SQL_DESC_NUM_PREC_RADIX: + case SQL_DESC_OCTET_LENGTH: + case SQL_DESC_PRECISION: + case SQL_DESC_ROWVER: /* read-only */ + case SQL_DESC_TYPE_NAME: /* read-only */ + case SQL_DESC_UNSIGNED: /* read-only */ + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + } + return ret; +} + +static RETCODE SQL_API ARDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + UNUSED(BufferLength); + RETCODE ret = SQL_SUCCESS; + SQLLEN ival = 0; + SQLINTEGER len, rettype = 0; + PTR ptr = NULL; + const ARDFields *opts = &(desc->ardf); + SQLSMALLINT row_idx; + + len = sizeof(SQLINTEGER); + if (0 == RecNumber) /* bookmark */ + { + BindInfoClass *bookmark = opts->bookmark; + switch (FieldIdentifier) { + case SQL_DESC_DATA_PTR: + rettype = SQL_IS_POINTER; + ptr = bookmark ? bookmark->buffer : NULL; + break; + case SQL_DESC_INDICATOR_PTR: + rettype = SQL_IS_POINTER; + ptr = bookmark ? bookmark->indicator : NULL; + break; + case SQL_DESC_OCTET_LENGTH_PTR: + rettype = SQL_IS_POINTER; + ptr = bookmark ? 
bookmark->used : NULL; + break; + } + if (ptr) { + *((void **)Value) = ptr; + if (StringLength) + *StringLength = len; + return ret; + } + } + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_SIZE: + case SQL_DESC_ARRAY_STATUS_PTR: + case SQL_DESC_BIND_OFFSET_PTR: + case SQL_DESC_BIND_TYPE: + case SQL_DESC_COUNT: + break; + default: + if (RecNumber <= 0 || RecNumber > opts->allocated) { + DC_set_error(desc, DESC_INVALID_COLUMN_NUMBER_ERROR, + "invalid column number"); + return SQL_ERROR; + } + } + row_idx = RecNumber - 1; + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_SIZE: + ival = opts->size_of_rowset; + break; + case SQL_DESC_ARRAY_STATUS_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->row_operation_ptr; + break; + case SQL_DESC_BIND_OFFSET_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->row_offset_ptr; + break; + case SQL_DESC_BIND_TYPE: + ival = opts->bind_size; + break; + case SQL_DESC_TYPE: + rettype = SQL_IS_SMALLINT; + switch (opts->bindings[row_idx].returntype) { + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + ival = SQL_DATETIME; + break; + default: + ival = opts->bindings[row_idx].returntype; + } + break; + case SQL_DESC_DATETIME_INTERVAL_CODE: + rettype = SQL_IS_SMALLINT; + switch (opts->bindings[row_idx].returntype) { + case SQL_C_TYPE_DATE: + ival = SQL_CODE_DATE; + break; + case SQL_C_TYPE_TIME: + ival = SQL_CODE_TIME; + break; + case SQL_C_TYPE_TIMESTAMP: + ival = SQL_CODE_TIMESTAMP; + break; + default: + ival = 0; + break; + } + break; + case SQL_DESC_CONCISE_TYPE: + rettype = SQL_IS_SMALLINT; + ival = opts->bindings[row_idx].returntype; + break; + case SQL_DESC_DATA_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->bindings[row_idx].buffer; + break; + case SQL_DESC_INDICATOR_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->bindings[row_idx].indicator; + break; + case SQL_DESC_OCTET_LENGTH_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->bindings[row_idx].used; + break; + case SQL_DESC_COUNT: + rettype = SQL_IS_SMALLINT; 
+ ival = opts->allocated; + break; + case SQL_DESC_OCTET_LENGTH: + ival = opts->bindings[row_idx].buflen; + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + rettype = SQL_IS_SMALLINT; + if (DC_get_embedded(desc)) + ival = SQL_DESC_ALLOC_AUTO; + else + ival = SQL_DESC_ALLOC_USER; + break; + case SQL_DESC_PRECISION: + rettype = SQL_IS_SMALLINT; + ival = opts->bindings[row_idx].precision; + break; + case SQL_DESC_SCALE: + rettype = SQL_IS_SMALLINT; + ival = opts->bindings[row_idx].scale; + break; + case SQL_DESC_NUM_PREC_RADIX: + ival = 10; + break; + case SQL_DESC_DATETIME_INTERVAL_PRECISION: + case SQL_DESC_LENGTH: + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + } + switch (rettype) { + case 0: + case SQL_IS_INTEGER: + len = sizeof(SQLINTEGER); + *((SQLINTEGER *)Value) = (SQLINTEGER)ival; + break; + case SQL_IS_SMALLINT: + len = sizeof(SQLSMALLINT); + *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; + break; + case SQL_IS_POINTER: + len = sizeof(SQLPOINTER); + *((void **)Value) = ptr; + break; + } + + if (StringLength) + *StringLength = len; + return ret; +} + +static RETCODE SQL_API APDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + UNUSED(BufferLength); + RETCODE ret = SQL_SUCCESS; + SQLLEN ival = 0; + SQLINTEGER len, rettype = 0; + PTR ptr = NULL; + const APDFields *opts = (const APDFields *)&(desc->apdf); + SQLSMALLINT para_idx; + + len = sizeof(SQLINTEGER); + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_SIZE: + case SQL_DESC_ARRAY_STATUS_PTR: + case SQL_DESC_BIND_OFFSET_PTR: + case SQL_DESC_BIND_TYPE: + case SQL_DESC_COUNT: + break; + default: + if (RecNumber <= 0 || RecNumber > opts->allocated) { + MYLOG(ES_ALL, "RecN=%d allocated=%d\n", RecNumber, + opts->allocated); + DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, + "bad parameter number"); + return SQL_ERROR; + } + } 
+ para_idx = RecNumber - 1; + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_SIZE: + rettype = SQL_IS_LEN; + ival = opts->paramset_size; + break; + case SQL_DESC_ARRAY_STATUS_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->param_operation_ptr; + break; + case SQL_DESC_BIND_OFFSET_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->param_offset_ptr; + break; + case SQL_DESC_BIND_TYPE: + ival = opts->param_bind_type; + break; + + case SQL_DESC_TYPE: + rettype = SQL_IS_SMALLINT; + switch (opts->parameters[para_idx].CType) { + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + ival = SQL_DATETIME; + break; + default: + ival = opts->parameters[para_idx].CType; + } + break; + case SQL_DESC_DATETIME_INTERVAL_CODE: + rettype = SQL_IS_SMALLINT; + switch (opts->parameters[para_idx].CType) { + case SQL_C_TYPE_DATE: + ival = SQL_CODE_DATE; + break; + case SQL_C_TYPE_TIME: + ival = SQL_CODE_TIME; + break; + case SQL_C_TYPE_TIMESTAMP: + ival = SQL_CODE_TIMESTAMP; + break; + default: + ival = 0; + break; + } + break; + case SQL_DESC_CONCISE_TYPE: + rettype = SQL_IS_SMALLINT; + ival = opts->parameters[para_idx].CType; + break; + case SQL_DESC_DATA_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->parameters[para_idx].buffer; + break; + case SQL_DESC_INDICATOR_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->parameters[para_idx].indicator; + break; + case SQL_DESC_OCTET_LENGTH: + ival = opts->parameters[para_idx].buflen; + break; + case SQL_DESC_OCTET_LENGTH_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->parameters[para_idx].used; + break; + case SQL_DESC_COUNT: + rettype = SQL_IS_SMALLINT; + ival = opts->allocated; + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + rettype = SQL_IS_SMALLINT; + if (DC_get_embedded(desc)) + ival = SQL_DESC_ALLOC_AUTO; + else + ival = SQL_DESC_ALLOC_USER; + break; + case SQL_DESC_NUM_PREC_RADIX: + ival = 10; + break; + case SQL_DESC_PRECISION: + rettype = SQL_IS_SMALLINT; + ival = opts->parameters[para_idx].precision; + break; + 
case SQL_DESC_SCALE: + rettype = SQL_IS_SMALLINT; + ival = opts->parameters[para_idx].scale; + break; + case SQL_DESC_DATETIME_INTERVAL_PRECISION: + case SQL_DESC_LENGTH: + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifer"); + } + switch (rettype) { + case SQL_IS_LEN: + len = sizeof(SQLLEN); + *((SQLLEN *)Value) = ival; + break; + case 0: + case SQL_IS_INTEGER: + len = sizeof(SQLINTEGER); + *((SQLINTEGER *)Value) = (SQLINTEGER)ival; + break; + case SQL_IS_SMALLINT: + len = sizeof(SQLSMALLINT); + *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; + break; + case SQL_IS_POINTER: + len = sizeof(SQLPOINTER); + *((void **)Value) = ptr; + break; + } + + if (StringLength) + *StringLength = len; + return ret; +} + +static RETCODE SQL_API IRDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + RETCODE ret = SQL_SUCCESS; + SQLLEN ival = 0; + SQLINTEGER len = 0, rettype = 0; + PTR ptr = NULL; + BOOL bCallColAtt = FALSE; + const IRDFields *opts = &(desc->irdf); + + switch (FieldIdentifier) { + case SQL_DESC_ROWVER: /* read-only */ + // Database is read-only, and does not support transactions + rettype = SQL_IS_SMALLINT; + ival = SQL_FALSE; + break; + case SQL_DESC_ARRAY_STATUS_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->rowStatusArray; + break; + case SQL_DESC_ROWS_PROCESSED_PTR: + rettype = SQL_IS_POINTER; + ptr = opts->rowsFetched; + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + rettype = SQL_IS_SMALLINT; + ival = SQL_DESC_ALLOC_AUTO; + break; + case SQL_DESC_AUTO_UNIQUE_VALUE: /* read-only */ + case SQL_DESC_CASE_SENSITIVE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_PRECISION: /* read-only */ + case SQL_DESC_NUM_PREC_RADIX: /* read-only */ + rettype = SQL_IS_INTEGER; + bCallColAtt = TRUE; + break; + case SQL_DESC_DISPLAY_SIZE: /* read-only */ + case SQL_DESC_LENGTH: /* read-only */ + case 
SQL_DESC_OCTET_LENGTH: /* read-only */ + rettype = SQL_IS_LEN; + bCallColAtt = TRUE; + break; + case SQL_DESC_NULLABLE: /* read-only */ + case SQL_DESC_FIXED_PREC_SCALE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_CODE: /* read-only */ + case SQL_DESC_CONCISE_TYPE: /* read-only */ + case SQL_DESC_COUNT: /* read-only */ + case SQL_DESC_PRECISION: /* read-only */ + case SQL_DESC_SCALE: /* read-only */ + case SQL_DESC_SEARCHABLE: /* read-only */ + case SQL_DESC_TYPE: /* read-only */ + case SQL_DESC_UNNAMED: /* read-only */ + case SQL_DESC_UNSIGNED: /* read-only */ + case SQL_DESC_UPDATABLE: /* read-only */ + rettype = SQL_IS_SMALLINT; + bCallColAtt = TRUE; + break; + case SQL_DESC_BASE_COLUMN_NAME: /* read-only */ + case SQL_DESC_BASE_TABLE_NAME: /* read-only */ + case SQL_DESC_CATALOG_NAME: /* read-only */ + case SQL_DESC_LABEL: /* read-only */ + case SQL_DESC_LITERAL_PREFIX: /* read-only */ + case SQL_DESC_LITERAL_SUFFIX: /* read-only */ + case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */ + case SQL_DESC_NAME: /* read-only */ + case SQL_DESC_SCHEMA_NAME: /* read-only */ + case SQL_DESC_TABLE_NAME: /* read-only */ + case SQL_DESC_TYPE_NAME: /* read-only */ + rettype = SQL_NTS; + bCallColAtt = TRUE; + break; + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + } + if (bCallColAtt) { + SQLSMALLINT pcbL; + StatementClass *stmt; + + stmt = opts->stmt; + ret = ESAPI_ColAttributes(stmt, RecNumber, FieldIdentifier, Value, + (SQLSMALLINT)BufferLength, &pcbL, &ival); + len = pcbL; + } + switch (rettype) { + case 0: + case SQL_IS_INTEGER: + len = sizeof(SQLINTEGER); + *((SQLINTEGER *)Value) = (SQLINTEGER)ival; + break; + case SQL_IS_UINTEGER: + len = sizeof(SQLUINTEGER); + *((SQLUINTEGER *)Value) = (SQLUINTEGER)ival; + break; + case SQL_IS_SMALLINT: + len = sizeof(SQLSMALLINT); + *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; + break; + case SQL_IS_POINTER: + len = sizeof(SQLPOINTER); + *((void **)Value) 
= ptr; + break; + case SQL_NTS: + break; + } + + if (StringLength) + *StringLength = len; + return ret; +} + +static RETCODE SQL_API IPDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + UNUSED(BufferLength); + RETCODE ret = SQL_SUCCESS; + SQLINTEGER ival = 0, len = 0, rettype = 0; + PTR ptr = NULL; + const IPDFields *ipdopts = (const IPDFields *)&(desc->ipdf); + SQLSMALLINT para_idx; + + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_STATUS_PTR: + case SQL_DESC_ROWS_PROCESSED_PTR: + case SQL_DESC_COUNT: + break; + default: + if (RecNumber <= 0 || RecNumber > ipdopts->allocated) { + MYLOG(ES_ALL, "RecN=%d allocated=%d\n", RecNumber, + ipdopts->allocated); + DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, + "bad parameter number"); + return SQL_ERROR; + } + } + para_idx = RecNumber - 1; + switch (FieldIdentifier) { + case SQL_DESC_ARRAY_STATUS_PTR: + rettype = SQL_IS_POINTER; + ptr = ipdopts->param_status_ptr; + break; + case SQL_DESC_ROWS_PROCESSED_PTR: + rettype = SQL_IS_POINTER; + ptr = ipdopts->param_processed_ptr; + break; + case SQL_DESC_UNNAMED: + rettype = SQL_IS_SMALLINT; + ival = NAME_IS_NULL(ipdopts->parameters[para_idx].paramName) + ? 
SQL_UNNAMED + : SQL_NAMED; + break; + case SQL_DESC_TYPE: + rettype = SQL_IS_SMALLINT; + switch (ipdopts->parameters[para_idx].SQLType) { + case SQL_TYPE_DATE: + case SQL_TYPE_TIME: + case SQL_TYPE_TIMESTAMP: + ival = SQL_DATETIME; + break; + default: + ival = ipdopts->parameters[para_idx].SQLType; + } + break; + case SQL_DESC_DATETIME_INTERVAL_CODE: + rettype = SQL_IS_SMALLINT; + switch (ipdopts->parameters[para_idx].SQLType) { + case SQL_TYPE_DATE: + ival = SQL_CODE_DATE; + break; + case SQL_TYPE_TIME: + ival = SQL_CODE_TIME; + break; + case SQL_TYPE_TIMESTAMP: + ival = SQL_CODE_TIMESTAMP; + break; + default: + ival = 0; + } + break; + case SQL_DESC_CONCISE_TYPE: + rettype = SQL_IS_SMALLINT; + ival = ipdopts->parameters[para_idx].SQLType; + break; + case SQL_DESC_COUNT: + rettype = SQL_IS_SMALLINT; + ival = ipdopts->allocated; + break; + case SQL_DESC_PARAMETER_TYPE: + rettype = SQL_IS_SMALLINT; + ival = ipdopts->parameters[para_idx].paramType; + break; + case SQL_DESC_PRECISION: + rettype = SQL_IS_SMALLINT; + switch (ipdopts->parameters[para_idx].SQLType) { + case SQL_TYPE_DATE: + case SQL_TYPE_TIME: + case SQL_TYPE_TIMESTAMP: + case SQL_DATETIME: + ival = ipdopts->parameters[para_idx].decimal_digits; + break; + } + break; + case SQL_DESC_SCALE: + rettype = SQL_IS_SMALLINT; + switch (ipdopts->parameters[para_idx].SQLType) { + case SQL_NUMERIC: + ival = ipdopts->parameters[para_idx].decimal_digits; + break; + } + break; + case SQL_DESC_ALLOC_TYPE: /* read-only */ + rettype = SQL_IS_SMALLINT; + ival = SQL_DESC_ALLOC_AUTO; + break; + case SQL_DESC_CASE_SENSITIVE: /* read-only */ + case SQL_DESC_DATETIME_INTERVAL_PRECISION: + case SQL_DESC_FIXED_PREC_SCALE: /* read-only */ + case SQL_DESC_LENGTH: + case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */ + case SQL_DESC_NAME: + case SQL_DESC_NULLABLE: /* read-only */ + case SQL_DESC_NUM_PREC_RADIX: + case SQL_DESC_OCTET_LENGTH: + case SQL_DESC_ROWVER: /* read-only */ + case SQL_DESC_TYPE_NAME: /* read-only */ + case 
SQL_DESC_UNSIGNED: /* read-only */ + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, + "invalid descriptor identifier"); + } + switch (rettype) { + case 0: + case SQL_IS_INTEGER: + len = sizeof(SQLINTEGER); + *((SQLINTEGER *)Value) = ival; + break; + case SQL_IS_SMALLINT: + len = sizeof(SQLSMALLINT); + *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; + break; + case SQL_IS_POINTER: + len = sizeof(SQLPOINTER); + *((void **)Value) = ptr; + break; + } + + if (StringLength) + *StringLength = len; + return ret; +} + +/* SQLGetStmtOption -> SQLGetStmtAttr */ +RETCODE SQL_API ESAPI_GetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + CSTR func = "ESAPI_GetStmtAttr"; + StatementClass *stmt = (StatementClass *)StatementHandle; + RETCODE ret = SQL_SUCCESS; + SQLINTEGER len = 0; + + MYLOG(ES_TRACE, "entering Handle=%p " FORMAT_INTEGER "\n", StatementHandle, + Attribute); + switch (Attribute) { + case SQL_ATTR_FETCH_BOOKMARK_PTR: /* 16 */ + *((void **)Value) = stmt->options.bookmark_ptr; + len = sizeof(SQLPOINTER); + break; + case SQL_ATTR_PARAM_BIND_OFFSET_PTR: /* 17 */ + *((SQLULEN **)Value) = SC_get_APDF(stmt)->param_offset_ptr; + len = sizeof(SQLPOINTER); + break; + case SQL_ATTR_PARAM_BIND_TYPE: /* 18 */ + *((SQLUINTEGER *)Value) = SC_get_APDF(stmt)->param_bind_type; + len = sizeof(SQLUINTEGER); + break; + case SQL_ATTR_PARAM_OPERATION_PTR: /* 19 */ + *((SQLUSMALLINT **)Value) = SC_get_APDF(stmt)->param_operation_ptr; + len = sizeof(SQLPOINTER); + break; + case SQL_ATTR_PARAM_STATUS_PTR: /* 20 */ + *((SQLUSMALLINT **)Value) = SC_get_IPDF(stmt)->param_status_ptr; + len = sizeof(SQLPOINTER); + break; + case SQL_ATTR_PARAMS_PROCESSED_PTR: /* 21 */ + *((SQLULEN **)Value) = SC_get_IPDF(stmt)->param_processed_ptr; + len = sizeof(SQLPOINTER); + break; + case SQL_ATTR_PARAMSET_SIZE: /* 22 */ + *((SQLULEN *)Value) = SC_get_APDF(stmt)->paramset_size; + len = 
sizeof(SQLUINTEGER); + break; + case SQL_ATTR_ROW_BIND_OFFSET_PTR: /* 23 */ + *((SQLULEN **)Value) = SC_get_ARDF(stmt)->row_offset_ptr; + len = 4; + break; + case SQL_ATTR_ROW_OPERATION_PTR: /* 24 */ + *((SQLUSMALLINT **)Value) = SC_get_ARDF(stmt)->row_operation_ptr; + len = 4; + break; + case SQL_ATTR_ROW_STATUS_PTR: /* 25 */ + *((SQLUSMALLINT **)Value) = SC_get_IRDF(stmt)->rowStatusArray; + len = 4; + break; + case SQL_ATTR_ROWS_FETCHED_PTR: /* 26 */ + *((SQLULEN **)Value) = SC_get_IRDF(stmt)->rowsFetched; + len = 4; + break; + case SQL_ATTR_ROW_ARRAY_SIZE: /* 27 */ + *((SQLULEN *)Value) = SC_get_ARDF(stmt)->size_of_rowset; + len = 4; + break; + case SQL_ATTR_APP_ROW_DESC: /* 10010 */ + case SQL_ATTR_APP_PARAM_DESC: /* 10011 */ + case SQL_ATTR_IMP_ROW_DESC: /* 10012 */ + case SQL_ATTR_IMP_PARAM_DESC: /* 10013 */ + len = 4; + *((HSTMT *)Value) = + descHandleFromStatementHandle(StatementHandle, Attribute); + break; + + case SQL_ATTR_CURSOR_SCROLLABLE: /* -1 */ + len = 4; + if (SQL_CURSOR_FORWARD_ONLY == stmt->options.cursor_type) + *((SQLUINTEGER *)Value) = SQL_NONSCROLLABLE; + else + *((SQLUINTEGER *)Value) = SQL_SCROLLABLE; + break; + case SQL_ATTR_CURSOR_SENSITIVITY: /* -2 */ + len = 4; + if (SQL_CONCUR_READ_ONLY == stmt->options.scroll_concurrency) + *((SQLUINTEGER *)Value) = SQL_INSENSITIVE; + else + *((SQLUINTEGER *)Value) = SQL_UNSPECIFIED; + break; + case SQL_ATTR_METADATA_ID: /* 10014 */ + *((SQLUINTEGER *)Value) = stmt->options.metadata_id; + break; + case SQL_ATTR_ENABLE_AUTO_IPD: /* 15 */ + *((SQLUINTEGER *)Value) = SQL_FALSE; + break; + case SQL_ATTR_AUTO_IPD: /* 10001 */ + /* case SQL_ATTR_ROW_BIND_TYPE: ** == SQL_BIND_TYPE(ODBC2.0) */ + SC_set_error(stmt, DESC_INVALID_OPTION_IDENTIFIER, + "Unsupported statement option (Get)", func); + return SQL_ERROR; + default: + ret = ESAPI_GetStmtOption(StatementHandle, (SQLSMALLINT)Attribute, + Value, &len, BufferLength); + } + if (ret == SQL_SUCCESS && StringLength) + *StringLength = len; + return ret; +} + +/* 
SQLSetConnectOption -> SQLSetConnectAttr */ +RETCODE SQL_API ESAPI_SetConnectAttr(HDBC ConnectionHandle, + SQLINTEGER Attribute, PTR Value, + SQLINTEGER StringLength) { + UNUSED(StringLength); + CSTR func = "ESAPI_SetConnectAttr"; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + RETCODE ret = SQL_SUCCESS; + BOOL unsupported = FALSE; + int newValue; + + MYLOG(ES_TRACE, "entering for %p: " FORMAT_INTEGER " %p\n", + ConnectionHandle, Attribute, Value); + switch (Attribute) { + case SQL_ATTR_METADATA_ID: + conn->stmtOptions.metadata_id = CAST_UPTR(SQLUINTEGER, Value); + break; + case SQL_ATTR_ANSI_APP: + if (SQL_AA_FALSE != CAST_PTR(SQLINTEGER, Value)) { + MYLOG(ES_DEBUG, "the application is ansi\n"); + if (CC_is_in_unicode_driver(conn)) /* the driver is unicode */ + CC_set_in_ansi_app(conn); /* but the app is ansi */ + } else { + MYLOG(ES_DEBUG, "the application is unicode\n"); + } + /*return SQL_ERROR;*/ + return SQL_SUCCESS; + case SQL_ATTR_ENLIST_IN_DTC: + unsupported = TRUE; + break; + case SQL_ATTR_AUTO_IPD: + if (SQL_FALSE != Value) + unsupported = TRUE; + break; + case SQL_ATTR_ASYNC_ENABLE: + case SQL_ATTR_CONNECTION_DEAD: + case SQL_ATTR_CONNECTION_TIMEOUT: + unsupported = TRUE; + break; + case SQL_ATTR_ESOPT_DEBUG: + newValue = CAST_UPTR(SQLCHAR, Value); + if (newValue > 0) { + logs_on_off(-1, conn->connInfo.drivers.loglevel, 0); + conn->connInfo.drivers.loglevel = (char)newValue; + logs_on_off(1, conn->connInfo.drivers.loglevel, 0); + MYLOG(ES_DEBUG, "debug => %d\n", + conn->connInfo.drivers.loglevel); + } else if (newValue == 0 && conn->connInfo.drivers.loglevel > 0) { + MYLOG(ES_DEBUG, "debug => %d\n", newValue); + logs_on_off(-1, conn->connInfo.drivers.loglevel, 0); + conn->connInfo.drivers.loglevel = (char)newValue; + logs_on_off(1, 0, 0); + } + break; + case SQL_ATTR_ESOPT_COMMLOG: + newValue = CAST_UPTR(SQLCHAR, Value); + if (newValue > 0) { + logs_on_off(-1, 0, conn->connInfo.drivers.loglevel); + conn->connInfo.drivers.loglevel = 
(char)newValue; + logs_on_off(1, 0, conn->connInfo.drivers.loglevel); + MYLOG(ES_DEBUG, "commlog => %d\n", + conn->connInfo.drivers.loglevel); + } else if (newValue == 0 && conn->connInfo.drivers.loglevel > 0) { + MYLOG(ES_DEBUG, "commlog => %d\n", newValue); + logs_on_off(-1, 0, conn->connInfo.drivers.loglevel); + conn->connInfo.drivers.loglevel = (char)newValue; + logs_on_off(1, 0, 0); + } + break; + default: + if (Attribute < 65536) + ret = ESAPI_SetConnectOption( + ConnectionHandle, (SQLUSMALLINT)Attribute, (SQLLEN)Value); + else + unsupported = TRUE; + } + if (unsupported) { + char msg[64]; + SPRINTF_FIXED( + msg, "Couldn't set unsupported connect attribute " FORMAT_INTEGER, + Attribute); + CC_set_error(conn, CONN_OPTION_NOT_FOR_THE_DRIVER, msg, func); + return SQL_ERROR; + } + return ret; +} + +/* new function */ +RETCODE SQL_API ESAPI_GetDescField(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + CSTR func = "ESAPI_GetDescField"; + RETCODE ret = SQL_SUCCESS; + DescriptorClass *desc = (DescriptorClass *)DescriptorHandle; + + MYLOG(ES_TRACE, + "entering h=%p rec=" FORMAT_SMALLI " field=" FORMAT_SMALLI + " blen=" FORMAT_INTEGER "\n", + DescriptorHandle, RecNumber, FieldIdentifier, BufferLength); + switch (DC_get_desc_type(desc)) { + case SQL_ATTR_APP_ROW_DESC: + ret = ARDGetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength, StringLength); + break; + case SQL_ATTR_APP_PARAM_DESC: + ret = APDGetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength, StringLength); + break; + case SQL_ATTR_IMP_ROW_DESC: + ret = IRDGetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength, StringLength); + break; + case SQL_ATTR_IMP_PARAM_DESC: + ret = IPDGetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength, StringLength); + break; + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INTERNAL_ERROR, "Error not implemented"); + } + if (ret 
== SQL_ERROR) { + if (!DC_get_errormsg(desc)) { + switch (DC_get_errornumber(desc)) { + case DESC_INVALID_DESCRIPTOR_IDENTIFIER: + DC_set_errormsg( + desc, + "can't SQLGetDescField for this descriptor identifier"); + break; + case DESC_INVALID_COLUMN_NUMBER_ERROR: + DC_set_errormsg( + desc, "can't SQLGetDescField for this column number"); + break; + case DESC_BAD_PARAMETER_NUMBER_ERROR: + DC_set_errormsg( + desc, + "can't SQLGetDescField for this parameter number"); + break; + } + } + DC_log_error(func, "", desc); + } + return ret; +} + +/* new function */ +RETCODE SQL_API ESAPI_SetDescField(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength) { + CSTR func = "ESAPI_SetDescField"; + RETCODE ret = SQL_SUCCESS; + DescriptorClass *desc = (DescriptorClass *)DescriptorHandle; + + MYLOG(ES_TRACE, + "entering h=%p(%d) rec=" FORMAT_SMALLI " field=" FORMAT_SMALLI + " val=%p," FORMAT_INTEGER "\n", + DescriptorHandle, DC_get_desc_type(desc), RecNumber, FieldIdentifier, + Value, BufferLength); + switch (DC_get_desc_type(desc)) { + case SQL_ATTR_APP_ROW_DESC: + ret = ARDSetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength); + break; + case SQL_ATTR_APP_PARAM_DESC: + ret = APDSetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength); + break; + case SQL_ATTR_IMP_ROW_DESC: + ret = IRDSetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength); + break; + case SQL_ATTR_IMP_PARAM_DESC: + ret = IPDSetField(desc, RecNumber, FieldIdentifier, Value, + BufferLength); + break; + default: + ret = SQL_ERROR; + DC_set_error(desc, DESC_INTERNAL_ERROR, "Error not implemented"); + } + if (ret == SQL_ERROR) { + if (!DC_get_errormsg(desc)) { + switch (DC_get_errornumber(desc)) { + case DESC_INVALID_DESCRIPTOR_IDENTIFIER: + DC_set_errormsg( + desc, + "can't SQLSetDescField for this descriptor identifier"); + break; + case DESC_INVALID_COLUMN_NUMBER_ERROR: + DC_set_errormsg( + desc, "can't SQLSetDescField 
for this column number"); + break; + case DESC_BAD_PARAMETER_NUMBER_ERROR: + DC_set_errormsg( + desc, + "can't SQLSetDescField for this parameter number"); + break; + break; + } + } + DC_log_error(func, "", desc); + } + return ret; +} + +/* SQLSet(Param/Scroll/Stmt)Option -> SQLSetStmtAttr */ +RETCODE SQL_API ESAPI_SetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER StringLength) { + UNUSED(StringLength); + RETCODE ret = SQL_SUCCESS; + CSTR func = "ESAPI_SetStmtAttr"; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, + "entering Handle=%p " FORMAT_INTEGER "," FORMAT_ULEN "(%p)\n", + StatementHandle, Attribute, (SQLULEN)Value, Value); + switch (Attribute) { + case SQL_ATTR_ENABLE_AUTO_IPD: /* 15 */ + if (SQL_FALSE == Value) + break; + case SQL_ATTR_CURSOR_SCROLLABLE: /* -1 */ + case SQL_ATTR_CURSOR_SENSITIVITY: /* -2 */ + case SQL_ATTR_AUTO_IPD: /* 10001 */ + SC_set_error(stmt, DESC_OPTION_NOT_FOR_THE_DRIVER, + "Unsupported statement option (Set)", func); + return SQL_ERROR; + /* case SQL_ATTR_ROW_BIND_TYPE: ** == SQL_BIND_TYPE(ODBC2.0) */ + case SQL_ATTR_IMP_ROW_DESC: /* 10012 (read-only) */ + case SQL_ATTR_IMP_PARAM_DESC: /* 10013 (read-only) */ + + /* + * case SQL_ATTR_PREDICATE_PTR: case + * SQL_ATTR_PREDICATE_OCTET_LENGTH_PTR: + */ + SC_set_error(stmt, DESC_INVALID_OPTION_IDENTIFIER, + "Unsupported statement option (Set)", func); + return SQL_ERROR; + + case SQL_ATTR_METADATA_ID: /* 10014 */ + stmt->options.metadata_id = CAST_UPTR(SQLUINTEGER, Value); + break; + case SQL_ATTR_APP_ROW_DESC: /* 10010 */ + if (SQL_NULL_HDESC == Value) { + stmt->ard = &(stmt->ardi); + } else { + stmt->ard = (DescriptorClass *)Value; + MYLOG(ES_ALL, "set ard=%p\n", stmt->ard); + } + break; + case SQL_ATTR_APP_PARAM_DESC: /* 10011 */ + if (SQL_NULL_HDESC == Value) { + stmt->apd = &(stmt->apdi); + } else { + stmt->apd = (DescriptorClass *)Value; + } + break; + case SQL_ATTR_FETCH_BOOKMARK_PTR: /* 16 */ + 
stmt->options.bookmark_ptr = Value; + break; + case SQL_ATTR_PARAM_BIND_OFFSET_PTR: /* 17 */ + SC_get_APDF(stmt)->param_offset_ptr = (SQLULEN *)Value; + break; + case SQL_ATTR_PARAM_BIND_TYPE: /* 18 */ + SC_get_APDF(stmt)->param_bind_type = CAST_UPTR(SQLUINTEGER, Value); + break; + case SQL_ATTR_PARAM_OPERATION_PTR: /* 19 */ + SC_get_APDF(stmt)->param_operation_ptr = Value; + break; + case SQL_ATTR_PARAM_STATUS_PTR: /* 20 */ + SC_get_IPDF(stmt)->param_status_ptr = (SQLUSMALLINT *)Value; + break; + case SQL_ATTR_PARAMS_PROCESSED_PTR: /* 21 */ + SC_get_IPDF(stmt)->param_processed_ptr = (SQLULEN *)Value; + break; + case SQL_ATTR_PARAMSET_SIZE: /* 22 */ + SC_get_APDF(stmt)->paramset_size = CAST_UPTR(SQLULEN, Value); + break; + case SQL_ATTR_ROW_BIND_OFFSET_PTR: /* 23 */ + SC_get_ARDF(stmt)->row_offset_ptr = (SQLULEN *)Value; + break; + case SQL_ATTR_ROW_OPERATION_PTR: /* 24 */ + SC_get_ARDF(stmt)->row_operation_ptr = Value; + break; + case SQL_ATTR_ROW_STATUS_PTR: /* 25 */ + SC_get_IRDF(stmt)->rowStatusArray = (SQLUSMALLINT *)Value; + break; + case SQL_ATTR_ROWS_FETCHED_PTR: /* 26 */ + SC_get_IRDF(stmt)->rowsFetched = (SQLULEN *)Value; + break; + case SQL_ATTR_ROW_ARRAY_SIZE: /* 27 */ + SC_get_ARDF(stmt)->size_of_rowset = CAST_UPTR(SQLULEN, Value); + break; + default: + return ESAPI_SetStmtOption(StatementHandle, (SQLUSMALLINT)Attribute, + (SQLULEN)Value); + } + return ret; +} diff --git a/sql-odbc/src/odfesqlodbc/es_apifunc.h b/sql-odbc/src/odfesqlodbc/es_apifunc.h new file mode 100644 index 0000000000..8a248abf4a --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_apifunc.h @@ -0,0 +1,243 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef _ES_API_FUNC_H__ +#define _ES_API_FUNC_H__ + +#include +#include +#include "es_odbc.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Internal flags for catalog functions */ +#define PODBC_NOT_SEARCH_PATTERN 1L +#define PODBC_SEARCH_PUBLIC_SCHEMA (1L << 1) +#define PODBC_SEARCH_BY_IDS (1L << 2) +#define PODBC_SHOW_OID_COLUMN (1L << 3) +#define PODBC_ROW_VERSIONING (1L << 4) +/* Internal flags for ESAPI_AllocStmt functions */ +#define PODBC_EXTERNAL_STATEMENT 1L /* visible to the driver manager */ +#define PODBC_INHERIT_CONNECT_OPTIONS (1L << 1) +/* Internal flags for ESAPI_Exec... functions */ +/* Flags for the error handling */ +#define PODBC_ALLOW_PARTIAL_EXTRACT 1L +/* #define PODBC_ERROR_CLEAR (1L << 1) no longer used */ + +RETCODE SQL_API ESAPI_AllocConnect(HENV EnvironmentHandle, + HDBC *ConnectionHandle); +RETCODE SQL_API ESAPI_AllocEnv(HENV *EnvironmentHandle); +RETCODE SQL_API ESAPI_AllocStmt(HDBC ConnectionHandle, HSTMT *StatementHandle, + UDWORD flag); +RETCODE SQL_API ESAPI_BindCol(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, + SQLSMALLINT TargetType, PTR TargetValue, + SQLLEN BufferLength, SQLLEN *StrLen_or_Ind); +RETCODE SQL_API ESAPI_Connect(HDBC ConnectionHandle, const SQLCHAR *ServerName, + SQLSMALLINT NameLength1, const SQLCHAR *UserName, + SQLSMALLINT NameLength2, + const SQLCHAR *Authentication, + SQLSMALLINT NameLength3); +RETCODE SQL_API ESAPI_BrowseConnect(HDBC hdbc, const SQLCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, + SQLCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut); +RETCODE SQL_API ESAPI_DescribeCol( + HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, SQLCHAR *ColumnName, + SQLSMALLINT BufferLength, SQLSMALLINT *NameLength, 
SQLSMALLINT *DataType, + SQLULEN *ColumnSize, SQLSMALLINT *DecimalDigits, SQLSMALLINT *Nullable); +RETCODE SQL_API ESAPI_Disconnect(HDBC ConnectionHandle); +/* Helper functions for Error handling */ +RETCODE SQL_API ESAPI_EnvError(HENV EnvironmentHandle, SQLSMALLINT RecNumber, + SQLCHAR *Sqlstate, SQLINTEGER *NativeError, + SQLCHAR *MessageText, SQLSMALLINT BufferLength, + SQLSMALLINT *TextLength, UWORD flag); +RETCODE SQL_API ESAPI_ConnectError(HDBC ConnectionHandle, SQLSMALLINT RecNumber, + SQLCHAR *Sqlstate, SQLINTEGER *NativeError, + SQLCHAR *MessageText, + SQLSMALLINT BufferLength, + SQLSMALLINT *TextLength, UWORD flag); +RETCODE SQL_API ESAPI_StmtError(HSTMT StatementHandle, SQLSMALLINT RecNumber, + SQLCHAR *Sqlstate, SQLINTEGER *NativeError, + SQLCHAR *MessageText, SQLSMALLINT BufferLength, + SQLSMALLINT *TextLength, UWORD flag); +RETCODE SQL_API ESAPI_ExecDirect(HSTMT StatementHandle, + const SQLCHAR *StatementText, + SQLINTEGER TextLength, BOOL commit); +RETCODE SQL_API ESAPI_Execute(HSTMT StatementHandle); +RETCODE SQL_API ESAPI_Fetch(HSTMT StatementHandle); +RETCODE SQL_API ESAPI_FreeConnect(HDBC ConnectionHandle); +RETCODE SQL_API ESAPI_FreeEnv(HENV EnvironmentHandle); +RETCODE SQL_API ESAPI_FreeStmt(HSTMT StatementHandle, SQLUSMALLINT Option); +RETCODE SQL_API ESAPI_GetConnectOption(HDBC ConnectionHandle, + SQLUSMALLINT Option, PTR Value, + SQLINTEGER *StringLength, + SQLINTEGER BufferLength); +RETCODE SQL_API ESAPI_GetCursorName(HSTMT StatementHandle, SQLCHAR *CursorName, + SQLSMALLINT BufferLength, + SQLSMALLINT *NameLength); +RETCODE SQL_API ESAPI_GetData(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, + SQLSMALLINT TargetType, PTR TargetValue, + SQLLEN BufferLength, SQLLEN *StrLen_or_Ind); +RETCODE SQL_API ESAPI_GetFunctions(HDBC ConnectionHandle, + SQLUSMALLINT FunctionId, + SQLUSMALLINT *Supported); +RETCODE SQL_API ESAPI_GetFunctions30(HDBC ConnectionHandle, + SQLUSMALLINT FunctionId, + SQLUSMALLINT *Supported); +RETCODE SQL_API 
ESAPI_GetInfo(HDBC ConnectionHandle, SQLUSMALLINT InfoType, + PTR InfoValue, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength); +RETCODE SQL_API ESAPI_GetStmtOption(HSTMT StatementHandle, SQLUSMALLINT Option, + PTR Value, SQLINTEGER *StringLength, + SQLINTEGER BufferLength); +RETCODE SQL_API ESAPI_NumResultCols(HSTMT StatementHandle, + SQLSMALLINT *ColumnCount); +RETCODE SQL_API ESAPI_RowCount(HSTMT StatementHandle, SQLLEN *RowCount); +RETCODE SQL_API ESAPI_SetConnectOption(HDBC ConnectionHandle, + SQLUSMALLINT Option, SQLULEN Value); +RETCODE SQL_API ESAPI_SetCursorName(HSTMT StatementHandle, + const SQLCHAR *CursorName, + SQLSMALLINT NameLength); +RETCODE SQL_API ESAPI_SetStmtOption(HSTMT StatementHandle, SQLUSMALLINT Option, + SQLULEN Value); +RETCODE SQL_API +ESAPI_SpecialColumns(HSTMT StatementHandle, SQLUSMALLINT IdentifierType, + const SQLCHAR *CatalogName, SQLSMALLINT NameLength1, + const SQLCHAR *SchemaName, SQLSMALLINT NameLength2, + const SQLCHAR *TableName, SQLSMALLINT NameLength3, + SQLUSMALLINT Scope, SQLUSMALLINT Nullable); +RETCODE SQL_API ESAPI_Statistics( + HSTMT StatementHandle, const SQLCHAR *CatalogName, SQLSMALLINT NameLength1, + const SQLCHAR *SchemaName, SQLSMALLINT NameLength2, + const SQLCHAR *TableName, SQLSMALLINT NameLength3, SQLUSMALLINT Unique, + SQLUSMALLINT Reserved); +RETCODE SQL_API ESAPI_ColAttributes(HSTMT hstmt, SQLUSMALLINT icol, + SQLUSMALLINT fDescType, PTR rgbDesc, + SQLSMALLINT cbDescMax, SQLSMALLINT *pcbDesc, + SQLLEN *pfDesc); +RETCODE SQL_API ESAPI_Prepare(HSTMT hstmt, const SQLCHAR *szSqlStr, + SQLINTEGER cbSqlStr); +RETCODE SQL_API ESAPI_ColumnPrivileges( + HSTMT hstmt, const SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, + const SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, + const SQLCHAR *szTableName, SQLSMALLINT cbTableName, + const SQLCHAR *szColumnName, SQLSMALLINT cbColumnName, UWORD flag); +RETCODE SQL_API ESAPI_ExtendedFetch(HSTMT hstmt, SQLUSMALLINT fFetchType, + SQLLEN irow, SQLULEN *pcrow, + 
SQLUSMALLINT *rgfRowStatus, + SQLLEN FetchOffset, SQLLEN rowsetSize); +RETCODE SQL_API ESAPI_ForeignKeys( + HSTMT hstmt, const SQLCHAR *szPkCatalogName, SQLSMALLINT cbPkCatalogName, + const SQLCHAR *szPkSchemaName, SQLSMALLINT cbPkSchemaName, + const SQLCHAR *szPkTableName, SQLSMALLINT cbPkTableName, + const SQLCHAR *szFkCatalogName, SQLSMALLINT cbFkCatalogName, + const SQLCHAR *szFkSchemaName, SQLSMALLINT cbFkSchemaName, + const SQLCHAR *szFkTableName, SQLSMALLINT cbFkTableName); +RETCODE SQL_API ESAPI_MoreResults(HSTMT hstmt); +RETCODE SQL_API ESAPI_NativeSql(HDBC hdbc, const SQLCHAR *szSqlStrIn, + SQLINTEGER cbSqlStrIn, SQLCHAR *szSqlStr, + SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr); +RETCODE SQL_API ESAPI_NumParams(HSTMT hstmt, SQLSMALLINT *pcpar); +RETCODE SQL_API ESAPI_PrimaryKeys(HSTMT hstmt, const SQLCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + const SQLCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, + const SQLCHAR *szTableName, + SQLSMALLINT cbTableName, OID reloid); +RETCODE SQL_API ESAPI_ProcedureColumns( + HSTMT hstmt, const SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, + const SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, + const SQLCHAR *szProcName, SQLSMALLINT cbProcName, + const SQLCHAR *szColumnName, SQLSMALLINT cbColumnName, UWORD flag); +RETCODE SQL_API ESAPI_Procedures(HSTMT hstmt, const SQLCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + const SQLCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, + const SQLCHAR *szProcName, + SQLSMALLINT cbProcName, UWORD flag); +RETCODE SQL_API ESAPI_TablePrivileges(HSTMT hstmt, const SQLCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + const SQLCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, + const SQLCHAR *szTableName, + SQLSMALLINT cbTableName, UWORD flag); +RETCODE SQL_API ESAPI_GetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, + SQLINTEGER *NativeError, SQLCHAR *MessageText, + SQLSMALLINT BufferLength, + SQLSMALLINT *TextLength); 
+RETCODE SQL_API ESAPI_GetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, + SQLSMALLINT DiagIdentifier, PTR DiagInfoPtr, + SQLSMALLINT BufferLength, + SQLSMALLINT *StringLengthPtr); +RETCODE SQL_API ESAPI_GetConnectAttr(HDBC ConnectionHandle, + SQLINTEGER Attribute, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength); +RETCODE SQL_API ESAPI_GetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER BufferLength, + SQLINTEGER *StringLength); + +/* Driver-specific connection attributes, for SQLSet/GetConnectAttr() */ +enum { + SQL_ATTR_ESOPT_DEBUG = 65536, + SQL_ATTR_ESOPT_COMMLOG = 65537, + SQL_ATTR_ESOPT_PARSE = 65538, + SQL_ATTR_ESOPT_USE_DECLAREFETCH = 65539, + SQL_ATTR_ESOPT_SERVER_SIDE_PREPARE = 65540, + SQL_ATTR_ESOPT_FETCH = 65541, + SQL_ATTR_ESOPT_UNKNOWNSIZES = 65542, + SQL_ATTR_ESOPT_TEXTASLONGVARCHAR = 65543, + SQL_ATTR_ESOPT_UNKNOWNSASLONGVARCHAR = 65544, + SQL_ATTR_ESOPT_BOOLSASCHAR = 65545, + SQL_ATTR_ESOPT_MAXVARCHARSIZE = 65546, + SQL_ATTR_ESOPT_MAXLONGVARCHARSIZE = 65547, + SQL_ATTR_ESOPT_WCSDEBUG = 65548, + SQL_ATTR_ESOPT_MSJET = 65549 +}; +RETCODE SQL_API ESAPI_SetConnectAttr(HDBC ConnectionHandle, + SQLINTEGER Attribute, PTR Value, + SQLINTEGER StringLength); +RETCODE SQL_API ESAPI_SetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER StringLength); +RETCODE SQL_API ESAPI_AllocDesc(HDBC ConnectionHandle, + SQLHDESC *DescriptorHandle); +RETCODE SQL_API ESAPI_FreeDesc(SQLHDESC DescriptorHandle); +RETCODE SQL_API ESAPI_CopyDesc(SQLHDESC SourceDescHandle, + SQLHDESC TargetDescHandle); +RETCODE SQL_API ESAPI_SetDescField(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength); +RETCODE SQL_API ESAPI_GetDescField(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength); +RETCODE SQL_API 
ESAPI_DescError(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, + SQLINTEGER *NativeError, SQLCHAR *MessageText, + SQLSMALLINT BufferLength, + SQLSMALLINT *TextLength, UWORD flag); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* define_ES_API_FUNC_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/es_communication.cpp b/sql-odbc/src/odfesqlodbc/es_communication.cpp new file mode 100644 index 0000000000..cf25b070a7 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_communication.cpp @@ -0,0 +1,719 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "es_communication.h" + +// odfesqlodbc needs to be included before mylog, otherwise mylog will generate +// compiler warnings +// clang-format off +#include "es_odbc.h" +#include "mylog.h" +#include +#include +#include +#include +#include +#include +#include +// clang-format on + +static const std::string ctype = "application/json"; +static const std::string SQL_ENDPOINT_FORMAT_JDBC = + "/_opendistro/_sql?format=jdbc"; +static const std::string SQL_ENDPOINT_CLOSE_CURSOR = "/_opendistro/_sql/close"; +static const std::string PLUGIN_ENDPOINT_FORMAT_JSON = + "/_cat/plugins?format=json"; +static const std::string OPENDISTRO_SQL_PLUGIN_NAME = "opendistro_sql"; +static const std::string ALLOCATION_TAG = "AWS_SIGV4_AUTH"; +static const std::string SERVICE_NAME = "es"; +static const std::string ESODBC_PROFILE_NAME = "elasticsearchodbc"; +static const std::string JSON_SCHEMA = + "{" // This was generated from the example elasticsearch data + "\"type\": \"object\"," + "\"properties\": {" + "\"schema\": {" + "\"type\": \"array\"," + "\"items\": [{" + "\"type\": \"object\"," + "\"properties\": {" + "\"name\": { \"type\": \"string\" }," + "\"type\": { \"type\": \"string\" }" + "}," + "\"required\": [ \"name\", \"type\" ]" + "}]" + "}," + "\"cursor\": { \"type\": \"string\" }," + "\"total\": { \"type\": \"integer\" }," + "\"datarows\": {" + "\"type\": \"array\"," + "\"items\": {}" + "}," + "\"size\": { \"type\": \"integer\" }," + "\"status\": { \"type\": \"integer\" }" + "}," + "\"required\": [\"schema\", \"total\", \"datarows\", \"size\", \"status\"]" + "}"; +static const std::string CURSOR_JSON_SCHEMA = + "{" // This was generated from the example elasticsearch data + "\"type\": \"object\"," + "\"properties\": {" + "\"cursor\": { \"type\": \"string\" }," + "\"datarows\": {" + "\"type\": \"array\"," + "\"items\": {}" + "}," + "\"status\": { \"type\": \"integer\" }" + "}," + "\"required\": [\"datarows\"]" + "}"; + +void 
ESCommunication::AwsHttpResponseToString( + std::shared_ptr< Aws::Http::HttpResponse > response, std::string& output) { + // This function has some unconventional stream operations because we need + // performance over readability here. Equivalent code done in conventional + // ways (using stringstream operators) takes ~30x longer than this code + // below and bottlenecks our query performance + + // Get streambuffer from response and set position to start + std::streambuf* stream_buffer = response->GetResponseBody().rdbuf(); + stream_buffer->pubseekpos(0); + + // Get size of streambuffer and reserver that much space in the output + size_t avail = static_cast< size_t >(stream_buffer->in_avail()); + std::vector< char > buf(avail, '\0'); + output.clear(); + output.reserve(avail); + + // Directly copy memory from buffer into our string buffer + stream_buffer->sgetn(buf.data(), avail); + output.assign(buf.data(), avail); +} + +void ESCommunication::PrepareCursorResult(ESResult& es_result) { + // Prepare document and validate result + try { + LogMsg(ES_DEBUG, "Parsing result JSON with cursor."); + es_result.es_result_doc.parse(es_result.result_json, + CURSOR_JSON_SCHEMA); + } catch (const rabbit::parse_error& e) { + // The exception rabbit gives is quite useless - providing the json + // will aid debugging for users + std::string str = "Exception obtained '" + std::string(e.what()) + + "' when parsing json string '" + + es_result.result_json + "'."; + throw std::runtime_error(str.c_str()); + } +} + +void ESCommunication::GetJsonSchema(ESResult& es_result) { + // Prepare document and validate schema + try { + LogMsg(ES_DEBUG, "Parsing result JSON with schema."); + es_result.es_result_doc.parse(es_result.result_json, JSON_SCHEMA); + } catch (const rabbit::parse_error& e) { + // The exception rabbit gives is quite useless - providing the json + // will aid debugging for users + std::string str = "Exception obtained '" + std::string(e.what()) + + "' when parsing json string 
'" + + es_result.result_json + "'."; + throw std::runtime_error(str.c_str()); + } +} + +ESCommunication::ESCommunication() +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wreorder" +#endif // __APPLE__ + : m_status(ConnStatusType::CONNECTION_BAD), + m_valid_connection_options(false), + m_is_retrieving(false), + m_error_message(""), + m_result_queue(2), + m_client_encoding(m_supported_client_encodings[0]) +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ +{ + LogMsg(ES_ALL, "Initializing Aws API."); + Aws::InitAPI(m_options); +} + +ESCommunication::~ESCommunication() { + LogMsg(ES_ALL, "Shutting down Aws API."); + Aws::ShutdownAPI(m_options); +} + +std::string ESCommunication::GetErrorMessage() { + // TODO #35 - Check if they expect NULL or "" when there is no error. + return m_error_message; +} + +bool ESCommunication::ConnectionOptions(runtime_options& rt_opts, + bool use_defaults, int expand_dbname, + unsigned int option_count) { + (void)(expand_dbname); + (void)(option_count); + (void)(use_defaults); + m_rt_opts = rt_opts; + return CheckConnectionOptions(); +} + +bool ESCommunication::ConnectionOptions2() { + return true; +} + +bool ESCommunication::ConnectDBStart() { + LogMsg(ES_ALL, "Starting DB connection."); + m_status = ConnStatusType::CONNECTION_BAD; + if (!m_valid_connection_options) { + m_error_message = + "Invalid connection options, unable to connect to DB."; + LogMsg(ES_ERROR, m_error_message.c_str()); + DropDBConnection(); + return false; + } + + m_status = ConnStatusType::CONNECTION_NEEDED; + if (!EstablishConnection()) { + m_error_message = "Failed to establish connection to DB."; + LogMsg(ES_ERROR, m_error_message.c_str()); + DropDBConnection(); + return false; + } + + LogMsg(ES_DEBUG, "Connection established."); + m_status = ConnStatusType::CONNECTION_OK; + return true; +} + +ConnStatusType ESCommunication::GetConnectionStatus() { + return m_status; +} + +void 
ESCommunication::DropDBConnection() { + LogMsg(ES_ALL, "Dropping DB connection."); + if (m_http_client) { + m_http_client.reset(); + } + + m_status = ConnStatusType::CONNECTION_BAD; + StopResultRetrieval(); +} + +bool ESCommunication::CheckConnectionOptions() { + LogMsg(ES_ALL, "Verifying connection options."); + m_error_message = ""; + if (m_rt_opts.auth.auth_type != AUTHTYPE_NONE + && m_rt_opts.auth.auth_type != AUTHTYPE_IAM) { + if (m_rt_opts.auth.auth_type == AUTHTYPE_BASIC) { + if (m_rt_opts.auth.username.empty() + || m_rt_opts.auth.password.empty()) { + m_error_message = AUTHTYPE_BASIC + " authentication requires a username and password."; + } + } else { + m_error_message = "Unknown authentication type: '" + + m_rt_opts.auth.auth_type + "'"; + } + } else if (m_rt_opts.conn.server == "") { + m_error_message = "Host connection option was not specified."; + } + + if (m_error_message != "") { + LogMsg(ES_ERROR, m_error_message.c_str()); + m_valid_connection_options = false; + return false; + } else { + LogMsg(ES_DEBUG, "Required connection option are valid."); + m_valid_connection_options = true; + } + return m_valid_connection_options; +} + +void ESCommunication::InitializeConnection() { + Aws::Client::ClientConfiguration config; + config.scheme = (m_rt_opts.crypt.use_ssl ? Aws::Http::Scheme::HTTPS + : Aws::Http::Scheme::HTTP); + config.verifySSL = m_rt_opts.crypt.verify_server; + long response_timeout = + static_cast< long >(DEFAULT_RESPONSE_TIMEOUT) * 1000L; + try { + response_timeout = + std::stol(m_rt_opts.conn.timeout, nullptr, 10) * 1000L; + } catch (...) 
{ + } + config.connectTimeoutMs = response_timeout; + config.httpRequestTimeoutMs = response_timeout; + config.requestTimeoutMs = response_timeout; + m_http_client = Aws::Http::CreateHttpClient(config); +} + +std::shared_ptr< Aws::Http::HttpResponse > ESCommunication::IssueRequest( + const std::string& endpoint, const Aws::Http::HttpMethod request_type, + const std::string& content_type, const std::string& query, + const std::string& fetch_size, const std::string& cursor) { + // Generate http request + std::shared_ptr< Aws::Http::HttpRequest > request = + Aws::Http::CreateHttpRequest( + Aws::String( + m_rt_opts.conn.server + + (m_rt_opts.conn.port.empty() ? "" : ":" + m_rt_opts.conn.port) + + endpoint), + request_type, + Aws::Utils::Stream::DefaultResponseStreamFactoryMethod); + + // Set header type + if (!content_type.empty()) + request->SetHeaderValue(Aws::Http::CONTENT_TYPE_HEADER, ctype); + + // Set body + if (!query.empty() || !cursor.empty()) { + rabbit::object body; + if (!query.empty()) { + body["query"] = query; + if (!fetch_size.empty() && fetch_size != "-1") + body["fetch_size"] = fetch_size; + } else if (!cursor.empty()) { + body["cursor"] = cursor; + } + std::shared_ptr< Aws::StringStream > aws_ss = + Aws::MakeShared< Aws::StringStream >("RabbitStream"); + *aws_ss << std::string(body.str()); + request->AddContentBody(aws_ss); + request->SetContentLength(std::to_string(body.str().size())); + } + + // Handle authentication + if (m_rt_opts.auth.auth_type == AUTHTYPE_BASIC) { + std::string userpw_str = + m_rt_opts.auth.username + ":" + m_rt_opts.auth.password; + Aws::Utils::Array< unsigned char > userpw_arr( + reinterpret_cast< const unsigned char* >(userpw_str.c_str()), + userpw_str.length()); + std::string hashed_userpw = + Aws::Utils::HashingUtils::Base64Encode(userpw_arr); + request->SetAuthorization("Basic " + hashed_userpw); + } else if (m_rt_opts.auth.auth_type == AUTHTYPE_IAM) { + std::shared_ptr< Aws::Auth::ProfileConfigFileAWSCredentialsProvider 
> + credential_provider = + Aws::MakeShared< Aws::Auth::ProfileConfigFileAWSCredentialsProvider >( + ALLOCATION_TAG.c_str(), ESODBC_PROFILE_NAME.c_str()); + Aws::Client::AWSAuthV4Signer signer(credential_provider, + SERVICE_NAME.c_str(), + m_rt_opts.auth.region.c_str()); + signer.SignRequest(*request); + } + + // Issue request and return response + return m_http_client->MakeRequest(request); +} + +bool ESCommunication::IsSQLPluginInstalled(const std::string& plugin_response) { + try { + rabbit::document doc; + doc.parse(plugin_response); + + rabbit::array plugin_array = doc; + for (auto it : plugin_array) { + if (it.has("component") && it.has("version")) { + std::string plugin_name = it.at("component").as_string(); + if (!plugin_name.compare(OPENDISTRO_SQL_PLUGIN_NAME)) { + std::string sql_plugin_version = + it.at("version").as_string(); + LogMsg(ES_ERROR, std::string("Found SQL plugin version '" + + sql_plugin_version + "'.") + .c_str()); + return true; + } + } else { + m_error_message = + "Could not find all necessary fields in the plugin " + "response object. " + "(\"component\", \"version\")"; + throw std::runtime_error(m_error_message.c_str()); + } + } + } catch (const rabbit::type_mismatch& e) { + m_error_message = + "Error parsing endpoint response: " + std::string(e.what()); + } catch (const rabbit::parse_error& e) { + m_error_message = + "Error parsing endpoint response: " + std::string(e.what()); + } catch (const std::exception& e) { + m_error_message = + "Error parsing endpoint response: " + std::string(e.what()); + } catch (...) 
{ + m_error_message = + "Unknown exception thrown when parsing plugin endpoint response."; + } + + LogMsg(ES_ERROR, m_error_message.c_str()); + return false; +} + +bool ESCommunication::EstablishConnection() { + // Generate HttpClient Connection class if it does not exist + LogMsg(ES_ALL, "Attempting to establish DB connection."); + if (!m_http_client) { + InitializeConnection(); + } + + // Check whether SQL plugin has been installed on the Elasticsearch server. + // This is required for executing driver queries with the server. + LogMsg(ES_ALL, "Checking for SQL plugin"); + std::shared_ptr< Aws::Http::HttpResponse > response = + IssueRequest(PLUGIN_ENDPOINT_FORMAT_JSON, + Aws::Http::HttpMethod::HTTP_GET, "", "", ""); + if (response == nullptr) { + m_error_message = + "The SQL plugin must be installed in order to use this driver. " + "Received NULL response."; + } else { + AwsHttpResponseToString(response, m_response_str); + if (response->GetResponseCode() != Aws::Http::HttpResponseCode::OK) { + m_error_message = + "The SQL plugin must be installed in order to use this driver."; + if (response->HasClientError()) + m_error_message += " Client error: '" + + response->GetClientErrorMessage() + "'."; + if (!m_response_str.empty()) + m_error_message += " Response error: '" + m_response_str + "'."; + } else { + if (IsSQLPluginInstalled(m_response_str)) { + return true; + } else { + m_error_message = + "The SQL plugin must be installed in order to use this " + "driver. Response body: '" + + m_response_str + "'"; + } + } + } + LogMsg(ES_ERROR, m_error_message.c_str()); + return false; +} + +int ESCommunication::ExecDirect(const char* query, const char* fetch_size_) { + if (!query) { + m_error_message = "Query is NULL"; + LogMsg(ES_ERROR, m_error_message.c_str()); + return -1; + } else if (!m_http_client) { + m_error_message = "Unable to connect. 
Please try connecting again."; + LogMsg(ES_ERROR, m_error_message.c_str()); + return -1; + } + + // Prepare statement + std::string statement(query); + std::string fetch_size(fetch_size_); + std::string msg = "Attempting to execute a query \"" + statement + "\""; + LogMsg(ES_DEBUG, msg.c_str()); + + // Issue request + std::shared_ptr< Aws::Http::HttpResponse > response = + IssueRequest(SQL_ENDPOINT_FORMAT_JDBC, Aws::Http::HttpMethod::HTTP_POST, + ctype, statement, fetch_size); + + // Validate response + if (response == nullptr) { + m_error_message = + "Failed to receive response from query. " + "Received NULL response."; + LogMsg(ES_ERROR, m_error_message.c_str()); + return -1; + } + + // Convert body from Aws IOStream to string + std::unique_ptr< ESResult > result = std::make_unique< ESResult >(); + AwsHttpResponseToString(response, result->result_json); + + // If response was not valid, set error + if (response->GetResponseCode() != Aws::Http::HttpResponseCode::OK) { + m_error_message = + "Http response code was not OK. 
Code received: " + + std::to_string(static_cast< long >(response->GetResponseCode())) + + "."; + if (response->HasClientError()) + m_error_message += + " Client error: '" + response->GetClientErrorMessage() + "'."; + if (!result->result_json.empty()) { + m_error_message += + " Response error: '" + result->result_json + "'."; + } + LogMsg(ES_ERROR, m_error_message.c_str()); + return -1; + } + + // Add to result queue and return + try { + ConstructESResult(*result); + } catch (std::runtime_error& e) { + m_error_message = "Received runtime exception: " + std::string(e.what()); + if (!result->result_json.empty()) { + m_error_message += " Result body: " + result->result_json; + } + LogMsg(ES_ERROR, m_error_message.c_str()); + return -1; + } + + const std::string cursor = result->cursor; + while (!m_result_queue.push(QUEUE_TIMEOUT, result.get())) { + if (ConnStatusType::CONNECTION_OK == m_status) { + return -1; + } + } + + result.release(); + + if (!cursor.empty()) { + // If the response has a cursor, this thread will retrieve more result pages asynchronously. + std::thread([&, cursor]() { + SendCursorQueries(cursor); + }).detach(); + } + + return 0; +} + +void ESCommunication::SendCursorQueries(std::string cursor) { + if (cursor.empty()) { + return; + } + m_is_retrieving = true; + + try { + while (!cursor.empty() && m_is_retrieving) { + std::shared_ptr< Aws::Http::HttpResponse > response = IssueRequest( + SQL_ENDPOINT_FORMAT_JDBC, Aws::Http::HttpMethod::HTTP_POST, + ctype, "", "", cursor); + if (response == nullptr) { + m_error_message = + "Failed to receive response from cursor. 
" + "Received NULL response."; + LogMsg(ES_ERROR, m_error_message.c_str()); + return; + } + + std::unique_ptr result = std::make_unique(); + AwsHttpResponseToString(response, result->result_json); + PrepareCursorResult(*result); + + if (result->es_result_doc.has("cursor")) { + cursor = result->es_result_doc["cursor"].as_string(); + result->cursor = result->es_result_doc["cursor"].as_string(); + } else { + SendCloseCursorRequest(cursor); + cursor.clear(); + } + + while (m_is_retrieving + && !m_result_queue.push(QUEUE_TIMEOUT, result.get())) { + } + + // Don't release when attempting to push to the queue as it may take multiple tries. + result.release(); + } + } catch (std::runtime_error& e) { + m_error_message = + "Received runtime exception: " + std::string(e.what()); + LogMsg(ES_ERROR, m_error_message.c_str()); + } + + if (!m_is_retrieving) { + m_result_queue.clear(); + } else { + m_is_retrieving = false; + } +} + +void ESCommunication::SendCloseCursorRequest(const std::string& cursor) { + std::shared_ptr< Aws::Http::HttpResponse > response = + IssueRequest(SQL_ENDPOINT_CLOSE_CURSOR, + Aws::Http::HttpMethod::HTTP_POST, ctype, "", "", cursor); + if (response == nullptr) { + m_error_message = + "Failed to receive response from cursor. 
" + "Received NULL response."; + LogMsg(ES_ERROR, m_error_message.c_str()); + } +} + +void ESCommunication::StopResultRetrieval() { + m_is_retrieving = false; + m_result_queue.clear(); +} + +void ESCommunication::ConstructESResult(ESResult& result) { + GetJsonSchema(result); + rabbit::array schema_array = result.es_result_doc["schema"]; + for (rabbit::array::iterator it = schema_array.begin(); + it != schema_array.end(); ++it) { + std::string column_name = it->at("name").as_string(); + + ColumnInfo col_info; + col_info.field_name = column_name; + col_info.type_oid = KEYWORD_TYPE_OID; + col_info.type_size = KEYWORD_TYPE_SIZE; + col_info.display_size = KEYWORD_DISPLAY_SIZE; + col_info.length_of_str = KEYWORD_TYPE_SIZE; + col_info.relation_id = 0; + col_info.attribute_number = 0; + + result.column_info.push_back(col_info); + } + if (result.es_result_doc.has("cursor")) { + result.cursor = result.es_result_doc["cursor"].as_string(); + } + result.command_type = "SELECT"; + result.num_fields = (uint16_t)schema_array.size(); +} + +inline void ESCommunication::LogMsg(ESLogLevel level, const char* msg) { +#if WIN32 +#pragma warning(push) +#pragma warning(disable : 4551) +#endif // WIN32 + // cppcheck outputs an erroneous missing argument error which breaks build. 
+ // Disable for this function call + MYLOG(level, "%s\n", msg); +#if WIN32 +#pragma warning(pop) +#endif // WIN32 +} + +ESResult* ESCommunication::PopResult() { + ESResult* result = NULL; + while (!m_result_queue.pop(QUEUE_TIMEOUT, result) && m_is_retrieving) { + } + + return result; +} + +// TODO #36 - Send query to database to get encoding +std::string ESCommunication::GetClientEncoding() { + return m_client_encoding; +} + +// TODO #36 - Send query to database to set encoding +bool ESCommunication::SetClientEncoding(std::string& encoding) { + if (std::find(m_supported_client_encodings.begin(), + m_supported_client_encodings.end(), encoding) + != m_supported_client_encodings.end()) { + m_client_encoding = encoding; + return true; + } + LogMsg(ES_ERROR, + std::string("Failed to find encoding " + encoding).c_str()); + return false; +} + +std::string ESCommunication::GetServerVersion() { + if (!m_http_client) { + InitializeConnection(); + } + + // Issue request + std::shared_ptr< Aws::Http::HttpResponse > response = + IssueRequest("", Aws::Http::HttpMethod::HTTP_GET, "", "", ""); + if (response == nullptr) { + m_error_message = + "Failed to receive response from query. 
" + "Received NULL response."; + LogMsg(ES_ERROR, m_error_message.c_str()); + return ""; + } + + // Parse server version + if (response->GetResponseCode() == Aws::Http::HttpResponseCode::OK) { + try { + AwsHttpResponseToString(response, m_response_str); + rabbit::document doc; + doc.parse(m_response_str); + if (doc.has("version") && doc["version"].has("number")) { + return doc["version"]["number"].as_string(); + } + + } catch (const rabbit::type_mismatch& e) { + m_error_message = "Error parsing main endpoint response: " + + std::string(e.what()); + LogMsg(ES_ERROR, m_error_message.c_str()); + } catch (const rabbit::parse_error& e) { + m_error_message = "Error parsing main endpoint response: " + + std::string(e.what()); + LogMsg(ES_ERROR, m_error_message.c_str()); + } catch (const std::exception& e) { + m_error_message = "Error parsing main endpoint response: " + + std::string(e.what()); + LogMsg(ES_ERROR, m_error_message.c_str()); + } catch (...) { + LogMsg(ES_ERROR, + "Unknown exception thrown when parsing main endpoint " + "response."); + } + } + LogMsg(ES_ERROR, m_error_message.c_str()); + return ""; +} + +std::string ESCommunication::GetClusterName() { + if (!m_http_client) { + InitializeConnection(); + } + + // Issue request + std::shared_ptr< Aws::Http::HttpResponse > response = + IssueRequest("", Aws::Http::HttpMethod::HTTP_GET, "", "", ""); + if (response == nullptr) { + m_error_message = + "Failed to receive response from query. 
" + "Received NULL response."; + LogMsg(ES_ERROR, m_error_message.c_str()); + return ""; + } + + // Parse cluster name + if (response->GetResponseCode() == Aws::Http::HttpResponseCode::OK) { + try { + AwsHttpResponseToString(response, m_response_str); + rabbit::document doc; + doc.parse(m_response_str); + if (doc.has("cluster_name")) { + return doc["cluster_name"].as_string(); + } + + } catch (const rabbit::type_mismatch& e) { + m_error_message = "Error parsing main endpoint response: " + + std::string(e.what()); + LogMsg(ES_ERROR, m_error_message.c_str()); + } catch (const rabbit::parse_error& e) { + m_error_message = "Error parsing main endpoint response: " + + std::string(e.what()); + LogMsg(ES_ERROR, m_error_message.c_str()); + } catch (const std::exception& e) { + m_error_message = "Error parsing main endpoint response: " + + std::string(e.what()); + LogMsg(ES_ERROR, m_error_message.c_str()); + } catch (...) { + LogMsg(ES_ERROR, + "Unknown exception thrown when parsing main endpoint " + "response."); + } + } + LogMsg(ES_ERROR, m_error_message.c_str()); + return ""; +} diff --git a/sql-odbc/src/odfesqlodbc/es_communication.h b/sql-odbc/src/odfesqlodbc/es_communication.h new file mode 100644 index 0000000000..38ca25b1de --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_communication.h @@ -0,0 +1,101 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef ES_COMMUNICATION +#define ES_COMMUNICATION + +// clang-format off +#include +#include +#include +#include "es_types.h" +#include "es_result_queue.h" + +//Keep rabbit at top otherwise it gives build error because of some variable names like max, min +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif // __APPLE__ +#include "rabbit.hpp" +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ +#include +#include +#include +#include +#include +#include +#include +#include +// clang-format on + +class ESCommunication { + public: + ESCommunication(); + ~ESCommunication(); + + // Create function for factory + std::string GetErrorMessage(); + bool ConnectionOptions(runtime_options& rt_opts, bool use_defaults, + int expand_dbname, unsigned int option_count); + bool ConnectionOptions2(); + bool ConnectDBStart(); + ConnStatusType GetConnectionStatus(); + void DropDBConnection(); + void LogMsg(ESLogLevel level, const char* msg); + int ExecDirect(const char* query, const char* fetch_size_); + void SendCursorQueries(std::string cursor); + ESResult* PopResult(); + std::string GetClientEncoding(); + bool SetClientEncoding(std::string& encoding); + bool IsSQLPluginInstalled(const std::string& plugin_response); + std::string GetServerVersion(); + std::string GetClusterName(); + std::shared_ptr< Aws::Http::HttpResponse > IssueRequest( + const std::string& endpoint, const Aws::Http::HttpMethod request_type, + const std::string& content_type, const std::string& query, + const std::string& fetch_size = "", const std::string& cursor = ""); + void AwsHttpResponseToString( + std::shared_ptr< Aws::Http::HttpResponse > response, + std::string& output); + void SendCloseCursorRequest(const std::string& cursor); + void StopResultRetrieval(); + + private: + void InitializeConnection(); + bool CheckConnectionOptions(); + bool EstablishConnection(); + void ConstructESResult(ESResult& result); + void 
GetJsonSchema(ESResult& es_result); + void PrepareCursorResult(ESResult& es_result); + + // TODO #35 - Go through and add error messages on exit conditions + std::string m_error_message; + const std::vector< std::string > m_supported_client_encodings = {"UTF8"}; + + ConnStatusType m_status; + bool m_valid_connection_options; + bool m_is_retrieving; + ESResultQueue m_result_queue; + runtime_options m_rt_opts; + std::string m_client_encoding; + Aws::SDKOptions m_options; + std::string m_response_str; + std::shared_ptr< Aws::Http::HttpClient > m_http_client; +}; + +#endif diff --git a/sql-odbc/src/odfesqlodbc/es_connection.cpp b/sql-odbc/src/odfesqlodbc/es_connection.cpp new file mode 100644 index 0000000000..5699fb0a10 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_connection.cpp @@ -0,0 +1,210 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +/* TryEnterCriticalSection needs the following #define */ +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0400 +#endif /* _WIN32_WINNT */ + +#include "es_connection.h" + +#include +#include +#include + +#include "misc.h" + +/* for htonl */ +#ifdef WIN32 +#include +#else +#include +#endif + +#include +#include + +#include "dlg_specific.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_helper.h" +#include "loadlib.h" +#include "multibyte.h" +#include "qresult.h" +#include "statement.h" + +#define PROTOCOL3_OPTS_MAX 30 +#define ERROR_BUFF_SIZE 200 +#define OPTION_COUNT 4 +#if OPTION_COUNT > PROTOCOL3_OPTS_MAX +#error("Option count (OPTION_COUNT) is greater than max option count allow (PROTOCOL3_OPTS_MAX).") +#endif + +void CC_determine_locale_encoding(ConnectionClass *self); + +char CC_connect(ConnectionClass *self) { + if (self == NULL) + return 0; + + // Attempt to connect to ES + int conn_code = LIBES_connect(self); + if (conn_code <= 0) + return static_cast< char >(conn_code); + + // Set encodings + CC_determine_locale_encoding(self); +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(self)) { + if (!SQL_SUCCEEDED(CC_send_client_encoding(self, "UTF8"))) { + return 0; + } + } else +#endif + { + if (!SQL_SUCCEEDED( + CC_send_client_encoding(self, self->locale_encoding))) { + return 0; + } + } + + // Set cursor parameters based on connection info + self->status = CONN_CONNECTED; + if ((CC_is_in_unicode_driver(self)) && (CC_is_in_ansi_app(self))) + self->unicode |= CONN_DISALLOW_WCHAR; + + // 1 is SQL_SUCCESS and 2 is SQL_SCCUESS_WITH_INFO + return 1; +} + +int LIBES_connect(ConnectionClass *self) { + if (self == NULL) + return 0; + + // Setup options + runtime_options rt_opts; + + // Connection + rt_opts.conn.server.assign(self->connInfo.server); + rt_opts.conn.port.assign(self->connInfo.port); + rt_opts.conn.timeout.assign(self->connInfo.response_timeout); + + // Authentication + rt_opts.auth.auth_type.assign(self->connInfo.authtype); + 
rt_opts.auth.username.assign(self->connInfo.username); + rt_opts.auth.password.assign(SAFE_NAME(self->connInfo.password)); + rt_opts.auth.region.assign(self->connInfo.region); + + // Encryption + rt_opts.crypt.verify_server = (self->connInfo.verify_server == 1); + rt_opts.crypt.use_ssl = (self->connInfo.use_ssl == 1); + + void *esconn = ESConnectDBParams(rt_opts, FALSE, OPTION_COUNT); + if (esconn == NULL) { + std::string err = GetErrorMsg(esconn); + CC_set_error(self, CONN_OPENDB_ERROR, + (err.empty()) ? "ESConnectDBParams error" : err.c_str(), + "LIBES_connect"); + return 0; + } + + // Check connection status + if (ESStatus(esconn) != CONNECTION_OK) { + std::string msg = GetErrorMsg(esconn); + char error_message_out[ERROR_BUFF_SIZE] = ""; + if (!msg.empty()) + SPRINTF_FIXED( + error_message_out, + "elasticsearch connection status was not CONNECTION_OK: %s", + msg.c_str()); + else + STRCPY_FIXED(error_message_out, + "elasticsearch connection status was not " + "CONNECTION_OK. No error message " + "available."); + CC_set_error(self, CONN_OPENDB_ERROR, error_message_out, + "LIBES_connect"); + ESDisconnect(esconn); + return 0; + } + + // Set server version + std::string server_version = GetServerVersion(esconn); + STRCPY_FIXED(self->es_version, server_version.c_str()); + + std::string cluster_name = GetClusterName(esconn); + STRCPY_FIXED(self->cluster_name, cluster_name.c_str()); + + self->esconn = (void *)esconn; + return 1; +} + +// TODO #36 - When we fix encoding, we should look into returning a code here. This +// is called in connection.c and the return code isn't checked +void CC_set_locale_encoding(ConnectionClass *self, const char *encoding) { + if (self == NULL) + return; + + // Set encoding + char *prev_encoding = self->locale_encoding; + self->locale_encoding = (encoding == NULL) ? 
NULL : strdup(encoding); + if (prev_encoding) + free(prev_encoding); +} + +// TODO #36 - Add return code - see above function comment +void CC_determine_locale_encoding(ConnectionClass *self) { + // Don't update if it's already set + if ((self == NULL) || (self->locale_encoding != NULL)) + return; + + // Get current db encoding and derive the locale encoding + // TODO #34 - Investigate locale + CC_set_locale_encoding(self, "SQL_ASCII"); +} + +int CC_send_client_encoding(ConnectionClass *self, const char *encoding) { + if ((self == NULL) || (encoding == NULL)) + return SQL_ERROR; + + // Update client encoding + std::string des_db_encoding(encoding); + std::string cur_db_encoding = ESGetClientEncoding(self->esconn); + if (des_db_encoding != cur_db_encoding) { + if (!ESSetClientEncoding(self->esconn, des_db_encoding)) { + return SQL_ERROR; + } + } + + // Update connection class to reflect updated client encoding + char *prev_encoding = self->original_client_encoding; + self->original_client_encoding = strdup(des_db_encoding.c_str()); + self->ccsc = static_cast< short >(es_CS_code(des_db_encoding.c_str())); + self->mb_maxbyte_per_char = static_cast< short >(es_mb_maxlen(self->ccsc)); + if (prev_encoding != NULL) + free(prev_encoding); + + return SQL_SUCCESS; +} + +void CC_initialize_es_version(ConnectionClass *self) { + STRCPY_FIXED(self->es_version, "7.4"); + self->es_version_major = 7; + self->es_version_minor = 4; +} + +void LIBES_disconnect(void *conn) { + ESDisconnect(conn); +} diff --git a/sql-odbc/src/odfesqlodbc/es_connection.h b/sql-odbc/src/odfesqlodbc/es_connection.h new file mode 100644 index 0000000000..b523a126f7 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_connection.h @@ -0,0 +1,472 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __ESCONNECTION_H__ +#define __ESCONNECTION_H__ + +#include +#include +#include + +#include "descriptor.h" +#include "es_helper.h" +#include "es_odbc.h" +#include "es_utility.h" + +#ifdef __cplusplus +extern "C" { +#endif +typedef enum { + CONN_NOT_CONNECTED, /* Connection has not been established */ + CONN_CONNECTED, /* Connection is up and has been established */ + CONN_DOWN, /* Connection is broken */ + CONN_EXECUTING /* the connection is currently executing a + * statement */ +} CONN_Status; + +/* These errors have general sql error state */ +#define CONNECTION_SERVER_NOT_REACHED 101 +#define CONNECTION_MSG_TOO_LONG 103 +#define CONNECTION_COULD_NOT_SEND 104 +#define CONNECTION_NO_SUCH_DATABASE 105 +#define CONNECTION_BACKEND_CRAZY 106 +#define CONNECTION_NO_RESPONSE 107 +#define CONNECTION_SERVER_REPORTED_SEVERITY_FATAL 108 +#define CONNECTION_COULD_NOT_RECEIVE 109 +#define CONNECTION_SERVER_REPORTED_SEVERITY_ERROR 110 +#define CONNECTION_NEED_PASSWORD 112 +#define CONNECTION_COMMUNICATION_ERROR 113 + +#define CONN_ERROR_IGNORED (-3) +#define CONN_TRUNCATED (-2) +#define CONN_OPTION_VALUE_CHANGED (-1) +/* These errors correspond to specific SQL states */ +#define CONN_INIREAD_ERROR 201 +#define CONN_OPENDB_ERROR 202 +#define CONN_STMT_ALLOC_ERROR 203 +#define CONN_IN_USE 204 +#define CONN_UNSUPPORTED_OPTION 205 +/* Used by SetConnectoption to indicate unsupported options */ +#define CONN_INVALID_ARGUMENT_NO 206 +/* SetConnectOption: corresponds to ODBC--"S1009" */ +#define CONN_TRANSACT_IN_PROGRES 207 +#define CONN_NO_MEMORY_ERROR 208 +#define CONN_NOT_IMPLEMENTED_ERROR 
209 +#define CONN_INVALID_AUTHENTICATION 210 +#define CONN_AUTH_TYPE_UNSUPPORTED 211 +#define CONN_UNABLE_TO_LOAD_DLL 212 +#define CONN_ILLEGAL_TRANSACT_STATE 213 +#define CONN_VALUE_OUT_OF_RANGE 214 + +#define CONN_OPTION_NOT_FOR_THE_DRIVER 216 +#define CONN_EXEC_ERROR 217 + +/* Conn_status defines */ +#define CONN_IN_AUTOCOMMIT 1L +#define CONN_IN_TRANSACTION (1L << 1) +#define CONN_IN_MANUAL_TRANSACTION (1L << 2) +#define CONN_IN_ERROR_BEFORE_IDLE (1L << 3) + +/* not connected yet || already disconnected */ +#define CC_not_connected(x) \ + (!(x) || CONN_DOWN == (x)->status || CONN_NOT_CONNECTED == (x)->status) + +/* AutoCommit functions */ +#define CC_is_in_autocommit(x) (x->transact_status & CONN_IN_AUTOCOMMIT) +#define CC_does_autocommit(x) \ + (CONN_IN_AUTOCOMMIT \ + == ((x)->transact_status \ + & (CONN_IN_AUTOCOMMIT | CONN_IN_MANUAL_TRANSACTION))) +#define CC_loves_visible_trans(x) \ + ((0 == ((x)->transact_status & CONN_IN_AUTOCOMMIT)) \ + || (0 != ((x)->transact_status & CONN_IN_MANUAL_TRANSACTION))) + +/* Transaction in/not functions */ +#define CC_set_in_trans(x) (x->transact_status |= CONN_IN_TRANSACTION) +#define CC_set_no_trans(x) \ + (x->transact_status &= ~(CONN_IN_TRANSACTION | CONN_IN_ERROR_BEFORE_IDLE)) +#define CC_is_in_trans(x) (0 != (x->transact_status & CONN_IN_TRANSACTION)) + +/* Manual transaction in/not functions */ +#define CC_set_in_manual_trans(x) \ + (x->transact_status |= CONN_IN_MANUAL_TRANSACTION) +#define CC_set_no_manual_trans(x) \ + (x->transact_status &= ~CONN_IN_MANUAL_TRANSACTION) +#define CC_is_in_manual_trans(x) \ + (0 != (x->transact_status & CONN_IN_MANUAL_TRANSACTION)) + +/* Error waiting for ROLLBACK */ +#define CC_set_in_error_trans(x) \ + (x->transact_status |= CONN_IN_ERROR_BEFORE_IDLE) +#define CC_set_no_error_trans(x) \ + (x->transact_status &= ~CONN_IN_ERROR_BEFORE_IDLE) +#define CC_is_in_error_trans(x) (x->transact_status & CONN_IN_ERROR_BEFORE_IDLE) + +#define CC_get_errornumber(x) (x->__error_number) +#define 
CC_get_errormsg(x) (x->__error_message) +#define CC_set_errornumber(x, n) (x->__error_number = n) + +/* Unicode handling */ +#define CONN_UNICODE_DRIVER (1L) +#define CONN_ANSI_APP (1L << 1) +#define CONN_DISALLOW_WCHAR (1L << 2) +#define CC_set_in_unicode_driver(x) (x->unicode |= CONN_UNICODE_DRIVER) +#define CC_set_in_ansi_app(x) (x->unicode |= CONN_ANSI_APP) +#define CC_is_in_unicode_driver(x) (0 != (x->unicode & CONN_UNICODE_DRIVER)) +#define CC_is_in_ansi_app(x) (0 != (x->unicode & CONN_ANSI_APP)) +#define CC_is_in_global_trans(x) (NULL != (x)->asdum) +#define ALLOW_WCHAR(x) \ + (0 != (x->unicode & CONN_UNICODE_DRIVER) \ + && 0 == (x->unicode & CONN_DISALLOW_WCHAR)) + +#define CC_MALLOC_return_with_error(t, tp, s, x, m, ret) \ + do { \ + if (t = malloc(s), NULL == t) { \ + CC_set_error(x, CONN_NO_MEMORY_ERROR, m, ""); \ + return ret; \ + } \ + } while (0) +#define CC_REALLOC_return_with_error(t, tp, s, x, m, ret) \ + do { \ + tp *tmp; \ + if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ + CC_set_error(x, CONN_NO_MEMORY_ERROR, m, ""); \ + return ret; \ + } \ + t = tmp; \ + } while (0) + +/* For Multi-thread */ +#define INIT_CONN_CS(x) XPlatformInitializeCriticalSection(&((x)->cs)) +#define INIT_CONNLOCK(x) XPlatformInitializeCriticalSection(&((x)->slock)) +#define ENTER_CONN_CS(x) XPlatformEnterCriticalSection(((x)->cs)) +#define CONNLOCK_ACQUIRE(x) XPlatformEnterCriticalSection(((x)->slock)) +#define LEAVE_CONN_CS(x) XPlatformLeaveCriticalSection(((x)->cs)) +#define CONNLOCK_RELEASE(x) XPlatformLeaveCriticalSection(((x)->slock)) +#define DELETE_CONN_CS(x) XPlatformDeleteCriticalSection(&((x)->cs)) +#define DELETE_CONNLOCK(x) XPlatformDeleteCriticalSection(&((x)->slock)) + +#define ENTER_INNER_CONN_CS(conn, entered) \ + do { \ + ENTER_CONN_CS(conn); \ + entered++; \ + } while (0) + +#define LEAVE_INNER_CONN_CS(entered, conn) \ + do { \ + if (entered > 0) { \ + LEAVE_CONN_CS(conn); \ + entered--; \ + } \ + } while (0) + +#define CLEANUP_FUNC_CONN_CS(entered, 
conn) \ + do { \ + while (entered > 0) { \ + LEAVE_CONN_CS(conn); \ + entered--; \ + } \ + } while (0) + +/* + * Macros to compare the server's version with a specified version + * 1st parameter: pointer to a ConnectionClass object + * 2nd parameter: major version number + * 3rd parameter: minor version number + */ +#define SERVER_VERSION_GT(conn, major, minor) \ + ((conn)->es_version_major > major \ + || ((conn)->es_version_major == major \ + && (conn)->es_version_minor > minor)) +#define SERVER_VERSION_GE(conn, major, minor) \ + ((conn)->es_version_major > major \ + || ((conn)->es_version_major == major \ + && (conn)->es_version_minor >= minor)) +#define SERVER_VERSION_EQ(conn, major, minor) \ + ((conn)->es_version_major == major && (conn)->es_version_minor == minor) +#define STRING_AFTER_DOT(string) (strchr(#string, '.') + 1) + +/* + * Simplified macros to compare the server's version with a + * specified version + * Note: Never pass a variable as the second parameter. + * It must be a decimal constant of the form %d.%d . 
+ */ +#define ES_VERSION_GT(conn, ver) \ + (SERVER_VERSION_GT(conn, (int)ver, atoi(STRING_AFTER_DOT(ver)))) +#define ES_VERSION_GE(conn, ver) \ + (SERVER_VERSION_GE(conn, (int)ver, atoi(STRING_AFTER_DOT(ver)))) +#define ES_VERSION_EQ(conn, ver) \ + (SERVER_VERSION_EQ(conn, (int)ver, atoi(STRING_AFTER_DOT(ver)))) +#define ES_VERSION_LE(conn, ver) (!ES_VERSION_GT(conn, ver)) +#define ES_VERSION_LT(conn, ver) (!ES_VERSION_GE(conn, ver)) + +/* This is used to store cached table information in the connection */ +struct col_info { + Int2 refcnt; + QResultClass *result; + esNAME schema_name; + esNAME table_name; + OID table_oid; + int table_info; + time_t acc_time; +}; +enum { TBINFO_HASOIDS = 1L, TBINFO_HASSUBCLASS = (1L << 1) }; +#define free_col_info_contents(coli) \ + { \ + if (NULL != coli->result) \ + QR_Destructor(coli->result); \ + coli->result = NULL; \ + NULL_THE_NAME(coli->schema_name); \ + NULL_THE_NAME(coli->table_name); \ + coli->table_oid = 0; \ + coli->refcnt = 0; \ + coli->acc_time = 0; \ + } +#define col_info_initialize(coli) (memset(coli, 0, sizeof(COL_INFO))) + +/* Translation DLL entry points */ +#ifdef WIN32 +#define DLLHANDLE HINSTANCE +#else +#define WINAPI CALLBACK +#define DLLHANDLE void * +#define HINSTANCE void * +#endif + +typedef BOOL(WINAPI *DataSourceToDriverProc)(UDWORD, SWORD, PTR, SDWORD, PTR, + SDWORD, SDWORD *, UCHAR *, SWORD, + SWORD *); +typedef BOOL(WINAPI *DriverToDataSourceProc)(UDWORD, SWORD, PTR, SDWORD, PTR, + SDWORD, SDWORD *, UCHAR *, SWORD, + SWORD *); + +/******* The Connection handle ************/ +struct ConnectionClass_ { + HENV henv; /* environment this connection was + * created on */ + SQLUINTEGER login_timeout; + signed char autocommit_public; + StatementOptions stmtOptions; + ARDFields ardOptions; + APDFields apdOptions; + char *__error_message; + int __error_number; + char sqlstate[8]; + CONN_Status status; + ConnInfo connInfo; + StatementClass **stmts; + Int2 num_stmts; + Int2 ncursors; + void *esconn; + Int4 
lobj_type; + Int2 coli_allocated; + Int2 ntables; + COL_INFO **col_info; + long translation_option; + HINSTANCE translation_handle; + DataSourceToDriverProc DataSourceToDriver; + DriverToDataSourceProc DriverToDataSource; + char transact_status; /* Is a transaction is currently + * in progress */ + char cluster_name[MAX_INFO_STRING]; + char es_version[MAX_INFO_STRING]; /* Version of Elasticsearch driver + * we're connected to - + * DJP 25-1-2001 */ + Int2 es_version_major; + Int2 es_version_minor; + char ms_jet; + char unicode; + char result_uncommitted; + char lo_is_domain; + char current_schema_valid; /* is current_schema valid? TRUE when + * current_schema == NULL means it's + * really NULL, while FALSE means it's + * unknown */ + unsigned char on_commit_in_progress; + /* for per statement rollback */ + char internal_svp; /* is set? */ + char internal_op; /* operation being executed as to internal savepoint */ + unsigned char rbonerr; + unsigned char opt_in_progress; + unsigned char opt_previous; + + char *original_client_encoding; + char *locale_encoding; + char *server_encoding; + Int2 ccsc; + Int2 mb_maxbyte_per_char; + SQLUINTEGER isolation; /* isolation level initially unknown */ + SQLUINTEGER server_isolation; /* isolation at server initially unknown */ + char *current_schema; + StatementClass *unnamed_prepared_stmt; + Int2 max_identifier_length; + Int2 num_discardp; + char **discardp; + int num_descs; + SQLUINTEGER + default_isolation; /* server's default isolation initially unkown */ + DescriptorClass **descs; + esNAME schemaIns; + esNAME tableIns; + SQLULEN stmt_timeout_in_effect; + void *cs; + void *slock; +#ifdef _HANDLE_ENLIST_IN_DTC_ + UInt4 gTranInfo; + void *asdum; +#endif /* _HANDLE_ENLIST_IN_DTC_ */ +}; + +/* Accessor functions */ +#define CC_get_env(x) ((x)->henv) +#define CC_get_database(x) (x->connInfo.database) +#define CC_get_server(x) (x->connInfo.server) +#define CC_get_DSN(x) (x->connInfo.dsn) +#define CC_get_username(x) 
(x->connInfo.username) +#define CC_is_onlyread(x) (x->connInfo.onlyread[0] == '1') +#define CC_fake_mss(x) (/* 0 != (x)->ms_jet && */ 0 < (x)->connInfo.fake_mss) +#define CC_accessible_only(x) (0 < (x)->connInfo.accessible_only) +#define CC_default_is_c(x) \ + (CC_is_in_ansi_app(x) \ + || x->ms_jet /* not only */ || TRUE /* but for any other ? */) + +#ifdef _HANDLE_ENLIST_IN_DTC_ +enum { + DTC_IN_PROGRESS = 1L, + DTC_ENLISTED = (1L << 1), + DTC_REQUEST_EXECUTING = (1L << 2), + DTC_ISOLATED = (1L << 3), + DTC_PREPARE_REQUESTED = (1L << 4) +}; +#define CC_set_dtc_clear(x) ((x)->gTranInfo = 0) +#define CC_set_dtc_enlisted(x) \ + ((x)->gTranInfo |= (DTC_IN_PROGRESS | DTC_ENLISTED)) +#define CC_no_dtc_enlisted(x) ((x)->gTranInfo &= (~DTC_ENLISTED)) +#define CC_is_dtc_enlisted(x) (0 != ((x)->gTranInfo & DTC_ENLISTED)) +#define CC_set_dtc_executing(x) ((x)->gTranInfo |= DTC_REQUEST_EXECUTING) +#define CC_no_dtc_executing(x) ((x)->gTranInfo &= (~DTC_REQUEST_EXECUTING)) +#define CC_is_dtc_executing(x) (0 != ((x)->gTranInfo & DTC_REQUEST_EXECUTING)) +#define CC_set_dtc_prepareRequested(x) \ + ((x)->gTranInfo |= (DTC_PREPARE_REQUESTED)) +#define CC_no_dtc_prepareRequested(x) \ + ((x)->gTranInfo &= (~DTC_PREPARE_REQUESTED)) +#define CC_is_dtc_prepareRequested(x) \ + (0 != ((x)->gTranInfo & DTC_PREPARE_REQUESTED)) +#define CC_is_dtc_executing(x) (0 != ((x)->gTranInfo & DTC_REQUEST_EXECUTING)) +#define CC_set_dtc_isolated(x) ((x)->gTranInfo |= DTC_ISOLATED) +#define CC_is_idle_in_global_transaction(x) \ + (0 != ((x)->gTranInfo & DTC_PREPARE_REQUESTED) \ + || (x)->gTranInfo == DTC_IN_PROGRESS) +#endif /* _HANDLE_ENLIST_IN_DTC_ */ +/* statement callback */ +#define CC_start_stmt(a) ((a)->rbonerr = 0) +#define CC_start_tc_stmt(a) ((a)->rbonerr = (1L << 1)) +#define CC_is_tc_stmt(a) (((a)->rbonerr & (1L << 1)) != 0) +#define CC_start_rb_stmt(a) ((a)->rbonerr = (1L << 2)) +#define CC_is_rb_stmt(a) (((a)->rbonerr & (1L << 2)) != 0) +#define CC_set_accessed_db(a) ((a)->rbonerr |= (1L 
<< 3)) +#define CC_accessed_db(a) (((a)->rbonerr & (1L << 3)) != 0) +#define CC_start_rbpoint(a) ((a)->rbonerr |= (1L << 4), (a)->internal_svp = 1) +#define CC_started_rbpoint(a) (((a)->rbonerr & (1L << 4)) != 0) + +/* prototypes */ +ConnectionClass *CC_Constructor(void); +char CC_Destructor(ConnectionClass *self); +RETCODE CC_cleanup(ConnectionClass *self, BOOL keepCommunication); +BOOL CC_set_autocommit(ConnectionClass *self, BOOL on); +char CC_add_statement(ConnectionClass *self, StatementClass *stmt); +char CC_remove_statement(ConnectionClass *self, StatementClass *stmt); +char CC_add_descriptor(ConnectionClass *self, DescriptorClass *desc); +void CC_set_error(ConnectionClass *self, int number, const char *message, + const char *func); +void CC_set_errormsg(ConnectionClass *self, const char *message); +int CC_get_error(ConnectionClass *self, int *number, char **message); +void CC_clear_error(ConnectionClass *self); +void CC_log_error(const char *func, const char *desc, + const ConnectionClass *self); + +int CC_get_max_idlen(ConnectionClass *self); +char CC_get_escape(const ConnectionClass *self); +char *identifierEscape(const SQLCHAR *src, SQLLEN srclen, + const ConnectionClass *conn, char *buf, size_t bufsize, + BOOL double_quote); +int findIdentifier(const UCHAR *str, int ccsc, const UCHAR **next_token); +int eatTableIdentifiers(const UCHAR *str, int ccsc, esNAME *table, + esNAME *schema); + +char CC_connect(ConnectionClass *self); +int LIBES_connect(ConnectionClass *self); +void LIBES_disconnect(void *conn); +int CC_send_client_encoding(ConnectionClass *self, const char *encoding); +void CC_set_locale_encoding(ConnectionClass *self, const char *encoding); +void CC_initialize_es_version(ConnectionClass *self); + +const char *CurrCat(const ConnectionClass *self); +const char *CurrCatString(const ConnectionClass *self); +SQLUINTEGER CC_get_isolation(ConnectionClass *self); + +SQLCHAR *make_lstring_ifneeded(ConnectionClass *, const SQLCHAR *s, ssize_t len, + 
BOOL); + +#define TABLE_IS_VALID(tbname, tblen) \ + ((tbname) && (tblen > 0 || SQL_NTS == tblen)) + +/* CC_send_query options */ +enum { + IGNORE_ABORT_ON_CONN = 1L /* not set the error result even when */ + , + CREATE_KEYSET = (1L << 1) /* create keyset for updatable cursors */ + , + GO_INTO_TRANSACTION = (1L << 2) /* issue BEGIN in advance */ + , + ROLLBACK_ON_ERROR = (1L << 3) /* rollback the query when an error occurs */ + , + END_WITH_COMMIT = (1L << 4) /* the query ends with COMMIT command */ + , + READ_ONLY_QUERY = (1L << 5) /* the query is read-only */ +}; +/* CC_on_abort options */ +#define NO_TRANS 1L +#define CONN_DEAD (1L << 1) /* connection is no longer valid */ + +/* + * internal savepoint related + */ + +#define _RELEASE_INTERNAL_SAVEPOINT + +/* Internal rollback */ +enum { PER_STATEMENT_ROLLBACK = 1, PER_QUERY_ROLLBACK }; + +/* Commands generated */ +enum { INTERNAL_SAVEPOINT_OPERATION = 1, INTERNAL_ROLLBACK_OPERATION }; + +/* Operations in progress */ +enum { SAVEPOINT_IN_PROGRESS = 1, PREPEND_IN_PROGRESS }; +/* StatementSvp entry option */ +enum { SVPOPT_RDONLY = 1L, SVPOPT_REDUCE_ROUNDTRIP = (1L << 1) }; +#define INIT_SVPOPT (SVPOPT_RDONLY) +#define CC_svp_init(a) \ + ((a)->internal_svp = (a)->internal_op = 0, \ + (a)->opt_in_progress = (a)->opt_previous = INIT_SVPOPT) +#define CC_init_opt_in_progress(a) ((a)->opt_in_progress = INIT_SVPOPT) +#define CC_init_opt_previous(a) ((a)->opt_previous = INIT_SVPOPT) + +#ifdef __cplusplus +} +#endif +#endif /* __ESCONNECTION_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/es_driver_connect.cpp b/sql-odbc/src/odfesqlodbc/es_driver_connect.cpp new file mode 100644 index 0000000000..cb91984f7c --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_driver_connect.cpp @@ -0,0 +1,262 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "es_driver_connect.h" + +#include +#include + +#include "es_odbc.h" +#include "misc.h" + +#ifndef WIN32 +#include +#include +#else +#include +#endif + +#include + +#ifdef WIN32 +#include + +#include "resource.h" +#endif +#include +#include + +#include "dlg_specific.h" +#include "drvconn.h" +#include "es_apifunc.h" + +static RETCODE CheckDriverComplete(const SQLUSMALLINT driver_completion, + const HWND hwnd, ConnInfo *ci, + const int reqs) { + (void)(ci); + (void)(reqs); + if (hwnd == NULL) + return SQL_SUCCESS; + switch (driver_completion) { +#ifdef WIN32 + case SQL_DRIVER_COMPLETE_REQUIRED: + case SQL_DRIVER_COMPLETE: + if (!paramRequired(ci, reqs)) + break; + case SQL_DRIVER_PROMPT: { + const RETCODE dialog_result = dconn_DoDialog(hwnd, ci); + if (dialog_result != SQL_SUCCESS) + return dialog_result; + break; + } +#endif // WIN32 + default: + break; + } + return SQL_SUCCESS; +} + +static RETCODE GetRequirementsAndConnect(const SQLUSMALLINT driver_completion, + const HWND hwnd, ConnInfo *ci, + int &reqs, ConnectionClass *conn, + int &ret_val) { + const RETCODE res = CheckDriverComplete(driver_completion, hwnd, ci, reqs); + if (res != SQL_SUCCESS) + return res; + + // Password is not a required parameter unless authentication asks for it. 
    // Let the application ask over and over until a password is entered (the
    // user can always hit Cancel to get out)
    if (paramRequired(ci, reqs)) {
        CC_set_error(conn, CONN_OPENDB_ERROR, "Please supply password",
                     "ESAPI_DriverConnect->GetRequirements");
        return SQL_ERROR;
    }

    ret_val = CC_connect(conn);
    return SQL_SUCCESS;
}

// Builds the completed output connection string for SQLDriverConnect and
// copies it into the caller-supplied buffer.
// - len (out): length of the full (untruncated) connection string.
// - conn/ci: connection whose attributes are serialized by makeConnectString.
// - conn_str_out/conn_str_out_len: caller buffer; may be NULL (length only).
// - retval: result of the connect attempt; 1 maps to SQL_SUCCESS, anything
//   else to SQL_SUCCESS_WITH_INFO.
// On truncation the tail is trimmed back to the last complete ';'-terminated
// attribute and CONN_TRUNCATED is recorded on the connection.
static RETCODE CreateOutputConnectionString(ssize_t &len, ConnectionClass *conn,
                                            const ConnInfo *ci,
                                            const SQLSMALLINT conn_str_out_len,
                                            SQLCHAR *conn_str_out,
                                            const int retval) {
    // Create the output connection string
    SQLSMALLINT len_str_out = conn_str_out_len;
    // MS Jet (Access) cannot handle output connection strings > 255 chars
    if (conn->ms_jet && len_str_out > 255)
        len_str_out = 255;
    char conn_str[MAX_CONNECT_STRING];
    makeConnectString(conn_str, ci, len_str_out);

    // Set result and check connection string
    RETCODE result = ((retval == 1) ? SQL_SUCCESS : SQL_SUCCESS_WITH_INFO);
    len = strlen(conn_str);
    if (conn_str_out) {
        // Return the completed string to the caller. The correct method is to
        // only construct the connect string if a dialog was put up, otherwise,
        // it should just copy the connection input string to the output.
        // However, it seems ok to just always construct an output string.
        // There are possible bad side effects on working applications (Access)
        // by implementing the correct behavior
        strncpy((char *)conn_str_out, conn_str, conn_str_out_len);
        if (len >= conn_str_out_len) {
            // Trim the truncated copy back to the last complete attribute
            // (everything after the final ';' is zeroed).
            // NOTE(review): if the truncated copy ends exactly on ';', the
            // loop body never runs and no NUL terminator is written after
            // strncpy — confirm callers tolerate an unterminated buffer here.
            for (int clen = conn_str_out_len - 1;
                 clen >= 0 && conn_str_out[clen] != ';'; clen--)
                conn_str_out[clen] = '\0';
            result = SQL_SUCCESS_WITH_INFO;
            CC_set_error(conn, CONN_TRUNCATED,
                         "Buffer is too small for output conn str.",
                         "CreateOutputConnectionString");
        }
    }
    return result;
}

// Translates the CC_connect result code into an error message.
// Returns "" on success (retval > 0), otherwise a human-readable reason:
// retval == 0 means the connect attempt itself failed; retval < 0 means more
// information (a password) is required, which is only resolvable by a prompt
// dialog on Windows and never under SQL_DRIVER_NOPROMPT.
static std::string CheckRetVal(const int retval, const HWND hwnd,
                               const SQLUSMALLINT driver_completion,
                               const int reqs, const ConnInfo *ci) {
    // Parameters are only used on some platforms/branches; silence warnings.
    (void)(ci);
    (void)(reqs);
    (void)(hwnd);
    if (retval > 0)
        return "";
    // Error attempting to connect
    else if (retval == 0)
        return "Error from CC_Connect";
    // More info is required
    else if (retval < 0) {
        // Not allowed to prompt, but PW is required - Error
        if (driver_completion == SQL_DRIVER_NOPROMPT) {
            return "Need password but Driver_NoPrompt is set";
        } else {
#ifdef WIN32
            if (!(hwnd && paramRequired(ci, reqs)))
                return "Unable to prompt for required parameter";
#else
            return "Driver prompt only supported on Windows";
#endif
        }
    }
    return "";
}

// Parses the incoming SQLDriverConnect connection string into `ci`,
// merges in DSN-stored attributes, and initializes driver logging.
// Returns SQL_SUCCESS, or SQL_ERROR (with CONN_OPENDB_ERROR set on `conn`)
// when the connection string cannot be parsed.
static SQLRETURN SetupConnString(const SQLCHAR *conn_str_in,
                                 const SQLSMALLINT conn_str_in_len,
                                 ConnInfo *ci, ConnectionClass *conn) {
    CSTR func = "SetupConnString";

    // make_string uses malloc, need to overwrite delete operator to use free
    // for unique_ptr
    struct free_delete {
        void operator()(void *x) {
            if (x != NULL) {
                free(x);
                x = NULL;
            }
        }
    };

    // Make connection string and get DSN
    std::unique_ptr< char, free_delete > conn_str(
        make_string(conn_str_in, conn_str_in_len, NULL, 0));

    if (!dconn_get_DSN_or_Driver(conn_str.get(), ci)) {
        CC_set_error(conn, CONN_OPENDB_ERROR, "Connection string parse error",
                     func);
        return SQL_ERROR;
    }

    // This will be used to restore the log output dir fetched from connection
    // string, since getDSNinfo overrides all available connection attributes
    std::string conn_string_log_dir(ci->drivers.output_dir);

    // If the ConnInfo in the hdbc is missing anything, this function will fill
    // them in from the registry (assuming of course there is a DSN given -- if
    // not, it does nothing!)
    getDSNinfo(ci, NULL);

    // Parse the connect string and fill in conninfo
    if (!dconn_get_connect_attributes(conn_str.get(), ci)) {
        CC_set_error(conn, CONN_OPENDB_ERROR, "Connection string parse error",
                     func);
        return SQL_ERROR;
    }
    logs_on_off(1, ci->drivers.loglevel, ci->drivers.loglevel);

    // Sets log output dir to path retrieved from connection string.
    // If connection string doesn't have log path then takes value from DSN.
    // If connection string & DSN both don't include log path then takes the
    // default value.
    if (!conn_string_log_dir.empty()) {
        setLogDir(conn_string_log_dir.c_str());
        conn_string_log_dir.clear();
    } else {
        setLogDir(ci->drivers.output_dir);
    }
    InitializeLogging();
    return SQL_SUCCESS;
}

// Driver entry point for SQLDriverConnect: parses the connection string,
// connects (prompting for missing credentials where supported), and returns
// the completed connection string to the caller.
RETCODE ESAPI_DriverConnect(HDBC hdbc, HWND hwnd, SQLCHAR *conn_str_in,
                            SQLSMALLINT conn_str_in_len, SQLCHAR *conn_str_out,
                            SQLSMALLINT conn_str_out_len,
                            SQLSMALLINT *pcb_conn_str_out,
                            SQLUSMALLINT driver_completion) {
    CSTR func = "ESAPI_DriverConnect";
    ConnectionClass *conn = (ConnectionClass *)hdbc;

    if (!conn) {
        CC_log_error(func, "ConnectionClass handle is NULL", NULL);
        return SQL_INVALID_HANDLE;
    }
    ConnInfo *ci = &(conn->connInfo);

    // Setup connection string
    {
        const SQLRETURN return_code =
            SetupConnString(conn_str_in, conn_str_in_len, ci, conn);
        if (return_code != SQL_SUCCESS)
            return return_code;
    }

    // Initialize es_version
    CC_initialize_es_version(conn);

    int reqs = 0;
    int retval = 0;
    // Loop until the connect attempt succeeds (retval > 0) or an error is
    // reported; retval <= 0 means credentials were still missing and another
    // prompt/connect round is needed.
    do {
        const SQLRETURN return_code = GetRequirementsAndConnect(
            driver_completion, hwnd, ci, reqs, conn, retval);
        if (return_code != SQL_SUCCESS)
            return return_code;

        // Check for errors
        const std::string error_msg =
            CheckRetVal(retval, hwnd, driver_completion, reqs, ci);

        // If we have an error, log it and exit
        if (error_msg != "") {
            CC_log_error(func, error_msg.c_str(), conn);
            return SQL_ERROR;
        }
    } while (retval <= 0);

    ssize_t len = 0;
    const RETCODE result = CreateOutputConnectionString(
        len, conn, ci, conn_str_out_len, conn_str_out, retval);
    if (pcb_conn_str_out)
        *pcb_conn_str_out = static_cast< SQLSMALLINT >(len);
    return result;
}
diff --git a/sql-odbc/src/odfesqlodbc/es_driver_connect.h b/sql-odbc/src/odfesqlodbc/es_driver_connect.h
new file mode 100644
index 0000000000..be6bb2d6ca
--- /dev/null
+++ b/sql-odbc/src/odfesqlodbc/es_driver_connect.h
@@ -0,0 +1,34 @@
/*
 * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
+ * + */ + +#ifndef __ES_DRIVER_CONNECT_H__ +#define __ES_DRIVER_CONNECT_H__ +#include "es_connection.h" + +// C Interface +#ifdef __cplusplus +extern "C" { +#endif +RETCODE ESAPI_DriverConnect(HDBC hdbc, HWND hwnd, SQLCHAR *conn_str_in, + SQLSMALLINT conn_str_in_len, SQLCHAR *conn_str_out, + SQLSMALLINT conn_str_out_len, + SQLSMALLINT *pcb_conn_str_out, + SQLUSMALLINT driver_completion); +#ifdef __cplusplus +} +#endif + +#endif /* __ES_DRIVER_CONNECT_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/es_helper.cpp b/sql-odbc/src/odfesqlodbc/es_helper.cpp new file mode 100644 index 0000000000..cf243137ce --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_helper.cpp @@ -0,0 +1,219 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "es_helper.h" + +#include +#include +#include + +#include "es_communication.h" + +void* ESConnectDBParams(runtime_options& rt_opts, int expand_dbname, + unsigned int option_count) { + // Initialize Connection + ESCommunication* conn = static_cast< ESCommunication* >(InitializeESConn()); + if (!conn) + return NULL; + + // Set user defined connection options + if (!conn->ConnectionOptions(rt_opts, true, expand_dbname, option_count)) + return conn; + + // Set user derived connection options + if (!conn->ConnectionOptions2()) + return conn; + + // Connect to DB + if (!conn->ConnectDBStart()) + return conn; + + // Technically this is always the result, so we could remove the above or + // make 1 large if statement, but I think this is more legible + return conn; +} + +ConnStatusType ESStatus(void* es_conn) { + return es_conn + ? static_cast< ESCommunication* >(es_conn)->GetConnectionStatus() + : ConnStatusType::CONNECTION_BAD; +} + +std::string GetErrorMsg(void* es_conn) { + return es_conn ? static_cast< ESCommunication* >(es_conn)->GetErrorMessage() + : NULL; +} + +std::string GetServerVersion(void* es_conn) { + return es_conn + ? static_cast< ESCommunication* >(es_conn)->GetServerVersion() + : ""; +} + +std::string GetClusterName(void* es_conn) { + return es_conn + ? static_cast< ESCommunication* >(es_conn)->GetClusterName() + : ""; +} + +void* InitializeESConn() { + return new ESCommunication(); +} + +int ESExecDirect(void* es_conn, const char* statement, const char* fetch_size) { + return (es_conn && statement) + ? static_cast< ESCommunication* >(es_conn)->ExecDirect( + statement, fetch_size) + : -1; +} + +void ESSendCursorQueries(void* es_conn, const char* cursor) { + static_cast< ESCommunication* >(es_conn)->SendCursorQueries(cursor); +} + +ESResult* ESGetResult(void* es_conn) { + return es_conn ? static_cast< ESCommunication* >(es_conn)->PopResult() + : NULL; +} + +std::string ESGetClientEncoding(void* es_conn) { + return es_conn + ? 
static_cast< ESCommunication* >(es_conn)->GetClientEncoding() + : ""; +} + +bool ESSetClientEncoding(void* es_conn, std::string& encoding) { + return es_conn + ? static_cast< ESCommunication* >(es_conn)->SetClientEncoding( + encoding) + : false; +} + +void ESDisconnect(void* es_conn) { + delete static_cast< ESCommunication* >(es_conn); +} + +void ESClearResult(ESResult* es_result) { + delete es_result; +} + +void ESStopRetrieval(void* es_conn) { + static_cast< ESCommunication* >(es_conn)->StopResultRetrieval(); +} + +// This class provides a cross platform way of entering critical sections +class CriticalSectionHelper { + public: + // Don't need to initialize lock owner because default constructor sets it + // to thread id 0, which is invalid + CriticalSectionHelper() : m_lock_count(0) { + } + ~CriticalSectionHelper() { + } + + void EnterCritical() { + // Get current thread id, if it's the lock owner, increment lock count, + // otherwise lock and take ownership + std::thread::id current_thread = std::this_thread::get_id(); + if (m_lock_owner == current_thread) { + m_lock_count++; + } else { + m_lock.lock(); + m_lock_owner = current_thread; + m_lock_count = 1; + } + } + + void ExitCritical() { + // Get current thread id, if it's the owner, decerement and unlock if + // the lock count is 0. Otherwise, log critical warning because we + // should only allow the lock owner to unlock + std::thread::id current_thread = std::this_thread::get_id(); + if (m_lock_owner == current_thread) { + if (m_lock_count == 0) { +// This should never happen. 
Log critical warning +#ifdef WIN32 +#pragma warning(push) +#pragma warning(disable : 4551) // MYLOG complains 'function call missing + // argument list' on Windows, which is isn't +#endif + MYLOG(ES_ERROR, "%s\n", + "CRITICAL WARNING: ExitCritical section called when lock " + "count was already 0!"); +#ifdef WIN32 +#pragma warning(pop) +#endif + } else if (--m_lock_count == 0) { + // Reset lock owner to invalid thread id (0) + m_lock_owner = std::thread::id(); + m_lock.unlock(); + } + } else { +// This should never happen. Log critical warning +#ifdef WIN32 +#pragma warning(push) +#pragma warning(disable : 4551) // MYLOG complains 'function call missing + // argument list' on Windows, which is isn't +#endif + MYLOG(ES_ERROR, "%s\n", + "CRITICAL WARNING: ExitCritical section called by thread " + "that does not own the lock!"); +#ifdef WIN32 +#pragma warning(pop) +#endif + } + } + + private: + size_t m_lock_count; + std::atomic< std::thread::id > m_lock_owner; + std::mutex m_lock; +}; + +// Initialize pointer to point to our helper class +void XPlatformInitializeCriticalSection(void** critical_section_helper) { + if (critical_section_helper != NULL) { + try { + *critical_section_helper = new CriticalSectionHelper(); + } catch (...) 
{ + *critical_section_helper = NULL; + } + } +} + +// Call enter critical section +void XPlatformEnterCriticalSection(void* critical_section_helper) { + if (critical_section_helper != NULL) { + static_cast< CriticalSectionHelper* >(critical_section_helper) + ->EnterCritical(); + } +} + +// Call exit critical section +void XPlatformLeaveCriticalSection(void* critical_section_helper) { + if (critical_section_helper != NULL) { + static_cast< CriticalSectionHelper* >(critical_section_helper) + ->ExitCritical(); + } +} + +// Delete our helper class +void XPlatformDeleteCriticalSection(void** critical_section_helper) { + if (critical_section_helper != NULL) { + delete static_cast< CriticalSectionHelper* >(*critical_section_helper); + *critical_section_helper = NULL; + } +} diff --git a/sql-odbc/src/odfesqlodbc/es_helper.h b/sql-odbc/src/odfesqlodbc/es_helper.h new file mode 100644 index 0000000000..2328b07d7a --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_helper.h @@ -0,0 +1,52 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef __ES_HELPER_H__ +#define __ES_HELPER_H__ + +#include "es_types.h" + +#ifdef __cplusplus +// C++ interface +std::string ESGetClientEncoding(void* es_conn); +bool ESSetClientEncoding(void* es_conn, std::string& encoding); +ESResult* ESGetResult(void* es_conn); +void ESClearResult(ESResult* es_result); +void* ESConnectDBParams(runtime_options& rt_opts, int expand_dbname, + unsigned int option_count); +std::string GetServerVersion(void* es_conn); +std::string GetClusterName(void* es_conn); +std::string GetErrorMsg(void* es_conn); + +// C Interface +extern "C" { +#endif +void XPlatformInitializeCriticalSection(void** critical_section_helper); +void XPlatformEnterCriticalSection(void* critical_section_helper); +void XPlatformLeaveCriticalSection(void* critical_section_helper); +void XPlatformDeleteCriticalSection(void** critical_section_helper); +ConnStatusType ESStatus(void* es_conn); +int ESExecDirect(void* es_conn, const char* statement, const char* fetch_size); +void ESSendCursorQueries(void* es_conn, const char* cursor); +void ESDisconnect(void* es_conn); +void ESStopRetrieval(void* es_conn); +#ifdef __cplusplus +} +#endif + +void* InitializeESConn(); + +#endif // __ES_HELPER_H__ diff --git a/sql-odbc/src/odfesqlodbc/es_info.cpp b/sql-odbc/src/odfesqlodbc/es_info.cpp new file mode 100644 index 0000000000..4c037f1ba9 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_info.cpp @@ -0,0 +1,962 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ +#include "es_info.h" + +#include +#include + +#include +#include +#include +#include +#include +#include + +// TODO #324 (SQL Plugin)- Update if Elasticsearch extends support for multiple +// tables +#define DEFAULT_TYPE_STR \ + { 'k', 'e', 'y', 'w', 'o', 'r', 'd', '\0' } +#define DEFAULT_TYPE_INT (SQL_WVARCHAR) +#define EMPTY_VARCHAR \ + { '\0' } +#define ES_UNINITIALIZED (-2) +#define COLUMN_TEMPLATE_COUNT 18 +#define TABLE_TEMPLATE_COUNT 5 + +#define TABLE_CAT "TABLE_CAT" +#define TABLE_SCHEM "TABLE_SCHEM" +#define TABLE_NAME "TABLE_NAME" +#define COLUMN_NAME "COLUMN_NAME" +#define DATA_TYPE "DATA_TYPE" +#define TYPE_NAME "TYPE_NAME" +#define COLUMN_SIZE "COLUMN_SIZE" +#define BUFFER_LENGTH "BUFFER_LENGTH" +#define DECIMAL_DIGITS "DECIMAL_DIGITS" +#define NUM_PREC_RADIX "NUM_PREC_RADIX" +#define NULLABLE "NULLABLE" +#define REMARKS "REMARKS" +#define COLUMN_DEF "COLUMN_DEF" +#define SQL_DATA_TYPE "SQL_DATA_TYPE" +#define SQL_DATETIME_SUB "SQL_DATETIME_SUB" +#define CHAR_OCTET_LENGTH "CHAR_OCTET_LENGTH" +#define ORDINAL_POSITION "ORDINAL_POSITION" +#define IS_NULLABLE "IS_NULLABLE" +#define TABLE_QUALIFIER "TABLE_QUALIFIER" +#define TABLE_OWNER "TABLE_OWNER" +#define TABLE_TYPE "TABLE_TYPE" +#define PRECISION "PRECISION" +#define LITERAL_PREFIX "LITERAL_PREFIX" +#define LITERAL_SUFFIX "LITERAL_SUFFIX" +#define CREATE_PARAMS "CREATE_PARAMS" +#define CASE_SENSITIVE "CASE_SENSITIVE" +#define SEARCHABLE "SEARCHABLE" +#define UNSIGNED_ATTRIBUTE "UNSIGNED_ATTRIBUTE" +#define FIXED_PREC_SCALE "FIXED_PREC_SCALE" +#define AUTO_INCREMENT "AUTO_INCREMENT" +#define LOCAL_TYPE_NAME "LOCAL_TYPE_NAME" +#define MINIMUM_SCALE "MINIMUM_SCALE" +#define MAXIMUM_SCALE "MAXIMUM_SCALE" +#define INTERVAL_PRECISION "INTERVAL_PRECISION" + +const std::unordered_map< int, std::vector< int > > sql_es_type_map = { + {SQL_BIT, {ES_TYPE_BOOL}}, + {SQL_TINYINT, {ES_TYPE_INT1}}, + 
{SQL_SMALLINT, {ES_TYPE_INT2}}, + {SQL_INTEGER, {ES_TYPE_INT4}}, + {SQL_BIGINT, {ES_TYPE_INT8}}, + {SQL_REAL, {ES_TYPE_HALF_FLOAT, ES_TYPE_FLOAT4}}, + {SQL_DOUBLE, {ES_TYPE_FLOAT8, ES_TYPE_SCALED_FLOAT}}, + {SQL_WVARCHAR, + {ES_TYPE_KEYWORD, ES_TYPE_TEXT, ES_TYPE_NESTED, ES_TYPE_OBJECT}}, + {SQL_TYPE_TIMESTAMP, {ES_TYPE_DATETIME}}}; + +// Boilerplate code for easy column bind handling +class BindTemplate { + public: + BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal) + : m_len(ES_UNINITIALIZED), m_ordinal(ordinal) { + if (!can_be_null) + throw std::runtime_error( + "Do not use this constructor for values that can be NULL. A " + "constructor with " + "supplied default value must be used if value can be NULL."); + } + BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal, const Int2) + : m_len(ES_UNINITIALIZED), m_ordinal(ordinal) { + (void)(can_be_null); + } + BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal, const Int4) + : m_len(ES_UNINITIALIZED), m_ordinal(ordinal) { + (void)(can_be_null); + } + BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal, + const std::vector< SQLCHAR > &) + : m_len(ES_UNINITIALIZED), m_ordinal(ordinal) { + (void)(can_be_null); + } + virtual ~BindTemplate() { + } + + SQLPOINTER GetData() { + if (m_len == ES_UNINITIALIZED) + throw std::runtime_error( + "Length is uninitialized - Fetch must be executed before data " + "is retreived."); + return (m_len == SQL_NULL_DATA) ? NULL : GetDataForBind(); + } + + void BindColumn(StatementClass *stmt) { + RETCODE err = ESAPI_BindCol(stmt, m_ordinal, GetType(), + GetDataForBind(), GetSize(), &m_len); + if (!SQL_SUCCEEDED(err)) { + std::string error_msg = + "Failed to bind column with ordinal " + + std::to_string(m_ordinal) + + ". 
SQL Error code: " + std::to_string(err); + throw std::runtime_error(error_msg.c_str()); + } + } + void AssignData(TupleField *tuple) { + SQLPOINTER data = GetData(); + if ((data == NULL) || (m_len == SQL_NULL_DATA)) { + set_tuplefield_null(tuple); + return; + } + switch (GetType()) { + case SQL_C_LONG: + set_tuplefield_int4(tuple, *static_cast< Int4 * >(data)); + break; + case SQL_C_SHORT: + set_tuplefield_int2(tuple, *static_cast< Int2 * >(data)); + break; + case SQL_C_CHAR: + set_tuplefield_string(tuple, static_cast< const char * >(data)); + break; + default: + throw std::runtime_error( + std::string( + "Cannot convert unknown data type to tuplefield: " + + std::to_string(GetType())) + .c_str()); + } + } + BindTemplate(const BindTemplate &) = default; + BindTemplate &operator=(const BindTemplate &) = default; + virtual std::string AsString() = 0; + virtual void UpdateData(SQLPOINTER new_data, size_t size) = 0; + + private: + SQLLEN m_len; + SQLUSMALLINT m_ordinal; + + protected: + virtual SQLSMALLINT GetType() = 0; + virtual SQLLEN GetSize() = 0; + virtual SQLPOINTER GetDataForBind() = 0; +}; + +// 4 byte integer column +class BindTemplateInt4 : public BindTemplate { + public: + BindTemplateInt4(const bool nullable, const SQLUSMALLINT ordinal) + : BindTemplate(nullable, ordinal), m_data(0) { + } + BindTemplateInt4(const bool nullable, const SQLUSMALLINT ordinal, + const Int4 data) + : BindTemplate(nullable, ordinal, data), m_data(data) { + } + ~BindTemplateInt4() { + } + std::string AsString() { + return std::to_string(*static_cast< Int4 * >(GetData())); + } + void UpdateData(SQLPOINTER new_data, size_t size) { + (void)size; + m_data = *(Int4 *)new_data; + } + + private: + Int4 m_data; + + protected: + SQLPOINTER GetDataForBind() { + return &m_data; + } + SQLSMALLINT GetType() { + return SQL_C_LONG; + } + SQLLEN GetSize() { + return static_cast< SQLLEN >(sizeof(Int4)); + } +}; + +// 2 byte integer column +class BindTemplateInt2 : public BindTemplate { + public: + 
BindTemplateInt2(const bool nullable, const SQLUSMALLINT ordinal) + : BindTemplate(nullable, ordinal), m_data(0) { + } + BindTemplateInt2(const bool nullable, const SQLUSMALLINT ordinal, + const Int2 data) + : BindTemplate(nullable, ordinal, data), m_data(data) { + } + ~BindTemplateInt2() { + } + std::string AsString() { + return std::to_string(*static_cast< Int2 * >(GetData())); + } + void UpdateData(SQLPOINTER new_data, size_t size) { + (void)size; + m_data = *(Int2 *)new_data; + } + + private: + Int2 m_data; + + protected: + SQLPOINTER GetDataForBind() { + return &m_data; + } + SQLSMALLINT GetType() { + return SQL_C_SHORT; + } + SQLLEN GetSize() { + return static_cast< SQLLEN >(sizeof(Int2)); + } +}; + +// Varchar data +class BindTemplateSQLCHAR : public BindTemplate { + public: + BindTemplateSQLCHAR(const bool nullable, const SQLUSMALLINT ordinal) + : BindTemplate(nullable, ordinal), m_data(MAX_INFO_STRING, '\0') { + } + BindTemplateSQLCHAR(const bool nullable, const SQLUSMALLINT ordinal, + const std::vector< SQLCHAR > &data) + : BindTemplate(nullable, ordinal, data), m_data(MAX_INFO_STRING, '\0') { + if (data.size() >= m_data.size()) { + throw std::runtime_error( + "Default data size exceeds max info string size."); + } else { + m_data.insert(m_data.begin(), data.begin(), data.end()); + } + } + ~BindTemplateSQLCHAR() { + } + std::string AsString() { + char *bind_tbl_data_char = static_cast< char * >(GetData()); + return (bind_tbl_data_char == NULL) ? 
"" : bind_tbl_data_char; + } + void UpdateData(SQLPOINTER new_data, size_t size) { + m_data.clear(); + SQLCHAR *data = (SQLCHAR *)new_data; + for (size_t i = 0; i < size; i++) { + m_data.push_back(*data++); + } + m_data.push_back(0); + } + + private: + std::vector< SQLCHAR > m_data; + + protected: + SQLPOINTER GetDataForBind() { + return m_data.data(); + } + SQLSMALLINT GetType() { + return SQL_C_CHAR; + } + SQLLEN GetSize() { + return static_cast< SQLLEN >(m_data.size()); + } +}; + +// Typedefs and macros to ease creation of BindTemplates +typedef std::unique_ptr< BindTemplate > bind_ptr; +typedef std::vector< bind_ptr > bind_vector; +#define _SQLCHAR_(...) \ + (std::make_unique< BindTemplateSQLCHAR >(BindTemplateSQLCHAR(__VA_ARGS__))) +#define _SQLINT2_(...) \ + (std::make_unique< BindTemplateInt2 >(BindTemplateInt2(__VA_ARGS__))) +#define _SQLINT4_(...) \ + (std::make_unique< BindTemplateInt4 >(BindTemplateInt4(__VA_ARGS__))) + +// Common function definitions +enum class TableResultSet { Catalog, Schema, TableTypes, TableLookUp, All }; +void ConvertToString(std::string &out, bool &valid, const SQLCHAR *sql_char, + const SQLSMALLINT sz); +QResultClass *SetupQResult(const bind_vector &cols, StatementClass *stmt, + StatementClass *col_stmt, const int col_cnt); +void CleanUp(StatementClass *stmt, StatementClass *sub_stmt, const RETCODE ret); +void ExecuteQuery(ConnectionClass *conn, HSTMT *stmt, const std::string &query); +void GetCatalogData(const std::string &query, StatementClass *stmt, + StatementClass *sub_stmt, const TableResultSet res_type, + std::string &table_type, + void (*populate_binds)(bind_vector &), + void (*setup_qres_info)(QResultClass *, + EnvironmentClass *)); + +// Common function declarations +void ConvertToString(std::string &out, bool &valid, const SQLCHAR *sql_char, + const SQLSMALLINT sz) { + valid = (sql_char != NULL); + if (!valid) { + out = "%"; + } else if (sz == SQL_NTS) { + out.assign(reinterpret_cast< const char * >(sql_char)); + } 
    else if (sz <= 0) {
        out = "";
    } else {
        out.assign(reinterpret_cast< const char * >(sql_char),
                   static_cast< size_t >(sz));
    }
}

// Allocates a QResultClass for a catalog result set, attaches it to `stmt`,
// and sizes the statement's column bindings for `col_cnt` columns.
// Throws std::runtime_error (after recording STMT_NO_MEMORY_ERROR on `stmt`)
// when allocation fails; never returns NULL.
QResultClass *SetupQResult(const bind_vector &cols, StatementClass *stmt,
                           StatementClass *col_stmt, const int col_cnt) {
    // Unused; kept for a uniform call shape with the other catalog helpers.
    (void)(cols);
    (void)(col_stmt);

    // Initialize memory for data retrieval
    QResultClass *res = NULL;
    if ((res = QR_Constructor()) == NULL) {
        SC_set_error(stmt, STMT_NO_MEMORY_ERROR,
                     "Couldn't allocate memory for Tables or Columns result.",
                     "FetchResults");
        throw std::runtime_error(
            "Couldn't allocate memory for Tables or Columns result.");
    }
    SC_set_Result(stmt, res);

    // The binding structure for a statement is not set up until a statement is
    // actually executed, so we'll have to do this ourselves
    extend_column_bindings(SC_get_ARDF(stmt),
                           static_cast< SQLSMALLINT >(col_cnt));
    QR_set_num_fields(res, col_cnt);

    return res;
}

// Finalizes a catalog call: marks `stmt` finished, copies any error from the
// internal sub-statement when `ret` indicates failure, resets the cursor
// position, and drops the sub-statement.
void CleanUp(StatementClass *stmt, StatementClass *sub_stmt,
             const RETCODE ret = SQL_ERROR) {
    stmt->status = STMT_FINISHED;
    stmt->catalog_result = TRUE;

    // Only propagate the sub-statement's error if none is already recorded
    if (!SQL_SUCCEEDED(ret) && 0 >= SC_get_errornumber(stmt))
        SC_error_copy(stmt, sub_stmt, TRUE);

    // set up the current tuple pointer for the result set (before first row)
    stmt->currTuple = -1;
    SC_set_rowset_start(stmt, -1, FALSE);
    SC_set_current_col(stmt, -1);

    if (sub_stmt)
        ESAPI_FreeStmt(sub_stmt, SQL_DROP);
}

// Allocates a statement on `conn` into `*stmt` and executes `query` on it.
// Throws std::runtime_error on allocation or execution failure; on success
// the caller owns `*stmt` and must free it (see CleanUp).
void ExecuteQuery(ConnectionClass *conn, HSTMT *stmt,
                  const std::string &query) {
    // Prepare statement
    if (!SQL_SUCCEEDED(ESAPI_AllocStmt(conn, stmt, 0))) {
        throw std::runtime_error("Failed to allocate memory for statement.");
    }

    // Execute query
    if (!SQL_SUCCEEDED(ESAPI_ExecDirect(
            *stmt, reinterpret_cast< const SQLCHAR * >(query.c_str()), SQL_NTS,
            1))) {
        std::string error_msg = "Failed to execute query '" + query + "'.";
        throw std::runtime_error(error_msg.c_str());
    }
}

// Table specific function definitions
void split(const std::string &input, const std::string
&delim, + std::vector< std::string > &output); +void GenerateTableQuery(std::string &tables_query, const UWORD flag, + const std::string &table_name_value, + const TableResultSet result_type, + const bool table_valid); +void AssignTableBindTemplates(bind_vector &tabs); +void SetupTableQResInfo(QResultClass *res, EnvironmentClass *env); +void SetTableTuples(QResultClass *res, const TableResultSet res_type, + const bind_vector &bind_tbl, std::string &table_type, + StatementClass *stmt, StatementClass *tbl_stmt); + +// Table specific function declarations +void split(const std::string &input, const std::string &delim, + std::vector< std::string > &output) { + size_t start = 0; + size_t end = input.find(delim); + while (end != std::string::npos) { + output.push_back(input.substr(start, end - start)); + start = end + delim.length(); + end = input.find(delim, start); + } + output.push_back(input.substr(start, end)); +} + +// TODO #324 (SQL Plugin)- Fix patterns and escape characters for this +void GenerateTableQuery(std::string &tables_query, const UWORD flag, + const std::string &table_name_value, + const TableResultSet result_type, + const bool table_valid) { + bool search_pattern = (~flag & PODBC_NOT_SEARCH_PATTERN); + tables_query = "SHOW TABLES LIKE "; + if (table_valid && (table_name_value != "") + && (result_type == TableResultSet::All)) + tables_query += + search_pattern ? 
table_name_value : "^" + table_name_value + "$"; + else + tables_query += "%"; +} + +// In case of unique_ptr's, using push_back (over emplace_back) is preferred in +// C++14 and higher +void AssignTableBindTemplates(bind_vector &tabs) { + tabs.reserve(TABLE_TEMPLATE_COUNT); + tabs.push_back(_SQLCHAR_(false, 1, EMPTY_VARCHAR)); // TABLE_CAT 1 + tabs.push_back(_SQLCHAR_(false, 2, EMPTY_VARCHAR)); // TABLE_SCHEM 2 + tabs.push_back(_SQLCHAR_(false, 3, EMPTY_VARCHAR)); // TABLE_NAME 3 + tabs.push_back(_SQLCHAR_(false, 4, EMPTY_VARCHAR)); // TABLE_TYPE 4 + tabs.push_back(_SQLCHAR_(true, 5)); // REMARKS 5 +} + +void SetupTableQResInfo(QResultClass *res, EnvironmentClass *env) { + if (EN_is_odbc3(env)) { + QR_set_field_info_v(res, TABLES_CATALOG_NAME, TABLE_CAT, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, TABLES_SCHEMA_NAME, TABLE_SCHEM, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + } else { + QR_set_field_info_v(res, TABLES_CATALOG_NAME, TABLE_QUALIFIER, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, TABLES_SCHEMA_NAME, TABLE_OWNER, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + } + QR_set_field_info_v(res, TABLES_TABLE_NAME, TABLE_NAME, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, TABLES_TABLE_TYPE, TABLE_TYPE, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, TABLES_REMARKS, REMARKS, ES_TYPE_VARCHAR, + INFO_VARCHAR_SIZE); +} + +void SetTableTuples(QResultClass *res, const TableResultSet res_type, + const bind_vector &bind_tbl, std::string &table_type, + StatementClass *stmt, StatementClass *tbl_stmt) { + auto CheckResult = [&](const auto &res) { + if (res != SQL_NO_DATA_FOUND) { + SC_full_error_copy(stmt, tbl_stmt, FALSE); + throw std::runtime_error( + std::string("Failed to fetch data after query. 
Error code :" + + std::to_string(res)) + .c_str()); + } + }; + auto AssignData = [&](auto *res, const auto &binds) { + TupleField *tuple = QR_AddNew(res); + for (size_t i = 0; i < binds.size(); i++) + binds[i]->AssignData(&tuple[i]); + }; + + // General case + if (res_type == TableResultSet::All) { + RETCODE result = SQL_NO_DATA_FOUND; + while (SQL_SUCCEEDED(result = ESAPI_Fetch(tbl_stmt))) { + if (bind_tbl[TABLES_TABLE_TYPE]->AsString() == "BASE TABLE") { + std::string table("TABLE"); + bind_tbl[TABLES_TABLE_TYPE]->UpdateData(&table, table.size()); + } + AssignData(res, bind_tbl); + } + CheckResult(result); + } else if (res_type == TableResultSet::TableLookUp) { + // Get accepted table types + std::vector< std::string > table_types; + table_type.erase( + std::remove(table_type.begin(), table_type.end(), '\''), + table_type.end()); + split(table_type, ",", table_types); + + // Loop through all data + RETCODE result = SQL_NO_DATA_FOUND; + while (SQL_SUCCEEDED(result = ESAPI_Fetch(tbl_stmt))) { + // Replace BASE TABLE with TABLE for Excel & Power BI SQLTables call + if (bind_tbl[TABLES_TABLE_TYPE]->AsString() == "BASE TABLE") { + std::string table("TABLE"); + bind_tbl[TABLES_TABLE_TYPE]->UpdateData(&table, table.size()); + } + if (std::find(table_types.begin(), table_types.end(), + bind_tbl[TABLES_TABLE_TYPE]->AsString()) + != table_types.end()) { + AssignData(res, bind_tbl); + } + } + + CheckResult(result); + + } + // Special cases - only need single grab for this one + else { + RETCODE result; + if (!SQL_SUCCEEDED(result = ESAPI_Fetch(tbl_stmt))) { + SC_full_error_copy(stmt, tbl_stmt, FALSE); + throw std::runtime_error( + std::string("Failed to fetch data after query. 
Error code :" + + std::to_string(result)) + .c_str()); + } + + // Get index of result type of interest + size_t idx = NUM_OF_TABLES_FIELDS; + switch (res_type) { + case TableResultSet::Catalog: + idx = TABLES_CATALOG_NAME; + break; + case TableResultSet::Schema: + idx = TABLES_SCHEMA_NAME; + break; + case TableResultSet::TableTypes: + idx = TABLES_TABLE_TYPE; + break; + default: + // This should not be possible, handle it anyway + throw std::runtime_error( + "Result type is not an expected type."); + } + + // Get new tuple and assign index of interest (NULL others) + // TODO #324 (SQL Plugin)- Should these be unique? + TupleField *tuple = QR_AddNew(res); + for (size_t i = 0; i < bind_tbl.size(); i++) { + if (i == idx) + bind_tbl[i]->AssignData(&tuple[i]); + else + set_tuplefield_string(&tuple[i], NULL_STRING); + } + } +} + +// Column specific function definitions +void SetupColumnQResInfo(QResultClass *res, EnvironmentClass *unused); +void GenerateColumnQuery(std::string &query, const std::string &table_name, + const std::string &column_name, const bool table_valid, + const bool column_valid, const UWORD flag); +void AssignColumnBindTemplates(bind_vector &cols); + +// Column Specific function declarations +void SetupColumnQResInfo(QResultClass *res, EnvironmentClass *unused) { + (void)(unused); + + QR_set_field_info_v(res, COLUMNS_CATALOG_NAME, TABLE_CAT, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLUMNS_SCHEMA_NAME, TABLE_SCHEM, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLUMNS_TABLE_NAME, TABLE_NAME, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLUMNS_COLUMN_NAME, COLUMN_NAME, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLUMNS_DATA_TYPE, DATA_TYPE, ES_TYPE_INT2, 2); + QR_set_field_info_v(res, COLUMNS_TYPE_NAME, TYPE_NAME, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLUMNS_PRECISION, COLUMN_SIZE, ES_TYPE_INT4, 4); + QR_set_field_info_v(res, COLUMNS_LENGTH, 
BUFFER_LENGTH, ES_TYPE_INT4, 4); + QR_set_field_info_v(res, COLUMNS_SCALE, DECIMAL_DIGITS, ES_TYPE_INT2, 2); + QR_set_field_info_v(res, COLUMNS_RADIX, NUM_PREC_RADIX, ES_TYPE_INT2, 2); + QR_set_field_info_v(res, COLUMNS_NULLABLE, NULLABLE, ES_TYPE_INT2, 2); + QR_set_field_info_v(res, COLUMNS_REMARKS, REMARKS, ES_TYPE_VARCHAR, + INFO_VARCHAR_SIZE); + QR_set_field_info_v(res, COLUMNS_COLUMN_DEF, COLUMN_DEF, ES_TYPE_VARCHAR, + INFO_VARCHAR_SIZE); + QR_set_field_info_v(res, COLUMNS_SQL_DATA_TYPE, SQL_DATA_TYPE, ES_TYPE_INT2, + 2); + QR_set_field_info_v(res, COLUMNS_SQL_DATETIME_SUB, SQL_DATETIME_SUB, + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, COLUMNS_CHAR_OCTET_LENGTH, CHAR_OCTET_LENGTH, + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, COLUMNS_ORDINAL_POSITION, ORDINAL_POSITION, + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, COLUMNS_IS_NULLABLE, IS_NULLABLE, ES_TYPE_VARCHAR, + INFO_VARCHAR_SIZE); +} + +// TODO #325 (SQL Plugin)- Fix patterns and escape characters for this +void GenerateColumnQuery(std::string &query, const std::string &table_name, + const std::string &column_name, const bool table_valid, + const bool column_valid, const UWORD flag) { + bool search_pattern = (~flag & PODBC_NOT_SEARCH_PATTERN); + query = "DESCRIBE TABLES LIKE "; + query += table_valid + ? (search_pattern ? 
table_name : "^" + table_name + "$") + : "%"; + if (column_valid) + query += " COLUMNS LIKE " + column_name; +} + +// In case of unique_ptr's, using push_back (over emplace_back) is preferred in +// C++14 and higher +void AssignColumnBindTemplates(bind_vector &cols) { + cols.reserve(COLUMN_TEMPLATE_COUNT); + cols.push_back(_SQLCHAR_(true, 1)); // TABLE_CAT 1 + cols.push_back(_SQLCHAR_(true, 2)); // TABLE_SCHEM 2 + cols.push_back(_SQLCHAR_(false, 3, EMPTY_VARCHAR)); // TABLE_NAME 3 + cols.push_back(_SQLCHAR_(false, 4, EMPTY_VARCHAR)); // COLUMN_NAME 4 + cols.push_back( + _SQLINT2_(false, 5, DEFAULT_TYPE_INT)); // DATA_TYPE 5 + cols.push_back( + _SQLCHAR_(false, 6, DEFAULT_TYPE_STR)); // TYPE_NAME 6 + cols.push_back(_SQLINT4_(true, 7)); // COLUMN_SIZE 7 + cols.push_back(_SQLINT4_(true, 8)); // BUFFER_LENGTH 8 + cols.push_back(_SQLINT2_(true, 9)); // DECIMAL_DIGITS 9 + cols.push_back(_SQLINT2_(true, 10)); // NUM_PREC_RADIX 10 + cols.push_back( + _SQLINT2_(false, 11, SQL_NULLABLE_UNKNOWN)); // NULLABLE 11 + cols.push_back(_SQLCHAR_(true, 12)); // REMARKS 12 + cols.push_back(_SQLCHAR_(true, 13)); // COLUMN_DEF 13 + cols.push_back( + _SQLINT2_(false, 14, DEFAULT_TYPE_INT)); // SQL_DATA_TYPE 14 + cols.push_back(_SQLINT2_(true, 15)); // SQL_DATETIME_SUB 15 + cols.push_back(_SQLINT4_(true, 16)); // CHAR_OCTET_LENGTH 16 + cols.push_back(_SQLINT4_(false, 17, -1)); // ORDINAL_POSITION 17 + cols.push_back(_SQLCHAR_(true, 18)); // IS_NULLABLE 18 +} + +void GetCatalogData(const std::string &query, StatementClass *stmt, + StatementClass *sub_stmt, const TableResultSet res_type, + std::string &table_type, + void (*populate_binds)(bind_vector &), + void (*setup_qres_info)(QResultClass *, + EnvironmentClass *)) { + // Execute query + ExecuteQuery(SC_get_conn(stmt), reinterpret_cast< HSTMT * >(&sub_stmt), + query); + + // Bind Columns + bind_vector binds; + (*populate_binds)(binds); + std::for_each(binds.begin(), binds.end(), + [&](const auto &b) { b->BindColumn(sub_stmt); }); + 
QResultClass *res = + SetupQResult(binds, stmt, sub_stmt, static_cast< int >(binds.size())); + + // Setup QResultClass + (*setup_qres_info)( + res, static_cast< EnvironmentClass * >(CC_get_env(SC_get_conn(stmt)))); + SetTableTuples(res, res_type, binds, table_type, stmt, sub_stmt); + + CleanUp(stmt, sub_stmt, SQL_SUCCESS); +} + +RETCODE SQL_API +ESAPI_Tables(HSTMT hstmt, const SQLCHAR *catalog_name_sql, + const SQLSMALLINT catalog_name_sz, const SQLCHAR *schema_name_sql, + const SQLSMALLINT schema_name_sz, const SQLCHAR *table_name_sql, + const SQLSMALLINT table_name_sz, const SQLCHAR *table_type_sql, + const SQLSMALLINT table_type_sz, const UWORD flag) { + CSTR func = "ESAPI_Tables"; + StatementClass *stmt = (StatementClass *)hstmt; + StatementClass *tbl_stmt = NULL; + RETCODE result = SQL_ERROR; + if ((result = SC_initialize_and_recycle(stmt)) != SQL_SUCCESS) + return result; + + try { + // Convert const SQLCHAR*'s to c++ strings + std::string catalog_name, schema_name, table_name, table_type; + bool catalog_valid, schema_valid, table_valid, table_type_valid; + ConvertToString(catalog_name, catalog_valid, catalog_name_sql, + catalog_name_sz); + ConvertToString(schema_name, schema_valid, schema_name_sql, + schema_name_sz); + ConvertToString(table_name, table_valid, table_name_sql, table_name_sz); + ConvertToString(table_type, table_type_valid, table_type_sql, + table_type_sz); + + // Special semantics for the CatalogName, SchemaName, and TableType + // arguments + TableResultSet result_type = TableResultSet::All; + + if (catalog_name == SQL_ALL_CATALOGS) { + if (schema_valid && table_valid && (table_name == "") + && (schema_name == "")) + result_type = TableResultSet::Catalog; + } + if (schema_name == SQL_ALL_SCHEMAS) { + if (catalog_valid && table_valid && (table_name == "") + && (catalog_name == "")) + result_type = TableResultSet::Schema; + } + if (table_type_valid && (table_type == SQL_ALL_TABLE_TYPES)) { + if (catalog_valid && table_valid && schema_valid + && 
(table_name == "") && (catalog_name == "") + && (schema_name == "")) + result_type = TableResultSet::TableTypes; + } + if (table_type_valid && (table_type != SQL_ALL_TABLE_TYPES)) { + result_type = TableResultSet::TableLookUp; + } + + // Create query to find out list + std::string query; + GenerateTableQuery(query, flag, table_name, result_type, table_valid); + + // TODO #324 (SQL Plugin)- evaluate catalog & schema support + GetCatalogData(query, stmt, tbl_stmt, result_type, table_type, + AssignTableBindTemplates, SetupTableQResInfo); + return SQL_SUCCESS; + } catch (std::bad_alloc &e) { + std::string error_msg = std::string("Bad allocation exception: '") + + e.what() + std::string("'."); + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, error_msg.c_str(), func); + } catch (std::exception &e) { + std::string error_msg = + std::string("Generic exception: '") + e.what() + std::string("'."); + SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); + } catch (...) { + std::string error_msg = std::string("Unknown exception raised."); + SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); + } + CleanUp(stmt, tbl_stmt); + return SQL_ERROR; +} + +RETCODE SQL_API +ESAPI_Columns(HSTMT hstmt, const SQLCHAR *catalog_name_sql, + const SQLSMALLINT catalog_name_sz, const SQLCHAR *schema_name_sql, + const SQLSMALLINT schema_name_sz, const SQLCHAR *table_name_sql, + const SQLSMALLINT table_name_sz, const SQLCHAR *column_name_sql, + const SQLSMALLINT column_name_sz, const UWORD flag, + const OID reloid, const Int2 attnum) { + (void)(reloid); + (void)(attnum); + + CSTR func = "ESAPI_Columns"; + + // Declare outside of try so we can clean them up properly if an exception + // occurs + StatementClass *stmt = (StatementClass *)hstmt; + StatementClass *col_stmt = NULL; + RETCODE result = SQL_ERROR; + if ((result = SC_initialize_and_recycle(stmt)) != SQL_SUCCESS) + return result; + + try { + // Convert const SQLCHAR *'s to strings + std::string catalog_name, schema_name, 
table_name, column_name; + bool catalog_valid, schema_valid, table_valid, column_valid; + ConvertToString(catalog_name, catalog_valid, catalog_name_sql, + catalog_name_sz); + ConvertToString(schema_name, schema_valid, schema_name_sql, + schema_name_sz); + ConvertToString(table_name, table_valid, table_name_sql, table_name_sz); + ConvertToString(column_name, column_valid, column_name_sql, + column_name_sz); + + // Generate query + std::string query; + GenerateColumnQuery(query, table_name, column_name, table_valid, + column_valid, flag); + + // TODO #324 (SQL Plugin)- evaluate catalog & schema support + + // Execute query + std::string table_type = ""; + GetCatalogData(query, stmt, col_stmt, TableResultSet::All, table_type, + AssignColumnBindTemplates, SetupColumnQResInfo); + return SQL_SUCCESS; + } catch (std::bad_alloc &e) { + std::string error_msg = std::string("Bad allocation exception: '") + + e.what() + std::string("'."); + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, error_msg.c_str(), func); + } catch (std::exception &e) { + std::string error_msg = + std::string("Generic exception: '") + e.what() + std::string("'."); + SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); + } catch (...) 
{ + std::string error_msg("Unknown exception raised."); + SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); + } + CleanUp(stmt, col_stmt); + return SQL_ERROR; +} +void CleanUp_GetTypeInfo(StatementClass *stmt, const RETCODE ret = SQL_ERROR) { + stmt->status = STMT_FINISHED; + stmt->currTuple = -1; + if (SQL_SUCCEEDED(ret)) + SC_set_rowset_start(stmt, -1, FALSE); + else + SC_set_Result(stmt, NULL); + SC_set_current_col(stmt, -1); +} + +void SetupTypeQResInfo(QResultClass *res) { + QR_set_field_info_v(res, GETTYPE_TYPE_NAME, TYPE_NAME, ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, GETTYPE_DATA_TYPE, DATA_TYPE, ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_COLUMN_SIZE, PRECISION, ES_TYPE_INT4, 4); + QR_set_field_info_v(res, GETTYPE_LITERAL_PREFIX, LITERAL_PREFIX, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, GETTYPE_LITERAL_SUFFIX, LITERAL_SUFFIX, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, GETTYPE_CREATE_PARAMS, CREATE_PARAMS, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, GETTYPE_NULLABLE, NULLABLE, ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_CASE_SENSITIVE, CASE_SENSITIVE, + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_SEARCHABLE, SEARCHABLE, ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_UNSIGNED_ATTRIBUTE, UNSIGNED_ATTRIBUTE, + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_FIXED_PREC_SCALE, FIXED_PREC_SCALE, + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_AUTO_UNIQUE_VALUE, AUTO_INCREMENT, + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_LOCAL_TYPE_NAME, LOCAL_TYPE_NAME, + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, GETTYPE_MINIMUM_SCALE, MINIMUM_SCALE, ES_TYPE_INT2, + 2); + QR_set_field_info_v(res, GETTYPE_MAXIMUM_SCALE, MAXIMUM_SCALE, ES_TYPE_INT2, + 2); + QR_set_field_info_v(res, GETTYPE_SQL_DATA_TYPE, SQL_DATA_TYPE, ES_TYPE_INT2, + 2); + QR_set_field_info_v(res, GETTYPE_SQL_DATETIME_SUB, SQL_DATETIME_SUB, 
+ ES_TYPE_INT2, 2); + QR_set_field_info_v(res, GETTYPE_NUM_PREC_RADIX, NUM_PREC_RADIX, + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, GETTYPE_INTERVAL_PRECISION, INTERVAL_PRECISION, + ES_TYPE_INT2, 2); +} + +RETCODE SetTypeResult(ConnectionClass *conn, StatementClass *stmt, + QResultClass *res, int esType, int sqlType) { + TupleField *tuple; + + if (tuple = QR_AddNew(res), NULL == tuple) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, "Couldn't QR_AddNew.", + "SetTypeResult"); + CleanUp_GetTypeInfo(stmt, SQL_ERROR); + return SQL_ERROR; + } + + set_tuplefield_string(&tuple[GETTYPE_TYPE_NAME], + estype_attr_to_name(conn, esType, -1, FALSE)); + set_tuplefield_int2(&tuple[GETTYPE_NULLABLE], + estype_nullable(conn, esType)); + + set_tuplefield_int2(&tuple[GETTYPE_DATA_TYPE], + static_cast< short >(sqlType)); + set_tuplefield_int2(&tuple[GETTYPE_CASE_SENSITIVE], + estype_case_sensitive(conn, esType)); + set_tuplefield_int2(&tuple[GETTYPE_SEARCHABLE], + estype_searchable(conn, esType)); + set_tuplefield_int2(&tuple[GETTYPE_FIXED_PREC_SCALE], + estype_money(conn, esType)); + + // Localized data-source dependent data type name (always NULL) + set_tuplefield_null(&tuple[GETTYPE_LOCAL_TYPE_NAME]); + + // These values can be NULL + set_nullfield_int4( + &tuple[GETTYPE_COLUMN_SIZE], + estype_attr_column_size(conn, esType, ES_ATP_UNSET, ES_ADT_UNSET, + ES_UNKNOWNS_UNSET)); + set_nullfield_string(&tuple[GETTYPE_LITERAL_PREFIX], + estype_literal_prefix(conn, esType)); + set_nullfield_string(&tuple[GETTYPE_LITERAL_SUFFIX], + estype_literal_suffix(conn, esType)); + set_nullfield_string(&tuple[GETTYPE_CREATE_PARAMS], + estype_create_params(conn, esType)); + set_nullfield_int2(&tuple[GETTYPE_UNSIGNED_ATTRIBUTE], + estype_unsigned(conn, esType)); + set_nullfield_int2(&tuple[GETTYPE_AUTO_UNIQUE_VALUE], + estype_auto_increment(conn, esType)); + set_nullfield_int2(&tuple[GETTYPE_MINIMUM_SCALE], + estype_min_decimal_digits(conn, esType)); + set_nullfield_int2(&tuple[GETTYPE_MAXIMUM_SCALE], + 
estype_max_decimal_digits(conn, esType)); + set_tuplefield_int2(&tuple[GETTYPE_SQL_DATA_TYPE], + static_cast< short >(sqlType)); + set_nullfield_int2(&tuple[GETTYPE_SQL_DATETIME_SUB], + estype_attr_to_datetime_sub(conn, esType, ES_ATP_UNSET)); + set_nullfield_int4(&tuple[GETTYPE_NUM_PREC_RADIX], + estype_radix(conn, esType)); + set_nullfield_int4(&tuple[GETTYPE_INTERVAL_PRECISION], 0); + + return SQL_SUCCESS; +} + +RETCODE SQL_API ESAPI_GetTypeInfo(HSTMT hstmt, SQLSMALLINT fSqlType) { + CSTR func = "ESAPI_GetTypeInfo"; + StatementClass *stmt = (StatementClass *)hstmt; + ConnectionClass *conn; + conn = SC_get_conn(stmt); + QResultClass *res = NULL; + + int result_cols; + RETCODE result = SQL_ERROR; + + if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) + return result; + + try { + if (res = QR_Constructor(), !res) { + SC_set_error(stmt, STMT_INTERNAL_ERROR, "Error creating result.", + func); + return SQL_ERROR; + } + SC_set_Result(stmt, res); + + result_cols = NUM_OF_GETTYPE_FIELDS; + extend_column_bindings(SC_get_ARDF(stmt), + static_cast< SQLSMALLINT >(result_cols)); + + stmt->catalog_result = TRUE; + QR_set_num_fields(res, result_cols); + SetupTypeQResInfo(res); + + if (fSqlType == SQL_ALL_TYPES) { + for (std::pair< int, std::vector< int > > sqlType : + sql_es_type_map) { + for (auto const &esType : sqlType.second) { + result = + SetTypeResult(conn, stmt, res, esType, sqlType.first); + } + } + } else { + if (sql_es_type_map.count(fSqlType) > 0) { + for (auto esType : sql_es_type_map.at(fSqlType)) { + result = SetTypeResult(conn, stmt, res, esType, fSqlType); + } + } + } + result = SQL_SUCCESS; + + } catch (std::bad_alloc &e) { + std::string error_msg = std::string("Bad allocation exception: '") + + e.what() + std::string("'."); + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, error_msg.c_str(), func); + } catch (std::exception &e) { + std::string error_msg = + std::string("Generic exception: '") + e.what() + std::string("'."); + SC_set_error(stmt, 
STMT_INTERNAL_ERROR, error_msg.c_str(), func); + } catch (...) { + std::string error_msg("Unknown exception raised."); + SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); + } + + CleanUp_GetTypeInfo(stmt, result); + return result; +} \ No newline at end of file diff --git a/sql-odbc/src/odfesqlodbc/es_info.h b/sql-odbc/src/odfesqlodbc/es_info.h new file mode 100644 index 0000000000..17db5847cc --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_info.h @@ -0,0 +1,65 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef __ES_INFO_H__ +#define __ES_INFO_H__ +#include "es_helper.h" +#include "es_odbc.h" +#include "unicode_support.h" + +#ifndef WIN32 +#include +#endif + +#include "bind.h" +#include "catfunc.h" +#include "dlg_specific.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_types.h" +#include "misc.h" +#include "multibyte.h" +#include "qresult.h" +#include "statement.h" +#include "tuple.h" + +// C Interface +#ifdef __cplusplus +extern "C" { +#endif +RETCODE SQL_API ESAPI_Tables(HSTMT hstmt, const SQLCHAR* catalog_name_sql, + const SQLSMALLINT catalog_name_sz, + const SQLCHAR* schema_name_sql, + const SQLSMALLINT schema_name_sz, + const SQLCHAR* table_name_sql, + const SQLSMALLINT table_name_sz, + const SQLCHAR* table_type_sql, + const SQLSMALLINT table_type_sz, const UWORD flag); +RETCODE SQL_API +ESAPI_Columns(HSTMT hstmt, const SQLCHAR* catalog_name_sql, + const SQLSMALLINT catalog_name_sz, const SQLCHAR* schema_name_sql, + const SQLSMALLINT schema_name_sz, const SQLCHAR* table_name_sql, + const SQLSMALLINT table_name_sz, const SQLCHAR* column_name_sql, + const SQLSMALLINT column_name_sz, const UWORD flag, + const OID reloid, const Int2 attnum); + +RETCODE SQL_API ESAPI_GetTypeInfo(HSTMT hstmt, SQLSMALLINT fSqlType); +#ifdef __cplusplus +} +#endif + +#endif /* __ES_INFO_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/es_odbc.c b/sql-odbc/src/odfesqlodbc/es_odbc.c new file mode 100644 index 0000000000..40ec6a4e08 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_odbc.c @@ -0,0 +1,174 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifdef WIN32 +#ifdef _DEBUG +#include +#endif /* _DEBUG */ +#endif /* WIN32 */ +#include +#include "dlg_specific.h" +#include "environ.h" +#include "es_odbc.h" +#include "misc.h" + +#ifdef WIN32 +#include "loadlib.h" +#else +#include +#endif + +void unused_vargs(int cnt, ...) { +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-value" +#endif // __APPLE__ + (void)(cnt); +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ +} + +static int exeesm = 0; +BOOL isMsAccess(void) { + return 1 == exeesm; +} +BOOL isMsQuery(void) { + return 2 == exeesm; +} +BOOL isSqlServr(void) { + return 3 == exeesm; +} + +RETCODE SQL_API SQLDummyOrdinal(void); + +extern void *conns_cs, *common_cs; + +int initialize_global_cs(void) { + static int init = 1; + + if (!init) + return 0; + init = 0; +#ifdef WIN32 +#ifdef _DEBUG +#ifdef _MEMORY_DEBUG_ + _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF); +#endif /* _MEMORY_DEBUG_ */ +#endif /* _DEBUG */ +#endif /* WIN32 */ +#ifdef POSIX_THREADMUTEX_SUPPORT + getMutexAttr(); +#endif /* POSIX_THREADMUTEX_SUPPORT */ + InitializeLogging(); + INIT_CONNS_CS; + INIT_COMMON_CS; + + return 0; +} + +static void finalize_global_cs(void) { + DELETE_COMMON_CS; + DELETE_CONNS_CS; + FinalizeLogging(); +#ifdef _DEBUG +#ifdef _MEMORY_DEBUG_ + // _CrtDumpMemoryLeaks(); +#endif /* _MEMORY_DEBUG_ */ +#endif /* _DEBUG */ +} + +#ifdef WIN32 +HINSTANCE s_hModule; /* Saved module handle. 
*/ +/* This is where the Driver Manager attaches to this Driver */ +BOOL WINAPI DllMain(HANDLE hInst, ULONG ul_reason_for_call, LPVOID lpReserved) { + const char *exename = GetExeProgramName(); + + switch (ul_reason_for_call) { + case DLL_PROCESS_ATTACH: + s_hModule = hInst; /* Save for dialog boxes */ + + if (stricmp(exename, "msaccess") == 0) + exeesm = 1; + else if (strnicmp(exename, "msqry", 5) == 0) + exeesm = 2; + else if (strnicmp(exename, "sqlservr", 8) == 0) + exeesm = 3; + initialize_global_cs(); + MYLOG(ES_DEBUG, "exe name=%s\n", exename); + break; + + case DLL_THREAD_ATTACH: + break; + + case DLL_PROCESS_DETACH: + MYLOG(ES_DEBUG, "DETACHING %s\n", DRIVER_FILE_NAME); + CleanupDelayLoadedDLLs(); + /* my(q)log is unavailable from here */ + finalize_global_cs(); + return TRUE; + + case DLL_THREAD_DETACH: + break; + + default: + break; + } + + return TRUE; + + UNREFERENCED_PARAMETER(lpReserved); +} + +#else /* not WIN32 */ + +#if defined(__GNUC__) || defined(__SUNPRO_C) + +/* Shared library initializer and destructor, using gcc's attributes */ + +static void __attribute__((constructor)) elasticodbc_init(void) { + initialize_global_cs(); +} + +static void __attribute__((destructor)) elasticodbc_fini(void) { + finalize_global_cs(); +} + +#else /* not __GNUC__ */ + +/* Shared library initialization on non-gcc systems. */ +BOOL _init(void) { + initialize_global_cs(); + return TRUE; +} + +BOOL _fini(void) { + finalize_global_cs(); + return TRUE; +} +#endif /* not __GNUC__ */ +#endif /* not WIN32 */ + +/* + * This function is used to cause the Driver Manager to + * call functions by number rather than name, which is faster. + * The ordinal value of this function must be 199 to have the + * Driver Manager do this. 
Also, the ordinal values of the + * functions must match the value of fFunction in SQLGetFunctions() + */ +RETCODE SQL_API SQLDummyOrdinal(void) { + return SQL_SUCCESS; +} diff --git a/sql-odbc/src/odfesqlodbc/es_odbc.h b/sql-odbc/src/odfesqlodbc/es_odbc.h new file mode 100644 index 0000000000..d5dff8e683 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_odbc.h @@ -0,0 +1,679 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __ESODBC_H__ +#define __ESODBC_H__ +#include + +/* #define __MS_REPORTS_ANSI_CHAR__ */ +void unused_vargs(int cnt, ...); +#define UNUSED(...) 
unused_vargs(0, __VA_ARGS__) + +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#elif __APPLE__ + +#elif __linux__ +#include "linux/kconfig.h" +#endif + +#include /* for FILE* pointers: see GLOBAL_VALUES */ + +#include "version.h" + +#ifdef WIN32 +#ifdef _DEBUG +#ifndef _MEMORY_DEBUG_ +#include +#if (_MSC_VER < 1400) /* in case of VC7 or under */ +#include +#endif /* _MSC_VER */ +#define _CRTDBG_MAP_ALLOC +#include +#endif /* _MEMORY_DEBUG_ */ +#else /* _DEBUG */ +#include +#endif /* _DEBUG */ +#else /* WIN32 */ +#include +#endif /* WIN32 */ + +#if defined(__GNUC__) || defined(__IBMC__) +#if ((__GNUC__ * 100) + __GNUC_MINOR__) >= 404 +#define ES_PRINTF_ATTRIBUTE gnu_printf +#else +#define ES_PRINTF_ATTRIBUTE printf +#endif +#define es_attribute_printf(f, a) \ + __attribute__((format(ES_PRINTF_ATTRIBUTE, f, a))) +#else +#define __attribute__(x) +#define es_attribute_printf(f, a) +#endif /* __GNUC__ || __IBMC__ */ + +#ifdef _MEMORY_DEBUG_ +void *esdebug_alloc(size_t); +void *esdebug_calloc(size_t, size_t); +void *esdebug_realloc(void *, size_t); +char *esdebug_strdup(const char *); +void *esdebug_memcpy(void *, const void *, size_t); +void *esdebug_memset(void *, int c, size_t); +char *esdebug_strcpy(char *, const char *); +char *esdebug_strncpy(char *, const char *, size_t); +char *esdebug_strncpy_null(char *, const char *, size_t); +void esdebug_free(void *); +void debug_memory_check(void); + +#ifdef WIN32 +#undef strdup +#endif /* WIN32 */ +#define malloc esdebug_alloc +#define realloc esdebug_realloc +#define calloc esdebug_calloc +#define strdup esdebug_strdup +#define free esdebug_free +#define strcpy esdebug_strcpy +#define strncpy esdebug_strncpy +/* #define strncpy_null esdebug_strncpy_null */ +#define memcpy esdebug_memcpy +#define memset esdebug_memset +#endif /* _MEMORY_DEBUG_ */ + +#ifdef WIN32 +#pragma warning(push) +#pragma warning(disable : 4201) // nonstandard extension used: nameless + // struct/union warning +#include +#pragma warning(pop) 
+#endif /* WIN32 */ +/* Must come before sql.h */ +#define ODBCVER 0x0351 + +/* + * Default NAMEDATALEN value in the server. The server can be compiled with + * a different value, but this will do. + */ +#define NAMEDATALEN_V73 64 + +#ifndef NAMESTORAGELEN +#define NAMESTORAGELEN 64 +#endif /* NAMESTORAGELEN */ + +#if defined(WIN32) || defined(WITH_UNIXODBC) || defined(WITH_IODBC) +#ifdef WIN32 +#pragma warning(push) +#pragma warning(disable : 4201) // nonstandard extension used: nameless + // struct/union warning +#endif // WIN32 +#include +#include +#include +#if WIN32 +#pragma warning(pop) +#endif // WIN32 +#if defined(WIN32) && (_MSC_VER < 1300) /* in case of VC6 or under */ +#define SQLLEN SQLINTEGER +#define SQLULEN SQLUINTEGER +#define SQLSETPOSIROW SQLUSMALLINT +/* VC6 bypasses 64bit mode. */ +#define DWLP_USER DWL_USER +#define ULONG_PTR ULONG +#define LONG_PTR LONG +#define SetWindowLongPtr(hdlg, DWLP_USER, lParam) \ + SetWindowLong(hdlg, DWLP_USER, lParam) +#define GetWindowLongPtr(hdlg, DWLP_USER) GetWindowLong(hdlg, DWLP_USER); +#endif +#else +#include "iodbc.h" +#include "isql.h" +#include "isqlext.h" +#endif /* WIN32 */ + +#if defined(WIN32) +#include +#elif defined(WITH_UNIXODBC) +#include +#elif defined(WITH_IODBC) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#define Int4 int +#define UInt4 unsigned int +#define Int2 short +#define UInt2 unsigned short +typedef SQLBIGINT Int8; +typedef UInt4 OID; + +#ifndef SQL_TRUE +#define SQL_TRUE TRUE +#endif /* SQL_TRUE */ +#ifndef SQL_FALSE +#define SQL_FALSE FALSE +#endif /* SQL_FALSE */ + +#define FORMAT_SMALLI "%d" /* SQLSMALLINT */ +#define FORMAT_USMALLI "%u" /* SQLUSMALLINT */ +#ifdef WIN32 +#ifndef SSIZE_T_DEFINED +#define ssize_t SSIZE_T +#define SSIZE_T_DEFINED +#endif /* SSIZE_T */ +#define FORMAT_SIZE_T "%Iu" /* size_t */ +#define FORMAT_SSIZE_T "%Id" /* ssize_t */ +#define FORMAT_INTEGER "%ld" /* SQLINTEGER */ +#define FORMAT_UINTEGER "%lu" /* SQLUINTEGER */ +#define FORMATI64 
"%I64d" /* SQLBIGINT */ +#define FORMATI64U "%I64u" /* SQLUBIGINT */ +#ifdef _WIN64 +#define FORMAT_LEN "%I64d" /* SQLLEN */ +#define FORMAT_ULEN "%I64u" /* SQLULEN */ +#define FORMAT_POSIROW "%I64u" +#else /* _WIN64 */ +#define FORMAT_LEN "%ld" /* SQLLEN */ +#define FORMAT_ULEN "%lu" /* SQLULEN */ +#define FORMAT_POSIROW "%hu" +#endif /* _WIN64 */ +#else /* WIN32 */ +#define FORMAT_SIZE_T "%zu" /* size_t */ +#define FORMAT_SSIZE_T "%zd" /* ssize_t */ +#ifndef HAVE_SSIZE_T +typedef long ssize_t; +#endif /* HAVE_SSIZE_T */ + +#ifndef SIZEOF_VOID_P +#ifdef __APPLE__ +#define SIZEOF_VOID_P 8 +#else +#error "SIZEOF_VOID_P must be defined" +#endif // __APPLE__ +#endif // SIZEOF_VOID_P + +#ifndef SIZEOF_LONG +#ifdef __APPLE__ +#define SIZEOF_LONG 8 +#else +#error "SIZEOF_LONG must be defined" +#endif // __APPLE__ +#endif // SIZEOF_LONG + +#if (SIZEOF_VOID_P == SIZEOF_LONG) /* ILP32 or LP64 */ +typedef long LONG_PTR; +typedef unsigned long ULONG_PTR; +#elif defined(HAVE_LONG_LONG) /* LLP64 */ +typedef long long LONG_PTR; +typedef unsigned long long ULONG_PTR; +#else /* SIZEOF_VOID_P */ +#error appropriate long pointer type not found +#endif /* SIZEOF_VOID_P */ +#if (SIZEOF_LONG == 8) /* LP64 */ +#define FORMAT_INTEGER "%d" /* SQLINTEGER */ +#define FORMAT_UINTEGER "%u" /* SQLUINTEGER */ +#define FORMATI64 "%ld" /* SQLBIGINT */ +#define FORMATI64U "%lu" /* SQLUBIGINT */ +#if defined(WITH_UNIXODBC) && defined(BUILD_LEGACY_64_BIT_MODE) +#define FORMAT_LEN "%d" /* SQLLEN */ +#define FORMAT_ULEN "%u" /* SQLULEN */ +#else /* WITH_UNIXODBC */ +#define FORMAT_LEN "%ld" /* SQLLEN */ +#define FORMAT_ULEN "%lu" /* SQLULEN */ +#endif /* WITH_UNIXODBC */ +#else /* SIZEOF_LONG */ +#define FORMAT_INTEGER "%ld" /* SQLINTEGER */ +#define FORMAT_UINTEGER "%lu" /* SQLUINTEGER */ +#if defined(HAVE_LONG_LONG) +#define FORMATI64 "%lld" /* SQLBIGINT */ +#define FORMATI64U "%llu" /* SQLUBIGINT */ +#if (SIZEOF_VOID_P == 8) /* LLP64 */ +#define FORMAT_LEN "%lld" /* SQLLEN */ +#define FORMAT_ULEN 
"%llu" /* SQLULEN */ +#else /* SIZEOF_VOID_P ILP32 */ +#define FORMAT_LEN "%ld" /* SQLLEN */ +#define FORMAT_ULEN "%lu" /* SQLULEN */ +#endif /* SIZEOF_VOID_P */ +#else /* HAVE_LONG_LONG */ +#define FORMAT_LEN "%ld" /* SQLLEN */ +#define FORMAT_ULEN "%lu" /* SQLULEN */ +#endif /* HAVE_LONG_LONG */ +#endif /* SIZEOF_LONG */ + +#if (SIZEOF_VOID_P == 8) && !defined(WITH_IODBC) +#define FORMAT_POSIROW FORMAT_ULEN +#else +#define FORMAT_POSIROW "%u" +#endif + +#endif /* WIN32 */ + +#define CAST_PTR(type, ptr) (type)((LONG_PTR)(ptr)) +#define CAST_UPTR(type, ptr) (type)((ULONG_PTR)(ptr)) +#ifndef SQL_IS_LEN +#define SQL_IS_LEN (-1000) +#endif /* SQL_IS_LEN */ +#ifdef HAVE_SIGNED_CHAR +typedef signed char po_ind_t; +#else +typedef char po_ind_t; +#endif /* HAVE_SIGNED_CHAR */ + +#ifndef WIN32 +#if !defined(WITH_UNIXODBC) && !defined(WITH_IODBC) +typedef float SFLOAT; +typedef double SDOUBLE; +#endif /* WITH_UNIXODBC */ + +#ifndef CALLBACK +#define CALLBACK +#endif /* CALLBACK */ +#endif /* WIN32 */ + +#ifndef WIN32 +#define stricmp strcasecmp +#define strnicmp strncasecmp +#ifndef TRUE +#define TRUE (BOOL)1 +#endif /* TRUE */ +#ifndef FALSE +#define FALSE (BOOL)0 +#endif /* FALSE */ +#else + +#if (_MSC_VER < 1900) /* vc12 or under */ +#define POSIX_SNPRINTF_REQUIRED +#define snprintf posix_snprintf +extern int posix_snprintf(char *buf, size_t size, const char *format, ...); +#endif /* _MSC_VER */ +#ifndef strdup +#define strdup _strdup +#endif /* strdup */ +#define strnicmp _strnicmp +#define stricmp _stricmp +#endif /* WIN32 */ + +#define IS_NOT_SPACE(c) ((c) && !isspace((UCHAR)c)) + +#ifndef SQL_ATTR_APP_ROW_DESC +#define SQL_ATTR_APP_ROW_DESC 10010 +#endif +#ifndef SQL_ATTR_APP_PARAM_DESC +#define SQL_ATTR_APP_PARAM_DESC 10011 +#endif +#ifndef SQL_ATTR_IMP_ROW_DESC +#define SQL_ATTR_IMP_ROW_DESC 10012 +#endif +#ifndef SQL_ATTR_IMP_PARAM_DESC +#define SQL_ATTR_IMP_PARAM_DESC 10013 +#endif + +/* Driver stuff */ + +#define DRIVERNAME "Elasticsearch ODBC" + +#define 
DBMS_NAME_UNICODE "Elasticsearch Unicode" +#define DBMS_NAME_ANSI "Elasticsearch ANSI" + +#define DRIVER_ODBC_VER "03.51" + +#ifdef UNICODE_SUPPORT +#define WCLEN sizeof(SQLWCHAR) +SQLULEN ucs2strlen(const SQLWCHAR *); +#else +#undef SQL_WCHAR +#undef SQL_WVARCHAR +#undef SQL_WLONGVARCHAR +#undef SQL_C_WCHAR +#define SQL_WCHAR SQL_WCHAR_IS_INHIBITED +#define SQL_WVARCHAR SQL_WVARCHAR_IS_INHIBITED +#define SQL_WLONGVARCHAR SQL_WLONGVARCHAR_IS_INHIBITED +#define SQL_C_WCHAR SQL_C_WCHAR_IS_INHIBITED +#endif + +#ifndef DBMS_NAME +#ifdef _WIN64 +#ifdef UNICODE_SUPPORT +#define DBMS_NAME DBMS_NAME_UNICODE "(x64)" +#else +#define DBMS_NAME DBMS_NAME_ANSI "(x64)" +#endif /* UNICODE_SUPPORT */ +#else /* _WIN64 */ +#ifdef UNICODE_SUPPORT +#define DBMS_NAME DBMS_NAME_UNICODE +#else +#define DBMS_NAME DBMS_NAME_ANSI +#endif /* UNICODE_SUPPORT */ +#endif /* _WIN64 */ +#endif /* DBMS_NAME */ + +#ifndef DBMS_NAME +#define DBMS_NAME "Elasticsearch Legacy" +#endif /* DBMS_NAME */ +#ifdef WIN32 +#ifdef UNICODE_SUPPORT +#define DRIVER_FILE_NAME "odfesqlodbc.dll" +#else +#define DRIVER_FILE_NAME "odfesqlodbc.dll" +#endif /* UNICODE_SUPPORT */ +#else +#ifdef UNICODE_SUPPORT +#define DRIVER_FILE_NAME "libodfesqlodbc.dylib" +#else +#define DRIVER_FILE_NAME "libodfesqlodbc.dylib" +#endif +#endif /* WIN32 */ +BOOL isMsAccess(void); +BOOL isMsQuery(void); +BOOL isSqlServr(void); + +/* ESCAPEs */ +#define ESCAPE_IN_LITERAL '\\' +#define BYTEA_ESCAPE_CHAR '\\' +#define SEARCH_PATTERN_ESCAPE '\\' +#define LITERAL_QUOTE '\'' +#define IDENTIFIER_QUOTE '\"' +#define ODBC_ESCAPE_START '{' +#define ODBC_ESCAPE_END '}' +#define DOLLAR_QUOTE '$' +#define LITERAL_EXT 'E' +#define ES_CARRIAGE_RETURN '\r' +#define ES_LINEFEED '\n' + +/* Limits */ +#define MAXESPATH 1024 + +/* see an easy way round this - DJP 24-1-2001 */ +#define MAX_CONNECT_STRING 4096 +#define FETCH_MAX \ + 100 /* default number of rows to cache \ \ + * for declare/fetch */ +#define TUPLE_MALLOC_INC 100 +#define MAX_CONNECTIONS \ + 
128 /* conns per environment \ \ + * (arbitrary) */ + +#ifdef NAMEDATALEN +#define MAX_SCHEMA_LEN NAMEDATALEN +#define MAX_TABLE_LEN NAMEDATALEN +#define MAX_COLUMN_LEN NAMEDATALEN +#define NAME_FIELD_SIZE NAMEDATALEN /* size of name fields */ +#if (NAMEDATALEN > NAMESTORAGELEN) +#undef NAMESTORAGELEN +#define NAMESTORAGELEN NAMEDATALEN +#endif +#endif /* NAMEDATALEN */ +#define MAX_CURSOR_LEN 32 + +#define SCHEMA_NAME_STORAGE_LEN NAMESTORAGELEN +#define TABLE_NAME_STORAGE_LEN NAMESTORAGELEN +#define COLUMN_NAME_STORAGE_LEN NAMESTORAGELEN +#define INDEX_KEYS_STORAGE_COUNT 32 + +/* Registry length limits */ +#define LARGE_REGISTRY_LEN 4096 /* used for special cases */ +#define MEDIUM_REGISTRY_LEN \ + 256 /* normal size for \ \ + * user,database,etc. */ +#define SMALL_REGISTRY_LEN 10 /* for 1/0 settings */ + +/* These prefixes denote system tables */ +#define ELASTIC_SYS_PREFIX "es_" + +/* Info limits */ +#define MAX_INFO_STRING 128 + +/* POSIX defines a PATH_MAX.( wondows is _MAX_PATH ..) 
*/ +#ifndef PATH_MAX +#ifdef _MAX_PATH +#define PATH_MAX _MAX_PATH +#else +#define PATH_MAX 1024 +#endif /* _MAX_PATH */ +#endif /* PATH_MAX */ + +typedef struct ConnectionClass_ ConnectionClass; +typedef struct StatementClass_ StatementClass; +typedef struct QResultClass_ QResultClass; +typedef struct BindInfoClass_ BindInfoClass; +typedef struct ParameterInfoClass_ ParameterInfoClass; +typedef struct ParameterImplClass_ ParameterImplClass; +typedef struct ColumnInfoClass_ ColumnInfoClass; +typedef struct EnvironmentClass_ EnvironmentClass; +typedef struct TupleField_ TupleField; +typedef struct KeySet_ KeySet; +typedef struct Rollback_ Rollback; +typedef struct ARDFields_ ARDFields; +typedef struct APDFields_ APDFields; +typedef struct IRDFields_ IRDFields; +typedef struct IPDFields_ IPDFields; + +typedef struct col_info COL_INFO; +typedef struct lo_arg LO_ARG; + +/* esNAME type define */ +typedef struct { + char *name; +} esNAME; +#define GET_NAME(the_name) ((the_name).name) +#define SAFE_NAME(the_name) ((the_name).name ? (the_name).name : NULL_STRING) +#define PRINT_NAME(the_name) ((the_name).name ? (the_name).name : PRINT_NULL) +#define NAME_IS_NULL(the_name) (NULL == (the_name).name) +#define NAME_IS_VALID(the_name) (NULL != (the_name).name) +#define INIT_NAME(the_name) ((the_name).name = NULL) +#define NULL_THE_NAME(the_name) \ + do { \ + if ((the_name).name) \ + free((the_name).name); \ + (the_name).name = NULL; \ + } while (0) +#define STR_TO_NAME(the_name, str) \ + do { \ + if ((the_name).name) \ + free((the_name).name); \ + (the_name).name = (str ? 
strdup((str)) : NULL); \ + } while (0) +#define STRX_TO_NAME(the_name, str) \ + do { \ + if ((the_name).name) \ + free((the_name).name); \ + (the_name).name = strdup((str)); \ + } while (0) +#define STRN_TO_NAME(the_name, str, n) \ + do { \ + if ((the_name).name) \ + free((the_name).name); \ + if (str) { \ + (the_name).name = malloc((n) + 1); \ + if ((the_name).name) { \ + memcpy((the_name).name, str, (n)); \ + (the_name).name[(n)] = '\0'; \ + } \ + } else \ + (the_name).name = NULL; \ + } while (0) +#define NAME_TO_NAME(to, from) \ + do { \ + if ((to).name) \ + free((to).name); \ + if ((from).name) \ + (to).name = strdup(from.name); \ + else \ + (to).name = NULL; \ + } while (0) +#define MOVE_NAME(to, from) \ + do { \ + if ((to).name) \ + free((to).name); \ + (to).name = (from).name; \ + (from).name = NULL; \ + } while (0) +#define SET_NAME_DIRECTLY(the_name, str) ((the_name).name = (str)) + +#define NAMECMP(name1, name2) (strcmp(SAFE_NAME(name1), SAFE_NAME(name2))) +#define NAMEICMP(name1, name2) (stricmp(SAFE_NAME(name1), SAFE_NAME(name2))) +/* esNAME define end */ + +typedef struct GlobalValues_ { + esNAME drivername; + char output_dir[LARGE_REGISTRY_LEN]; + int loglevel; +} GLOBAL_VALUES; + +void copy_globals(GLOBAL_VALUES *to, const GLOBAL_VALUES *from); +void init_globals(GLOBAL_VALUES *glbv); +void finalize_globals(GLOBAL_VALUES *glbv); + +typedef struct StatementOptions_ { + SQLLEN maxRows; + SQLLEN maxLength; + SQLLEN keyset_size; + SQLUINTEGER cursor_type; + SQLUINTEGER scroll_concurrency; + SQLUINTEGER retrieve_data; + SQLUINTEGER use_bookmarks; + void *bookmark_ptr; + SQLUINTEGER metadata_id; + SQLULEN stmt_timeout; +} StatementOptions; + +/* Used to pass extra query info to send_query */ +typedef struct QueryInfo_ { + SQLLEN row_size; + SQLLEN fetch_size; + QResultClass *result_in; + const char *cursor; +} QueryInfo; + +/* Used to save the error information */ +typedef struct { + UInt4 status; + Int2 errorsize; + Int2 recsize; + Int2 errorpos; + char 
sqlstate[6]; + SQLLEN diag_row_count; + char __error_message[40]; +} ES_ErrorInfo; +ES_ErrorInfo *ER_Constructor(SDWORD errornumber, const char *errormsg); +ES_ErrorInfo *ER_Dup(const ES_ErrorInfo *from); +void ER_Destructor(ES_ErrorInfo *); +RETCODE SQL_API ER_ReturnError(ES_ErrorInfo *, SQLSMALLINT, UCHAR *, + SQLINTEGER *, UCHAR *, SQLSMALLINT, + SQLSMALLINT *, UWORD); + +void logs_on_off(int cnopen, int, int); + +#define ES_TYPE_LO_UNDEFINED \ + (-999) /* hack until permanent \ \ + * type available */ +#define ES_TYPE_LO_NAME "lo" +#define CTID_ATTNUM (-1) /* the attnum of ctid */ +#define OID_ATTNUM (-2) /* the attnum of oid */ +#define XMIN_ATTNUM (-3) /* the attnum of xmin */ + +/* sizes */ +#define TEXT_FIELD_SIZE \ + 8190 /* size of default text fields \ \ + * (not including null term) */ +#define MAX_VARCHAR_SIZE \ + 512 /* default maximum size of \ \ + * varchar fields (not including null term) */ +#define INFO_VARCHAR_SIZE \ + 254 /* varchar field size \ \ + * used in info.c */ + +#define ES_NUMERIC_MAX_PRECISION 1000 +#define ES_NUMERIC_MAX_SCALE 1000 + +/* Sufficient digits to recover original float values */ +#define ES_REAL_DIGITS 9 +#define ES_DOUBLE_DIGITS 17 + +#define INFO_INQUIRY_LEN \ + 8192 /* this seems sufficiently big for \ \ + * queries used in info.c inoue \ \ + * 2001/05/17 */ +#define LENADDR_SHIFT(x, sft) ((x) ? 
(SQLLEN *)((char *)(x) + (sft)) : NULL) + +/* Structure to hold all the connection attributes for a specific + connection (used for both registry and file, DSN and DRIVER) +*/ +typedef struct { + // Connection + char dsn[MEDIUM_REGISTRY_LEN]; + char desc[MEDIUM_REGISTRY_LEN]; + char drivername[MEDIUM_REGISTRY_LEN]; + char server[MEDIUM_REGISTRY_LEN]; + char port[SMALL_REGISTRY_LEN]; + char response_timeout[SMALL_REGISTRY_LEN]; + char fetch_size[SMALL_REGISTRY_LEN]; + + // Authentication + char authtype[MEDIUM_REGISTRY_LEN]; + char username[MEDIUM_REGISTRY_LEN]; + esNAME password; + char region[MEDIUM_REGISTRY_LEN]; + + // Encryption + char use_ssl; + char verify_server; + + GLOBAL_VALUES drivers; /* moved from driver's option */ +} ConnInfo; + +#define SUPPORT_DESCRIBE_PARAM(conninfo_) (1) + +int initialize_global_cs(void); +enum { /* CC_conninfo_init option */ + CLEANUP_FOR_REUSE = 1L /* reuse the info */ + , + INIT_GLOBALS = (1L << 1) /* init globals memebers */ +}; +void CC_conninfo_init(ConnInfo *conninfo, UInt4 option); +void CC_conninfo_release(ConnInfo *conninfo); +void CC_copy_conninfo(ConnInfo *ci, const ConnInfo *sci); +const char *GetExeProgramName(); + +/* Define a type for defining a constant string expression */ +#ifndef CSTR +#define CSTR static const char *const +#endif /* CSTR */ + +CSTR NULL_STRING = ""; +CSTR PRINT_NULL = "(null)"; +#define OID_NAME "oid" +#define XMIN_NAME "xmin" +#define TABLEOID_NAME "tableoid" + +enum { + DISALLOW_UPDATABLE_CURSORS = 0, /* No cursors are updatable */ + ALLOW_STATIC_CURSORS = 1L, /* Static cursors are updatable */ + ALLOW_KEYSET_DRIVEN_CURSORS = + (1L << 1), /* Keyset-driven cursors are updatable */ + ALLOW_DYNAMIC_CURSORS = (1L << 2), /* Dynamic cursors are updatable */ + ALLOW_BULK_OPERATIONS = (1L << 3), /* Bulk operations available */ + SENSE_SELF_OPERATIONS = (1L << 4), /* Sense self update/delete/add */ +}; + +#ifdef __cplusplus +} +#endif + +#include "mylog.h" + +#endif /* __ESODBC_H__ */ diff --git 
a/sql-odbc/src/odfesqlodbc/es_odbc.rc b/sql-odbc/src/odfesqlodbc/es_odbc.rc new file mode 100644 index 0000000000..6bf66d9af1 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_odbc.rc @@ -0,0 +1,257 @@ +// Microsoft Visual C++ generated resource script. +// +#include "resource.h" + +#define APSTUDIO_READONLY_SYMBOLS +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 2 resource. +// +#include "afxres.h" +#include "version.h" + +///////////////////////////////////////////////////////////////////////////// +#undef APSTUDIO_READONLY_SYMBOLS + +///////////////////////////////////////////////////////////////////////////// +// Japanese (Japan) resources + +#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_JPN) +LANGUAGE LANG_JAPANESE, SUBLANG_DEFAULT +#pragma code_page(932) + +#ifdef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// TEXTINCLUDE +// + +1 TEXTINCLUDE +BEGIN + "resource.h\0" +END + +2 TEXTINCLUDE +BEGIN + "#include ""afxres.h""\r\n" + "#include ""version.h""\r\n" + "\0" +END + +3 TEXTINCLUDE +BEGIN + "\r\n" + "\0" +END + +#endif // APSTUDIO_INVOKED + +#endif // Japanese (Japan) resources +///////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////// +// English (United States) resources + +#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) +LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US +#pragma code_page(1252) + +///////////////////////////////////////////////////////////////////////////// +// +// Dialog +// + +DLG_CONFIG DIALOGEX 65, 43, 275, 250 +STYLE DS_SETFONT | DS_MODALFRAME | DS_3DLOOK | DS_CENTER | WS_POPUP | WS_VISIBLE | WS_CAPTION | WS_SYSMENU +CAPTION "Elasticsearch ODBC Driver DSN Setup" +FONT 8, "MS Sans Serif", 0, 0, 0x0 +BEGIN + LTEXT "Data Source Name",IDC_DSNAMETEXT,23,7,77,12,NOT WS_GROUP + EDITTEXT 
IDC_DSNAME,108,6,120,12,ES_AUTOHSCROLL | WS_DISABLED + GROUPBOX "Connection Settings",IDC_CONN_SETTINGS,7,28,261,56 + LTEXT "Host",IDC_STATIC,20,46,20,8,NOT WS_GROUP + EDITTEXT IDC_SERVER,60,44,192,12,ES_AUTOHSCROLL + LTEXT "&Port",IDC_STATIC,20,66,19,8 + EDITTEXT IDC_PORT,60,64,192,13,ES_AUTOHSCROLL + GROUPBOX "Authentication Settings",IDC_AUTH_SETTINGS,7,93,260,92,BS_FLAT + LTEXT "Auth",IDC_AUTH_STATIC,21,110,19,8 + COMBOBOX IDC_AUTHTYPE,61,108,192,30,CBS_DROPDOWNLIST | CBS_SORT | WS_VSCROLL | WS_TABSTOP + LTEXT "User",IDC_USERNAME_STATIC,20,129,19,8 + EDITTEXT IDC_USER,61,127,191,12,ES_AUTOHSCROLL | WS_DISABLED + LTEXT "Password",IDC_PASSWORD_STATIC,20,149,41,12 + EDITTEXT IDC_PASSWORD,61,147,191,12,ES_PASSWORD | ES_AUTOHSCROLL | WS_DISABLED + LTEXT "Region",IDC_REGION_STATIC,20,170,28,8 + EDITTEXT IDC_REGION,61,168,191,12,ES_AUTOHSCROLL | WS_DISABLED + PUSHBUTTON "Advanced Options",ID_ADVANCED_OPTIONS,21,195,111,15,WS_GROUP + PUSHBUTTON "Logging Options",ID_LOG_OPTIONS,144,195,108,15,WS_GROUP + LTEXT "V.N.N.N",IDC_DRIVER_VERSION,10,228,108,8 + DEFPUSHBUTTON "OK",IDOK,119,224,44,15,WS_GROUP + DEFPUSHBUTTON "Test",IDOK2,167,224,44,15,WS_GROUP + PUSHBUTTON "Cancel",IDCANCEL,215,224,44,15 +END + +DLG_ADVANCED_OPTIONS DIALOGEX 0, 0, 157, 113 +STYLE DS_SETFONT | DS_MODALFRAME | DS_CENTER | WS_POPUP | WS_CAPTION | WS_SYSMENU +CAPTION "Advanced Options" +FONT 8, "MS Sans Serif", 0, 0, 0x0 +BEGIN + GROUPBOX "",IDC_STATIC,12,4,133,85 + CONTROL "SSL",IDC_USESSL,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,19,12,10,11 + LTEXT "Enable SSL",IDC_SSL_STATIC,37,14,39,8 + CONTROL "Host Verification",IDC_HOST_VER,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,19,32,10,9 + LTEXT "Hostname Verification",IDC_HOST_VER_STATIC,37,32,71,8 + LTEXT "Response Timeout (s)",IDC_CONNTIMEOUT_STATIC,19,51,70,8 + EDITTEXT IDC_CONNTIMEOUT,96,50,43,12,ES_AUTOHSCROLL + DEFPUSHBUTTON "OK",IDOK,49,94,44,14,WS_GROUP + PUSHBUTTON "Cancel",IDCANCEL,98,93,47,15 + LTEXT "Fetch Size",IDC_FETCH_SIZE_STATIC,19,71,35,8 + 
EDITTEXT IDC_FETCH_SIZE,96,70,43,12,ES_AUTOHSCROLL +END + +DLG_LOG_OPTIONS DIALOGEX 0, 0, 251, 79 +STYLE DS_SETFONT | DS_MODALFRAME | DS_CENTER | WS_POPUP | WS_CAPTION | WS_SYSMENU +CAPTION "Logging Options" +FONT 8, "MS Sans Serif", 0, 0, 0x0 +BEGIN + GROUPBOX "",IDC_STATIC,10,4,230,51 + LTEXT "Log Level",IDC_STATIC,22,15,31,8 + COMBOBOX IDC_LOG_LEVEL,71,13,149,30,CBS_DROPDOWNLIST | WS_VSCROLL | WS_TABSTOP + LTEXT "Log Path",IDC_STATIC,22,38,29,8 + EDITTEXT IDC_LOG_PATH,71,36,149,12,ES_AUTOHSCROLL + DEFPUSHBUTTON "OK",IDOK,144,60,44,14,WS_GROUP + PUSHBUTTON "Cancel",IDCANCEL,193,59,47,15 +END + + +///////////////////////////////////////////////////////////////////////////// +// +// DESIGNINFO +// + +#ifdef APSTUDIO_INVOKED +GUIDELINES DESIGNINFO +BEGIN + DLG_CONFIG, DIALOG + BEGIN + RIGHTMARGIN, 270 + TOPMARGIN, 1 + BOTTOMMARGIN, 200 + END + + DLG_ADVANCED_OPTIONS, DIALOG + BEGIN + LEFTMARGIN, 5 + RIGHTMARGIN, 152 + TOPMARGIN, 5 + BOTTOMMARGIN, 108 + END + + DLG_LOG_OPTIONS, DIALOG + BEGIN + LEFTMARGIN, 5 + RIGHTMARGIN, 246 + TOPMARGIN, 5 + BOTTOMMARGIN, 74 + END +END +#endif // APSTUDIO_INVOKED + + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// + +VS_VERSION_INFO VERSIONINFO + FILEVERSION ES_DRVFILE_VERSION + PRODUCTVERSION ES_DRVFILE_VERSION + FILEFLAGSMASK 0x3L +#ifdef _DEBUG + FILEFLAGS 0x9L +#else + FILEFLAGS 0x8L +#endif + FILEOS 0x4L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904e4" + BEGIN + VALUE "Comments", "Elasticsearch ANSI ODBC driver" + VALUE "CompanyName", "Amazon" + VALUE "FileDescription", "Elasticsearch ODBC Driver (English)" + VALUE "FileVersion", ES_ODBC_VERSION + VALUE "InternalName", "odfesqlodbc" + VALUE "LegalCopyright", "Copyright" + VALUE "LegalTrademarks", "ODBC(TM) is a trademark of Microsoft Corporation. Microsoft? is a registered trademark of Microsoft Corporation. Windows(TM) is a trademark of Microsoft Corporation." 
+ VALUE "OriginalFilename", "odfesqlodbc.dll" + VALUE "ProductName", "Elasticsearch" + VALUE "ProductVersion", ES_ODBC_VERSION + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1252 + END +END + + +///////////////////////////////////////////////////////////////////////////// +// +// AFX_DIALOG_LAYOUT +// + +DLG_CONFIG AFX_DIALOG_LAYOUT +BEGIN + 0 +END + +DLG_ADVANCED_OPTIONS AFX_DIALOG_LAYOUT +BEGIN + 0 +END + +DLG_LOG_OPTIONS AFX_DIALOG_LAYOUT +BEGIN + 0 +END + + +///////////////////////////////////////////////////////////////////////////// +// +// String Table +// + +STRINGTABLE +BEGIN + IDS_AUTHTYPE_NONE "NONE" + IDS_AUTHTYPE_BASIC "BASIC" + IDS_AUTHTYPE_IAM "AWS_SIGV4" + IDS_LOGTYPE_OFF "LOG_OFF" + IDS_LOGTYPE_FATAL "LOG_FATAL" + IDS_LOGTYPE_ERROR "LOG_ERROR" + IDS_LOGTYPE_WARNING "LOG_WARNING" + IDS_LOGTYPE_INFO "LOG_INFO" + IDS_LOGTYPE_DEBUG "LOG_DEBUG" + IDS_LOGTYPE_TRACE "LOG_TRACE" + IDS_LOGTYPE_ALL "LOG_ALL" +END + +#endif // English (United States) resources +///////////////////////////////////////////////////////////////////////////// + + + +#ifndef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 3 resource. +// + + +///////////////////////////////////////////////////////////////////////////// +#endif // not APSTUDIO_INVOKED + diff --git a/sql-odbc/src/odfesqlodbc/es_parse_result.cpp b/sql-odbc/src/odfesqlodbc/es_parse_result.cpp new file mode 100644 index 0000000000..b73e291751 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_parse_result.cpp @@ -0,0 +1,465 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "es_parse_result.h" + +#include + +#include "es_helper.h" +#include "es_types.h" +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif // __APPLE__ +#include "rabbit.hpp" +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ +#include "statement.h" + +typedef std::vector< std::pair< std::string, OID > > schema_type; +typedef rabbit::array json_arr; +typedef json_arr::iterator::result_type json_arr_it; + +bool _CC_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result); +bool _CC_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result); +bool _CC_No_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result); +void GetSchemaInfo(schema_type &schema, json_doc &es_result_doc); +bool AssignColumnHeaders(const schema_type &doc_schema, QResultClass *q_res, + const ESResult &es_result); +bool AssignTableData(json_doc &es_result_doc, QResultClass *q_res, + size_t doc_schema_size, ColumnInfoClass &fields); +bool AssignRowData(const json_arr_it &row, size_t row_schema_size, + QResultClass *q_res, ColumnInfoClass &fields, + const size_t &row_size); +void UpdateResultFields(QResultClass *q_res, const ConnectionClass *conn, + const SQLULEN starting_cached_rows, const char *cursor, + std::string &command_type); +bool QR_prepare_for_tupledata(QResultClass *q_res); +void SetError(const char *err); +void ClearError(); + +// clang-format off +// Not all of these are being used at the moment, but these are the keywords in the json +static const std::string JSON_KW_SCHEMA = "schema"; +static const std::string JSON_KW_NAME 
= "name"; +static const std::string JSON_KW_TYPE = "type"; +static const std::string JSON_KW_TOTAL = "total"; +static const std::string JSON_KW_SIZE = "size"; +static const std::string JSON_KW_STATUS = "status"; +static const std::string JSON_KW_DATAROWS = "datarows"; +static const std::string JSON_KW_ERROR = "error"; +static const std::string JSON_KW_CURSOR = "cursor"; + +// clang-format on +const std::unordered_map< std::string, OID > type_to_oid_map = { + {"boolean", ES_TYPE_BOOL}, + {"byte", ES_TYPE_INT2}, + {"short", ES_TYPE_INT2}, + {"integer", ES_TYPE_INT4}, + {"long", ES_TYPE_INT8}, + {"half_float", ES_TYPE_FLOAT4}, + {"float", ES_TYPE_FLOAT4}, + {"double", ES_TYPE_FLOAT8}, + {"scaled_float", ES_TYPE_FLOAT8}, + {"keyword", ES_TYPE_VARCHAR}, + {"text", ES_TYPE_VARCHAR}, + {"date", ES_TYPE_TIMESTAMP}, + {"object", ES_TYPE_VARCHAR}, + {"nested", ES_TYPE_VARCHAR}, + {"date", ES_TYPE_DATE}}; + +#define ES_VARCHAR_SIZE (-2) +const std::unordered_map< OID, int16_t > oid_to_size_map = { + {ES_TYPE_BOOL, (int16_t)1}, + {ES_TYPE_INT2, (int16_t)2}, + {ES_TYPE_INT4, (int16_t)4}, + {ES_TYPE_INT8, (int16_t)8}, + {ES_TYPE_FLOAT4, (int16_t)4}, + {ES_TYPE_FLOAT8, (int16_t)8}, + {ES_TYPE_VARCHAR, (int16_t)ES_VARCHAR_SIZE}, + {ES_TYPE_DATE, (int16_t)ES_VARCHAR_SIZE}, + {ES_TYPE_TIMESTAMP, (int16_t)1}}; + +// Using global variable here so that the error message can be propagated +// without going otu of scope +std::string error_msg; + +void SetError(const char *err) { + error_msg = err; +} +void ClearError() { + error_msg = ""; +} +std::string GetResultParserError() { + return error_msg; +} + +BOOL CC_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result) { + ClearError(); + return _CC_from_ESResult(q_res, conn, cursor, es_result) ? 
TRUE : FALSE; +} + +BOOL CC_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result) { + ClearError(); + return _CC_Metadata_from_ESResult(q_res, conn, cursor, es_result) ? TRUE : FALSE; +} + +BOOL CC_No_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result) { + ClearError(); + return _CC_No_Metadata_from_ESResult(q_res, conn, cursor, es_result) + ? TRUE + : FALSE; +} + +BOOL CC_Append_Table_Data(json_doc &es_result_doc, QResultClass *q_res, + size_t doc_schema_size, ColumnInfoClass &fields) { + ClearError(); + return AssignTableData(es_result_doc, q_res, doc_schema_size, fields) + ? TRUE + : FALSE; +} + +bool _CC_No_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result) { + // Note - NULL conn and/or cursor is valid + if (q_res == NULL) + return false; + + try { + schema_type doc_schema; + GetSchemaInfo(doc_schema, es_result.es_result_doc); + + SQLULEN starting_cached_rows = q_res->num_cached_rows; + + // Assign table data and column headers + if (!AssignTableData(es_result.es_result_doc, q_res, doc_schema.size(), + *(q_res->fields))) + return false; + + // Update fields of QResult to reflect data written + UpdateResultFields(q_res, conn, starting_cached_rows, cursor, + es_result.command_type); + + // Return true (success) + return true; + } catch (const rabbit::type_mismatch &e) { + SetError(e.what()); + } catch (const rabbit::parse_error &e) { + SetError(e.what()); + } catch (const std::exception &e) { + SetError(e.what()); + } catch (...) 
{ + SetError("Unknown exception thrown in _CC_No_Metadata_from_ESResult."); + } + + // Exception occurred, return false (error) + return false; +} + +bool _CC_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result) { + // Note - NULL conn and/or cursor is valid + if (q_res == NULL) + return false; + + QR_set_conn(q_res, conn); + try { + schema_type doc_schema; + GetSchemaInfo(doc_schema, es_result.es_result_doc); + + // Assign table data and column headers + if (!AssignColumnHeaders(doc_schema, q_res, es_result)) + return false; + + // Set command type and cursor name + QR_set_command(q_res, es_result.command_type.c_str()); + QR_set_cursor(q_res, cursor); + if (cursor == NULL) + QR_set_reached_eof(q_res); + + // Return true (success) + return true; + } catch (const rabbit::type_mismatch &e) { + SetError(e.what()); + } catch (const rabbit::parse_error &e) { + SetError(e.what()); + } catch (const std::exception &e) { + SetError(e.what()); + } catch (...) 
{ + SetError("Unknown exception thrown in _CC_Metadata_from_ESResult."); + } + + // Exception occurred, return false (error) + return false; +} + +bool _CC_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result) { + // Note - NULL conn and/or cursor is valid + if (q_res == NULL) + return false; + + QR_set_conn(q_res, conn); + try { + schema_type doc_schema; + GetSchemaInfo(doc_schema, es_result.es_result_doc); + SQLULEN starting_cached_rows = q_res->num_cached_rows; + + // Assign table data and column headers + if ((!AssignColumnHeaders(doc_schema, q_res, es_result)) + || (!AssignTableData(es_result.es_result_doc, q_res, doc_schema.size(), + *(q_res->fields)))) + return false; + + // Update fields of QResult to reflect data written + UpdateResultFields(q_res, conn, starting_cached_rows, cursor, + es_result.command_type); + + // Return true (success) + return true; + } catch (const rabbit::type_mismatch &e) { + SetError(e.what()); + } catch (const rabbit::parse_error &e) { + SetError(e.what()); + } catch (const std::exception &e) { + SetError(e.what()); + } catch (...) { + SetError("Unknown exception thrown in CC_from_ESResult."); + } + + // Exception occurred, return false (error) + return false; +} + +void GetSchemaInfo(schema_type &schema, json_doc &es_result_doc) { + json_arr schema_arr = es_result_doc[JSON_KW_SCHEMA]; + for (auto it : schema_arr) { + auto mapped_oid = type_to_oid_map.find(it[JSON_KW_TYPE].as_string()); + OID type_oid = (mapped_oid == type_to_oid_map.end()) + ? 
SQL_WVARCHAR + : mapped_oid->second; + schema.push_back( + std::make_pair(it[JSON_KW_NAME].as_string(), type_oid)); + } +} + +bool AssignColumnHeaders(const schema_type &doc_schema, QResultClass *q_res, + const ESResult &es_result) { + // Verify server_info size matches the schema size + if (es_result.column_info.size() != doc_schema.size()) + return false; + + // Allocte memory for column fields + QR_set_num_fields(q_res, (uint16_t)es_result.column_info.size()); + if (QR_get_fields(q_res)->coli_array == NULL) + return false; + + // Assign column info + for (size_t i = 0; i < doc_schema.size(); i++) { + auto type_size_ptr = oid_to_size_map.find(doc_schema[i].second); + int16_t type_size = (type_size_ptr == oid_to_size_map.end()) + ? ES_ADT_UNSET + : type_size_ptr->second; + CI_set_field_info(QR_get_fields(q_res), (int)i, + doc_schema[i].first.c_str(), doc_schema[i].second, + type_size, es_result.column_info[i].length_of_str, + es_result.column_info[i].relation_id, + es_result.column_info[i].attribute_number); + QR_set_rstatus(q_res, PORES_FIELDS_OK); + } + q_res->num_fields = CI_get_num_fields(QR_get_fields(q_res)); + + return true; +} + +// Responsible for looping through rows, allocating tuples and passing rows for +// assignment +bool AssignTableData(json_doc &es_result_doc, QResultClass *q_res, + size_t doc_schema_size, ColumnInfoClass &fields) { + // Assign row info + json_arr es_result_data = es_result_doc[JSON_KW_DATAROWS]; + if (es_result_data.size() == 0) + return true; + + // Figure out number of columns are in a row and make schema is not bigger + // than it + size_t row_size = std::distance(es_result_data.begin()->value_begin(), + es_result_data.begin()->value_end()); + if (row_size < doc_schema_size) { + return false; + } + for (auto it : es_result_data) { + // Setup memory to receive tuple + if (!QR_prepare_for_tupledata(q_res)) + return false; + + // Assign row data + if (!AssignRowData(it, doc_schema_size, q_res, fields, row_size)) + return false; + 
} + + return true; +} + +// Responsible for assigning row data to tuples +bool AssignRowData(const json_arr_it &row, size_t row_schema_size, + QResultClass *q_res, ColumnInfoClass &fields, + const size_t &row_size) { + TupleField *tuple = + q_res->backend_tuples + (q_res->num_cached_rows * row_size); + + // Setup keyset if present + KeySet *ks = NULL; + if (QR_haskeyset(q_res)) { + ks = q_res->keyset + q_res->num_cached_keys; + ks->status = 0; + } + + // Loop through and assign data + size_t i = 0; + for (auto row_column = row.value_begin(); i < row_schema_size; + ++row_column, ++i) { + if (row_column->is_null()) { + tuple[i].len = SQL_NULL_DATA; + tuple[i].value = NULL; + } else { + // Copy string over to tuple + const std::string data = row_column->str(); + tuple[i].len = static_cast< int >(data.length()); + QR_MALLOC_return_with_error( + tuple[i].value, char, data.length() + 1, q_res, + "Out of memory in allocating item buffer.", false); + strcpy((char *)tuple[i].value, data.c_str()); + + // If data length exceeds current display size, set display size + if (fields.coli_array[i].display_size < tuple[i].len) + fields.coli_array[i].display_size = tuple[i].len; + } + } + + // If there are more rows than schema suggests, we have Keyset data + if (row_size > row_schema_size) { + if (ks == NULL) { + QR_set_rstatus(q_res, PORES_INTERNAL_ERROR); + QR_set_message(q_res, + "Keyset was NULL, but Keyset data was expected."); + return false; + } + + auto row_column = row.value_begin() + row_schema_size; + if (sscanf(row_column->str().c_str(), "(%u,%hu)", &ks->blocknum, + &ks->offset) + != 2) { + QR_set_rstatus(q_res, PORES_INTERNAL_ERROR); + QR_set_message(q_res, "Failed to assign Keyset."); + return false; + } + row_column++; + ks->oid = std::stoul(row_column->str(), nullptr, 10); + } + + // Increment relevant data + q_res->cursTuple++; + if (q_res->num_fields > 0) + QR_inc_num_cache(q_res); + else if (QR_haskeyset(q_res)) + q_res->num_cached_keys++; + + if 
((SQLULEN)q_res->cursTuple >= q_res->num_total_read) + q_res->num_total_read = q_res->cursTuple + 1; + return true; +} + +void UpdateResultFields(QResultClass *q_res, const ConnectionClass *conn, + const SQLULEN starting_cached_rows, const char *cursor, + std::string &command_type) { + // Adjust total read + if (!QR_once_reached_eof(q_res) + && q_res->cursTuple >= (Int4)q_res->num_total_read) + q_res->num_total_read = q_res->cursTuple + 1; + + // Adjust eof and tuple cursor + if (q_res->num_cached_rows - starting_cached_rows < q_res->cmd_fetch_size) { + QR_set_reached_eof(q_res); + if (q_res->cursTuple < (Int4)q_res->num_total_read) + q_res->cursTuple = q_res->num_total_read; + } + + // Handle NULL connection + if (conn != NULL) { + q_res->fetch_number = static_cast< SQLLEN >(0); + QR_set_rowstart_in_cache(q_res, 0); + q_res->key_base = 0; + } + + // Set command type and cursor name + QR_set_command(q_res, command_type.c_str()); + QR_set_cursor(q_res, cursor); + if (cursor == NULL) + QR_set_reached_eof(q_res); + + // Set flags, adjust pointers, and return true (success) + q_res->dataFilled = true; + q_res->tupleField = + q_res->backend_tuples + (q_res->fetch_number * q_res->num_fields); + QR_set_rstatus(q_res, PORES_TUPLES_OK); +} + +bool QR_prepare_for_tupledata(QResultClass *q_res) { + if (QR_get_cursor(q_res)) { + return true; + } + + // If total tuples > allocated tuples, need to reallocate + if (q_res->num_fields > 0 + && QR_get_num_total_tuples(q_res) >= q_res->count_backend_allocated) { + SQLLEN tuple_size = (q_res->count_backend_allocated < 1) + ? 
TUPLE_MALLOC_INC + : q_res->count_backend_allocated * 2; + + // Will return false if allocation fails + QR_REALLOC_return_with_error( + q_res->backend_tuples, TupleField, + tuple_size * q_res->num_fields * sizeof(TupleField), q_res, + "Out of memory while reading tuples.", false); + q_res->count_backend_allocated = tuple_size; + } + + // If total keyset > allocated keyset, need to reallocate + if (QR_haskeyset(q_res) + && q_res->num_cached_keys >= q_res->count_keyset_allocated) { + SQLLEN keyset_size = (q_res->count_keyset_allocated < 1) + ? TUPLE_MALLOC_INC + : q_res->count_keyset_allocated * 2; + + // Will return false if macro fails + QR_REALLOC_return_with_error( + q_res->keyset, KeySet, sizeof(KeySet) * keyset_size, q_res, + "Out of memory while allocating keyset", false); + memset(&q_res->keyset[q_res->count_keyset_allocated], 0, + (keyset_size - q_res->count_keyset_allocated) * sizeof(KeySet)); + q_res->count_keyset_allocated = keyset_size; + } + + return true; +} diff --git a/sql-odbc/src/odfesqlodbc/es_parse_result.h b/sql-odbc/src/odfesqlodbc/es_parse_result.h new file mode 100644 index 0000000000..0543c6ac0d --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_parse_result.h @@ -0,0 +1,44 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef _ES_PARSE_RESULT_H_ +#define _ES_PARSE_RESULT_H_ +#include "qresult.h" + +#ifdef __cplusplus +std::string GetResultParserError(); +extern "C" { +#endif +#ifdef __cplusplus +} +#endif + +#ifdef __cplusplus +#include "es_helper.h" +typedef rabbit::document json_doc; +// const char* is used instead of string for the cursor, because a NULL cursor +// is sometimes used Cannot pass q_res as reference because it breaks qresult.h +// macros that expect to use -> operator +BOOL CC_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result); +BOOL CC_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result); +BOOL CC_No_Metadata_from_ESResult(QResultClass *q_res, ConnectionClass *conn, + const char *cursor, ESResult &es_result); +BOOL CC_Append_Table_Data(json_doc &es_result_doc, QResultClass *q_res, + size_t doc_schema_size, ColumnInfoClass &fields); +#endif +#endif diff --git a/sql-odbc/src/odfesqlodbc/es_result_queue.cpp b/sql-odbc/src/odfesqlodbc/es_result_queue.cpp new file mode 100644 index 0000000000..0943f1cd0c --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_result_queue.cpp @@ -0,0 +1,64 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "es_result_queue.h" + +#include "es_types.h" + +ESResultQueue::ESResultQueue(unsigned int capacity) + : m_push_semaphore(capacity, capacity), + m_pop_semaphore(0, capacity) { +} + +ESResultQueue::~ESResultQueue() { + while (!m_queue.empty()) { + delete m_queue.front(); + m_queue.pop(); + } +} + +void ESResultQueue::clear() { + std::scoped_lock lock(m_queue_mutex); + while (!m_queue.empty()) { + delete m_queue.front(); + m_queue.pop(); + m_push_semaphore.release(); + m_pop_semaphore.lock(); + } +} + +bool ESResultQueue::pop(unsigned int timeout_ms, ESResult*& result) { + if (m_pop_semaphore.try_lock_for(timeout_ms)) { + std::scoped_lock lock(m_queue_mutex); + result = m_queue.front(); + m_queue.pop(); + m_push_semaphore.release(); + return true; + } + + return false; +} + +bool ESResultQueue::push(unsigned int timeout_ms, ESResult* result) { + if (m_push_semaphore.try_lock_for(timeout_ms)) { + std::scoped_lock lock(m_queue_mutex); + m_queue.push(result); + m_pop_semaphore.release(); + return true; + } + + return false; +} diff --git a/sql-odbc/src/odfesqlodbc/es_result_queue.h b/sql-odbc/src/odfesqlodbc/es_result_queue.h new file mode 100644 index 0000000000..d624d38934 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_result_queue.h @@ -0,0 +1,43 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ +#ifndef ES_RESULT_QUEUE +#define ES_RESULT_QUEUE + +#include +#include +#include "es_semaphore.h" + +#define QUEUE_TIMEOUT 20 // milliseconds + +struct ESResult; + +class ESResultQueue { + public: + ESResultQueue(unsigned int capacity); + ~ESResultQueue(); + + void clear(); + bool pop(unsigned int timeout_ms, ESResult*& result); + bool push(unsigned int timeout_ms, ESResult* result); + + private: + std::queue m_queue; + std::mutex m_queue_mutex; + es_semaphore m_push_semaphore; + es_semaphore m_pop_semaphore; +}; + +#endif diff --git a/sql-odbc/src/odfesqlodbc/es_semaphore.cpp b/sql-odbc/src/odfesqlodbc/es_semaphore.cpp new file mode 100644 index 0000000000..eaf1532564 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_semaphore.cpp @@ -0,0 +1,105 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "es_semaphore.h" + +#include + +#ifdef WIN32 +namespace { +HANDLE createSemaphore(unsigned int initial, unsigned int capacity) { + HANDLE semaphore = NULL; + std::string semName; + while (NULL == semaphore) { + semName = "es_sem_" + std::to_string(rand() * 1000); + semaphore = CreateSemaphore(NULL, initial, capacity, semName.c_str()); + } + + return semaphore; +} +} // namespace +#else +#include +#endif + +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif // __APPLE__ +es_semaphore::es_semaphore(unsigned int initial, unsigned int capacity) +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + : +#ifdef WIN32 + m_semaphore(createSemaphore(initial, capacity)) +#elif defined(__APPLE__) + m_semaphore(dispatch_semaphore_create(initial)) +#endif +{ +#if !defined(WIN32) && !defined(__APPLE__) + sem_init(&m_semaphore, 0, capacity); +#endif +} + +es_semaphore::~es_semaphore() { +#ifdef WIN32 + CloseHandle(m_semaphore); +#elif defined(__APPLE__) +#else + sem_destroy(&m_semaphore); +#endif +} + +void es_semaphore::lock() { +#ifdef WIN32 + WaitForSingleObject(m_semaphore, INFINITE); +#elif defined(__APPLE__) + dispatch_semaphore_wait(m_semaphore, DISPATCH_TIME_FOREVER); +#else + sem_wait(&m_semaphore); +#endif +} + +void es_semaphore::release() { +#ifdef WIN32 + ReleaseSemaphore(m_semaphore, 1, NULL); +#elif defined(__APPLE__) + dispatch_semaphore_signal(m_semaphore); +#else + sem_post(&m_semaphore); +#endif +} + +bool es_semaphore::try_lock_for(unsigned int timeout_ms) { +#ifdef WIN32 + return WaitForSingleObject(m_semaphore, timeout_ms) == WAIT_OBJECT_0; +#elif defined(__APPLE__) + return 0 + == dispatch_semaphore_wait( + m_semaphore, dispatch_time(DISPATCH_TIME_NOW, + static_cast< int64_t >( + timeout_ms * NSEC_PER_MSEC))); +#else + struct timespec ts; + if (-1 == clock_gettime(CLOCK_REALTIME, &ts)) { + return false; + } + + ts.tv_nsec += timeout_ms * 1000000; + return 0 == 
sem_timedwait(&m_semaphore & ts); +#endif +} diff --git a/sql-odbc/src/odfesqlodbc/es_semaphore.h b/sql-odbc/src/odfesqlodbc/es_semaphore.h new file mode 100644 index 0000000000..980e4cd6eb --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_semaphore.h @@ -0,0 +1,46 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ +#ifndef ES_SEMAPHORE +#define ES_SEMAPHORE + +#ifdef WIN32 + #include +#elif defined(__APPLE__) + #include +#else + #include +#endif + +class es_semaphore { + public: + es_semaphore(unsigned int initial, unsigned int capacity); + ~es_semaphore(); + + void lock(); + void release(); + bool try_lock_for(unsigned int timeout_ms); + + private: +#ifdef WIN32 + HANDLE m_semaphore; +#elif defined(__APPLE__) + dispatch_semaphore_t m_semaphore; +#else + sem_t m_semaphore; +#endif +}; + +#endif diff --git a/sql-odbc/src/odfesqlodbc/es_statement.cpp b/sql-odbc/src/odfesqlodbc/es_statement.cpp new file mode 100644 index 0000000000..fccbfca2af --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_statement.cpp @@ -0,0 +1,341 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "es_statement.h" + +#include "environ.h" // Critical section for statment +#include "es_apifunc.h" +#include "es_helper.h" +#include "misc.h" +#include "statement.h" + +extern "C" void *common_cs; + +RETCODE ExecuteStatement(StatementClass *stmt, BOOL commit) { + CSTR func = "ExecuteStatement"; + int func_cs_count = 0; + ConnectionClass *conn = SC_get_conn(stmt); + CONN_Status oldstatus = conn->status; + + auto CleanUp = [&]() -> RETCODE { + SC_SetExecuting(stmt, FALSE); + CLEANUP_FUNC_CONN_CS(func_cs_count, conn); + if (conn->status != CONN_DOWN) + conn->status = oldstatus; + if (SC_get_errornumber(stmt) == STMT_OK) + return SQL_SUCCESS; + else if (SC_get_errornumber(stmt) < STMT_OK) + return SQL_SUCCESS_WITH_INFO; + else { + if (!SC_get_errormsg(stmt) || !SC_get_errormsg(stmt)[0]) { + if (STMT_NO_MEMORY_ERROR != SC_get_errornumber(stmt)) + SC_set_errormsg(stmt, "Error while executing the query"); + SC_log_error(func, NULL, stmt); + } + return SQL_ERROR; + } + }; + + ENTER_INNER_CONN_CS(conn, func_cs_count); + + if (conn->status == CONN_EXECUTING) { + SC_set_error(stmt, STMT_SEQUENCE_ERROR, "Connection is already in use.", + func); + return CleanUp(); + } + + if (!SC_SetExecuting(stmt, TRUE)) { + SC_set_error(stmt, STMT_OPERATION_CANCELLED, "Cancel Request Accepted", + func); + return CleanUp(); + } + + conn->status = CONN_EXECUTING; + + QResultClass *res = SendQueryGetResult(stmt, commit); + if (!res) { + std::string es_conn_err = GetErrorMsg(SC_get_conn(stmt)->esconn); + std::string es_parse_err = GetResultParserError(); + if (!es_conn_err.empty()) { + SC_set_error(stmt, STMT_NO_RESPONSE, es_conn_err.c_str(), func); + } else if (!es_parse_err.empty()) { + SC_set_error(stmt, STMT_EXEC_ERROR, es_parse_err.c_str(), 
func); + } else if (SC_get_errornumber(stmt) <= 0) { + SC_set_error( + stmt, STMT_NO_RESPONSE, + "Failed to retrieve error message from result. Connection may be down.", + func); + } + return CleanUp(); + } + + if (CONN_DOWN != conn->status) + conn->status = oldstatus; + stmt->status = STMT_FINISHED; + LEAVE_INNER_CONN_CS(func_cs_count, conn); + + // Check the status of the result + if (SC_get_errornumber(stmt) < 0) { + if (QR_command_successful(res)) + SC_set_errornumber(stmt, STMT_OK); + else if (QR_command_nonfatal(res)) + SC_set_errornumber(stmt, STMT_INFO_ONLY); + else + SC_set_errorinfo(stmt, res, 0); + } + + // Set cursor before the first tuple in the list + stmt->currTuple = -1; + SC_set_current_col(stmt, static_cast< int >(stmt->currTuple)); + SC_set_rowset_start(stmt, stmt->currTuple, FALSE); + + // Only perform if query was not aborted + if (!QR_get_aborted(res)) { + // Check if result columns were obtained from query + for (QResultClass *tres = res; tres; tres = tres->next) { + Int2 numcols = QR_NumResultCols(tres); + if (numcols <= 0) + continue; + ARDFields *opts = SC_get_ARDF(stmt); + extend_column_bindings(opts, numcols); + if (opts->bindings) + break; + + // Failed to allocate + QR_Destructor(res); + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Could not get enough free memory to store " + "the binding information", + func); + return CleanUp(); + } + } + + QResultClass *last = SC_get_Result(stmt); + if (last) { + // Statement already contains a result + // Append to end if this hasn't happened + while (last->next != NULL) { + if (last == res) + break; + last = last->next; + } + if (last != res) + last->next = res; + } else { + // Statement does not contain a result + // Assign directly + SC_set_Result(stmt, res); + } + + // This will commit results for SQLExecDirect and will not commit + // results for SQLPrepare since only metadata is required for SQLPrepare + if (commit) { + GetNextResultSet(stmt); + } + + stmt->diag_row_count = 
res->recent_processed_row_count; + + return CleanUp(); +} + +SQLRETURN GetNextResultSet(StatementClass *stmt) { + ConnectionClass *conn = SC_get_conn(stmt); + QResultClass *q_res = SC_get_Result(stmt); + if ((q_res == NULL) && (conn == NULL)) { + return SQL_ERROR; + } + + SQLSMALLINT total_columns = -1; + if (!SQL_SUCCEEDED(SQLNumResultCols(stmt, &total_columns)) || + (total_columns == -1)) { + return SQL_ERROR; + } + + ESResult *es_res = ESGetResult(conn->esconn); + if (es_res != NULL) { + // Save server cursor id to fetch more pages later + if (es_res->es_result_doc.has("cursor")) { + QR_set_server_cursor_id( + q_res, es_res->es_result_doc["cursor"].as_string().c_str()); + } else { + QR_set_server_cursor_id(q_res, NULL); + } + + // Responsible for looping through rows, allocating tuples and + // appending these rows in q_result + CC_Append_Table_Data(es_res->es_result_doc, q_res, total_columns, + *(q_res->fields)); + } + + return SQL_SUCCESS; +} + +RETCODE RePrepareStatement(StatementClass *stmt) { + CSTR func = "RePrepareStatement"; + RETCODE result = SC_initialize_and_recycle(stmt); + if (result != SQL_SUCCESS) + return result; + if (!stmt->statement) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Expected statement to be allocated.", func); + return SQL_ERROR; + } + + // If an SQLPrepare was performed prior to this, but was left in the + // described state because an error prior to SQLExecute then set the + // statement to finished so it can be recycled. 
+ if (stmt->status == STMT_DESCRIBED) + stmt->status = STMT_FINISHED; + + return SQL_SUCCESS; +} + +RETCODE PrepareStatement(StatementClass *stmt, const SQLCHAR *stmt_str, + SQLINTEGER stmt_sz) { + CSTR func = "PrepareStatement"; + RETCODE result = SC_initialize_and_recycle(stmt); + if (result != SQL_SUCCESS) + return result; + + stmt->statement = make_string(stmt_str, stmt_sz, NULL, 0); + if (!stmt->statement) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "No memory available to store statement", func); + return SQL_ERROR; + } + + // If an SQLPrepare was performed prior to this, but was left in the + // described state because an error prior to SQLExecute then set the + // statement to finished so it can be recycled. + if (stmt->status == STMT_DESCRIBED) + stmt->status = STMT_FINISHED; + stmt->statement_type = (short)statement_type(stmt->statement); + + return SQL_SUCCESS; +} + +QResultClass *SendQueryGetResult(StatementClass *stmt, BOOL commit) { + if (stmt == NULL) + return NULL; + + // Allocate QResultClass + QResultClass *res = QR_Constructor(); + if (res == NULL) + return NULL; + + // Send command + ConnectionClass *conn = SC_get_conn(stmt); + if (ESExecDirect(conn->esconn, stmt->statement, conn->connInfo.fetch_size) != 0) { + QR_Destructor(res); + return NULL; + } + res->rstatus = PORES_COMMAND_OK; + + // Get ESResult + ESResult *es_res = ESGetResult(conn->esconn); + if (es_res == NULL) { + QR_Destructor(res); + return NULL; + } + + BOOL success = + commit + ? 
CC_from_ESResult(res, conn, res->cursor_name, *es_res) + : CC_Metadata_from_ESResult(res, conn, res->cursor_name, *es_res); + + // Convert result to QResultClass + if (!success) { + QR_Destructor(res); + res = NULL; + } + + if (commit) { + // Deallocate ESResult + ESClearResult(es_res); + res->es_result = NULL; + } else { + // Set ESResult into connection class so it can be used later + res->es_result = es_res; + } + return res; +} + +RETCODE AssignResult(StatementClass *stmt) { + if (stmt == NULL) + return SQL_ERROR; + + QResultClass *res = SC_get_Result(stmt); + if (!res || !res->es_result) { + return SQL_ERROR; + } + + // Commit result to QResultClass + ESResult *es_res = static_cast< ESResult * >(res->es_result); + ConnectionClass *conn = SC_get_conn(stmt); + if (!CC_No_Metadata_from_ESResult(res, conn, res->cursor_name, *es_res)) { + QR_Destructor(res); + return SQL_ERROR; + } + GetNextResultSet(stmt); + + // Deallocate and return result + ESClearResult(es_res); + res->es_result = NULL; + return SQL_SUCCESS; +} + +void ClearESResult(void *es_result) { + if (es_result != NULL) { + ESResult *es_res = static_cast< ESResult * >(es_result); + ESClearResult(es_res); + } +} + +SQLRETURN ESAPI_Cancel(HSTMT hstmt) { + // Verify pointer validity and convert to StatementClass + if (hstmt == NULL) + return SQL_INVALID_HANDLE; + StatementClass *stmt = (StatementClass *)hstmt; + + // Get execution delegate (if applicable) and initialize return code + StatementClass *estmt = + (stmt->execute_delegate == NULL) ? 
stmt : stmt->execute_delegate; + SQLRETURN ret = SQL_SUCCESS; + + // Entry common critical section + ENTER_COMMON_CS; + + // Waiting for more data from SQLParamData/SQLPutData - cancel statement + if (estmt->data_at_exec >= 0) { + // Enter statement critical section + ENTER_STMT_CS(stmt); + + // Clear info and cancel need data + SC_clear_error(stmt); + estmt->data_at_exec = -1; + estmt->put_data = FALSE; + cancelNeedDataState(estmt); + + // Leave statement critical section + LEAVE_STMT_CS(stmt); + } + + // Leave common critical section + LEAVE_COMMON_CS; + + return ret; +} diff --git a/sql-odbc/src/odfesqlodbc/es_statement.h b/sql-odbc/src/odfesqlodbc/es_statement.h new file mode 100644 index 0000000000..455f226664 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_statement.h @@ -0,0 +1,39 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef _ES_STATEMENT_H_ +#define _ES_STATEMENT_H_ + +#include "es_parse_result.h" +#include "qresult.h" +#include "statement.h" + +#ifdef __cplusplus +extern "C" { +#endif +RETCODE RePrepareStatement(StatementClass *stmt); +RETCODE PrepareStatement(StatementClass* stmt, const SQLCHAR *stmt_str, SQLINTEGER stmt_sz); +RETCODE ExecuteStatement(StatementClass *stmt, BOOL commit); +QResultClass *SendQueryGetResult(StatementClass *stmt, BOOL commit); +RETCODE AssignResult(StatementClass *stmt); +SQLRETURN ESAPI_Cancel(HSTMT hstmt); +SQLRETURN GetNextResultSet(StatementClass *stmt); +void ClearESResult(void *es_result); +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sql-odbc/src/odfesqlodbc/es_types.c b/sql-odbc/src/odfesqlodbc/es_types.c new file mode 100644 index 0000000000..316bfcaf0c --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_types.c @@ -0,0 +1,1524 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "es_types.h" + +#include "dlg_specific.h" +#include "environ.h" +#include "es_connection.h" +#include "qresult.h" +#include "statement.h" +#ifndef WIN32 +#include +#endif + +#define EXPERIMENTAL_CURRENTLY + +SQLSMALLINT ansi_to_wtype(const ConnectionClass *self, SQLSMALLINT ansitype) { +#ifndef UNICODE_SUPPORT + return ansitype; +#else + if (!ALLOW_WCHAR(self)) + return ansitype; + switch (ansitype) { + case SQL_CHAR: + return SQL_WCHAR; + case SQL_VARCHAR: + return SQL_WVARCHAR; + case SQL_LONGVARCHAR: + return SQL_WLONGVARCHAR; + } + return ansitype; +#endif /* UNICODE_SUPPORT */ +} + +/* These are NOW the SQL Types reported in SQLGetTypeInfo. */ +SQLSMALLINT sqlTypes[] = { + SQL_BIGINT, + /* SQL_BINARY, -- Commented out because VarBinary is more correct. */ + SQL_BIT, SQL_CHAR, SQL_TYPE_DATE, SQL_DATE, SQL_DECIMAL, SQL_DOUBLE, + SQL_FLOAT, SQL_INTEGER, SQL_LONGVARBINARY, SQL_LONGVARCHAR, SQL_NUMERIC, + SQL_REAL, SQL_SMALLINT, SQL_TYPE_TIME, SQL_TYPE_TIMESTAMP, SQL_TIME, + SQL_TIMESTAMP, SQL_TINYINT, SQL_VARBINARY, SQL_VARCHAR, +#ifdef UNICODE_SUPPORT + SQL_WCHAR, SQL_WVARCHAR, SQL_WLONGVARCHAR, +#endif /* UNICODE_SUPPORT */ + SQL_GUID, +/* AFAIK SQL_INTERVAL types cause troubles in some spplications */ +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + SQL_INTERVAL_MONTH, SQL_INTERVAL_YEAR, SQL_INTERVAL_YEAR_TO_MONTH, + SQL_INTERVAL_DAY, SQL_INTERVAL_HOUR, SQL_INTERVAL_MINUTE, + SQL_INTERVAL_SECOND, SQL_INTERVAL_DAY_TO_HOUR, SQL_INTERVAL_DAY_TO_MINUTE, + SQL_INTERVAL_DAY_TO_SECOND, SQL_INTERVAL_HOUR_TO_MINUTE, + SQL_INTERVAL_HOUR_TO_SECOND, SQL_INTERVAL_MINUTE_TO_SECOND, +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + 0}; + +#ifdef ODBCINT64 +#define ALLOWED_C_BIGINT SQL_C_SBIGINT +/* #define ALLOWED_C_BIGINT SQL_C_CHAR */ /* Delphi should be either ? 
*/ +#else +#define ALLOWED_C_BIGINT SQL_C_CHAR +#endif + +OID es_true_type(const ConnectionClass *conn, OID type, OID basetype) { + if (0 == basetype) + return type; + else if (0 == type) + return basetype; + else if (type == (OID)conn->lobj_type) + return type; + return basetype; +} + +#define MONTH_BIT (1 << 17) +#define YEAR_BIT (1 << 18) +#define DAY_BIT (1 << 19) +#define HOUR_BIT (1 << 26) +#define MINUTE_BIT (1 << 27) +#define SECOND_BIT (1 << 28) + +static Int4 getCharColumnSizeX(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as) { + int p = -1, maxsize; + MYLOG(ES_TRACE, + "entering type=%d, atttypmod=%d, adtsize_or=%d, unknown = %d\n", type, + atttypmod, adtsize_or_longestlen, handle_unknown_size_as); + + maxsize = MAX_VARCHAR_SIZE; +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(conn) && isSqlServr() && maxsize > 4000) + maxsize = 4000; +#endif /* UNICODE_SUPPORT */ + + if (maxsize == TEXT_FIELD_SIZE + 1) /* magic length for testing */ + maxsize = 0; + + /* + * Static ColumnSize (i.e., the Maximum ColumnSize of the datatype) This + * has nothing to do with a result set. + */ + MYLOG(ES_DEBUG, "!!! atttypmod < 0 ?\n"); + if (atttypmod < 0 && adtsize_or_longestlen < 0) + return maxsize; + + MYLOG(ES_DEBUG, "!!! adtsize_or_logngest=%d\n", adtsize_or_longestlen); + p = adtsize_or_longestlen; /* longest */ + /* + * Catalog Result Sets -- use assigned column width (i.e., from + * set_tuplefield_string) + */ + MYLOG(ES_DEBUG, "!!! 
catalog_result=%d\n", handle_unknown_size_as); + if (UNKNOWNS_AS_LONGEST == handle_unknown_size_as) { + MYLOG(ES_DEBUG, "LONGEST: p = %d\n", p); + if (p > 0 && (atttypmod < 0 || atttypmod > p)) + return p; + } + if (TYPE_MAY_BE_ARRAY(type)) { + if (p > 0) + return p; + return maxsize; + } + + /* Size is unknown -- handle according to parameter */ + if (atttypmod > 0) /* maybe the length is known */ + { + return atttypmod; + } + + /* The type is really unknown */ + switch (handle_unknown_size_as) { + case UNKNOWNS_AS_DONTKNOW: + return -1; + case UNKNOWNS_AS_LONGEST: + case UNKNOWNS_AS_MAX: + break; + default: + return -1; + } + if (maxsize <= 0) + return maxsize; + switch (type) { + case ES_TYPE_BPCHAR: + case ES_TYPE_VARCHAR: + case ES_TYPE_TEXT: + return maxsize; + } + + if (p > maxsize) + maxsize = p; + return maxsize; +} + +/* + * Specify when handle_unknown_size_as parameter is unused + */ +#define UNUSED_HANDLE_UNKNOWN_SIZE_AS (-2) + +static SQLSMALLINT getNumericDecimalDigitsX(const ConnectionClass *conn, + OID type, int atttypmod, + int adtsize_or_longest, + int handle_unknown_size_as) { + UNUSED(conn, handle_unknown_size_as); + SQLSMALLINT default_decimal_digits = 6; + + MYLOG(ES_TRACE, "entering type=%d, atttypmod=%d\n", type, atttypmod); + + if (atttypmod < 0 && adtsize_or_longest < 0) + return default_decimal_digits; + + if (atttypmod > -1) + return (SQLSMALLINT)(atttypmod & 0xffff); + if (adtsize_or_longest <= 0) + return default_decimal_digits; + adtsize_or_longest >>= 16; /* extract the scale part */ + return (SQLSMALLINT)adtsize_or_longest; +} + +static Int4 /* Elasticsearch restritiction */ +getNumericColumnSizeX(const ConnectionClass *conn, OID type, int atttypmod, + int adtsize_or_longest, int handle_unknown_size_as) { + UNUSED(conn); + Int4 default_column_size = 28; + MYLOG(ES_TRACE, "entering type=%d, typmod=%d\n", type, atttypmod); + + if (atttypmod > -1) + return (atttypmod >> 16) & 0xffff; + switch (handle_unknown_size_as) { + case 
UNKNOWNS_AS_DONTKNOW: + return SQL_NO_TOTAL; + } + if (adtsize_or_longest <= 0) + return default_column_size; + adtsize_or_longest %= (1 << 16); /* extract the precision part */ + switch (handle_unknown_size_as) { + case UNKNOWNS_AS_MAX: + return adtsize_or_longest > default_column_size + ? adtsize_or_longest + : default_column_size; + default: + if (adtsize_or_longest < 10) + adtsize_or_longest = 10; + } + return adtsize_or_longest; +} + +static SQLSMALLINT getTimestampDecimalDigitsX(const ConnectionClass *conn, + OID type, int atttypmod) { + UNUSED(conn); + MYLOG(ES_DEBUG, "type=%d, atttypmod=%d\n", type, atttypmod); + return (SQLSMALLINT)(atttypmod > -1 ? atttypmod : 6); +} + +#ifdef ES_INTERVAL_AS_SQL_INTERVAL +static SQLSMALLINT getIntervalDecimalDigits(OID type, int atttypmod) { + Int4 prec; + + MYLOG(ES_TRACE, "entering type=%d, atttypmod=%d\n", type, atttypmod); + + if ((atttypmod & SECOND_BIT) == 0) + return 0; + return (SQLSMALLINT)((prec = atttypmod & 0xffff) == 0xffff ? 6 : prec); +} +#endif // ES_INTERVAL_AS_SQL_INTERVAL + +SQLSMALLINT +estype_attr_to_concise_type(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as) { + EnvironmentClass *env = (EnvironmentClass *)CC_get_env(conn); +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + SQLSMALLINT sqltype; +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + BOOL bLongVarchar, bFixed = FALSE; + + switch (type) { + case ES_TYPE_CHAR: + return ansi_to_wtype(conn, SQL_CHAR); + case ES_TYPE_NAME: + case ES_TYPE_REFCURSOR: + return ansi_to_wtype(conn, SQL_VARCHAR); + + case ES_TYPE_BPCHAR: + bFixed = TRUE; + case ES_TYPE_VARCHAR: + if (getCharColumnSizeX(conn, type, atttypmod, adtsize_or_longestlen, + handle_unknown_size_as) + > MAX_VARCHAR_SIZE) + bLongVarchar = TRUE; + else + bLongVarchar = FALSE; + return ansi_to_wtype(conn, bLongVarchar + ? SQL_LONGVARCHAR + : (bFixed ? 
SQL_CHAR : SQL_VARCHAR)); + case ES_TYPE_TEXT: + bLongVarchar = DEFAULT_TEXTASLONGVARCHAR; + if (bLongVarchar) { + int column_size = getCharColumnSizeX(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + if (column_size > 0 && column_size <= MAX_VARCHAR_SIZE) + bLongVarchar = FALSE; + } + return ansi_to_wtype(conn, + bLongVarchar ? SQL_LONGVARCHAR : SQL_VARCHAR); + + case ES_TYPE_BYTEA: + return SQL_VARBINARY; + case ES_TYPE_LO_UNDEFINED: + return SQL_LONGVARBINARY; + + case ES_TYPE_INT2: + return SQL_SMALLINT; + + case ES_TYPE_OID: + case ES_TYPE_XID: + case ES_TYPE_INT4: + return SQL_INTEGER; + + /* Change this to SQL_BIGINT for ODBC v3 bjm 2001-01-23 */ + case ES_TYPE_INT8: + if (conn->ms_jet) + return SQL_NUMERIC; /* maybe a little better than SQL_VARCHAR */ + return SQL_BIGINT; + + case ES_TYPE_NUMERIC: + return SQL_NUMERIC; + + case ES_TYPE_FLOAT4: + return SQL_REAL; + case ES_TYPE_FLOAT8: + return SQL_FLOAT; + case ES_TYPE_DATE: + if (EN_is_odbc3(env)) + return SQL_TYPE_DATE; + return SQL_DATE; + case ES_TYPE_TIME: + if (EN_is_odbc3(env)) + return SQL_TYPE_TIME; + return SQL_TIME; + case ES_TYPE_ABSTIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + case ES_TYPE_TIMESTAMP: + if (EN_is_odbc3(env)) + return SQL_TYPE_TIMESTAMP; + return SQL_TIMESTAMP; + case ES_TYPE_MONEY: + return SQL_FLOAT; + case ES_TYPE_BOOL: + return SQL_BIT; + case ES_TYPE_XML: + return ansi_to_wtype(conn, SQL_LONGVARCHAR); + case ES_TYPE_INET: + case ES_TYPE_CIDR: + case ES_TYPE_MACADDR: + return ansi_to_wtype(conn, SQL_VARCHAR); + case ES_TYPE_UUID: + return SQL_GUID; + + case ES_TYPE_INTERVAL: +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + if (sqltype = get_interval_type(atttypmod, NULL), 0 != sqltype) + return sqltype; +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + return ansi_to_wtype(conn, SQL_VARCHAR); + + default: + + /* + * first, check to see if 'type' is in list. If not, look up + * with query. Add oid, name to list. 
If it's already in + * list, just return. + */ + /* hack until permanent type is available */ + if (type == (OID)conn->lobj_type) + return SQL_LONGVARBINARY; + + bLongVarchar = DEFAULT_UNKNOWNSASLONGVARCHAR; + if (bLongVarchar) { + int column_size = getCharColumnSizeX(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + if (column_size > 0 && column_size <= MAX_VARCHAR_SIZE) + bLongVarchar = FALSE; + } +#ifdef EXPERIMENTAL_CURRENTLY + return ansi_to_wtype(conn, + bLongVarchar ? SQL_LONGVARCHAR : SQL_VARCHAR); +#else + return bLongVarchar ? SQL_LONGVARCHAR : SQL_VARCHAR; +#endif /* EXPERIMENTAL_CURRENTLY */ + } +} + +SQLSMALLINT +estype_attr_to_sqldesctype(const ConnectionClass *conn, OID type, int atttypmod, + int adtsize_or_longestlen, + int handle_unknown_size_as) { + SQLSMALLINT rettype; + +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + if (ES_TYPE_INTERVAL == type) + return SQL_INTERVAL; +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + switch (rettype = estype_attr_to_concise_type(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as)) { + case SQL_TYPE_DATE: + case SQL_TYPE_TIME: + case SQL_TYPE_TIMESTAMP: + return SQL_DATETIME; + } + return rettype; +} + +SQLSMALLINT +estype_attr_to_datetime_sub(const ConnectionClass *conn, OID type, + int atttypmod) { + UNUSED(conn, type, atttypmod); + return -1; +} + +SQLSMALLINT +estype_attr_to_ctype(const ConnectionClass *conn, OID type, int atttypmod) { + UNUSED(atttypmod); + EnvironmentClass *env = (EnvironmentClass *)CC_get_env(conn); +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + SQLSMALLINT ctype; +#endif /* ES_INTERVAL_A_SQL_INTERVAL */ + + switch (type) { + case ES_TYPE_INT8: + if (!conn->ms_jet) + return ALLOWED_C_BIGINT; + return SQL_C_CHAR; + case ES_TYPE_NUMERIC: + return SQL_C_CHAR; + case ES_TYPE_INT2: + return SQL_C_SSHORT; + case ES_TYPE_OID: + case ES_TYPE_XID: + return SQL_C_ULONG; + case ES_TYPE_INT4: + return SQL_C_SLONG; + case ES_TYPE_FLOAT4: + return SQL_C_FLOAT; + case 
ES_TYPE_FLOAT8: + return SQL_C_DOUBLE; + case ES_TYPE_DATE: + if (EN_is_odbc3(env)) + return SQL_C_TYPE_DATE; + return SQL_C_DATE; + case ES_TYPE_TIME: + if (EN_is_odbc3(env)) + return SQL_C_TYPE_TIME; + return SQL_C_TIME; + case ES_TYPE_ABSTIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + case ES_TYPE_TIMESTAMP: + if (EN_is_odbc3(env)) + return SQL_C_TYPE_TIMESTAMP; + return SQL_C_TIMESTAMP; + case ES_TYPE_MONEY: + return SQL_C_FLOAT; + case ES_TYPE_BOOL: + return SQL_C_BIT; + + case ES_TYPE_BYTEA: + return SQL_C_BINARY; + case ES_TYPE_LO_UNDEFINED: + return SQL_C_BINARY; + case ES_TYPE_BPCHAR: + case ES_TYPE_VARCHAR: + case ES_TYPE_TEXT: + return ansi_to_wtype(conn, SQL_C_CHAR); + case ES_TYPE_UUID: + if (!conn->ms_jet) + return SQL_C_GUID; + return ansi_to_wtype(conn, SQL_C_CHAR); + + case ES_TYPE_INTERVAL: +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + if (ctype = get_interval_type(atttypmod, NULL), 0 != ctype) + return ctype; +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + return ansi_to_wtype(conn, SQL_CHAR); + + default: + /* hack until permanent type is available */ + if (type == (OID)conn->lobj_type) + return SQL_C_BINARY; + + /* Experimental, Does this work ? 
*/ +#ifdef EXPERIMENTAL_CURRENTLY + return ansi_to_wtype(conn, SQL_C_CHAR); +#else + return SQL_C_CHAR; +#endif /* EXPERIMENTAL_CURRENTLY */ + } +} + +const char *estype_attr_to_name(const ConnectionClass *conn, OID type, + int typmod, BOOL auto_increment) { + UNUSED(conn, typmod, conn, auto_increment); + switch (type) { + case ES_TYPE_BOOL: + return ES_TYPE_NAME_BOOLEAN; + case ES_TYPE_INT1: + return ES_TYPE_NAME_BYTE; + case ES_TYPE_INT2: + return ES_TYPE_NAME_SHORT; + case ES_TYPE_INT4: + return ES_TYPE_NAME_INTEGER; + case ES_TYPE_INT8: + return ES_TYPE_NAME_LONG; + case ES_TYPE_HALF_FLOAT: + return ES_TYPE_NAME_HALF_FLOAT; + case ES_TYPE_FLOAT4: + return ES_TYPE_NAME_FLOAT; + case ES_TYPE_FLOAT8: + return ES_TYPE_NAME_DOUBLE; + case ES_TYPE_SCALED_FLOAT: + return ES_TYPE_NAME_SCALED_FLOAT; + case ES_TYPE_KEYWORD: + return ES_TYPE_NAME_KEYWORD; + case ES_TYPE_TEXT: + return ES_TYPE_NAME_TEXT; + case ES_TYPE_NESTED: + return ES_TYPE_NAME_NESTED; + case ES_TYPE_DATETIME: + return ES_TYPE_NAME_DATE; + case ES_TYPE_OBJECT: + return ES_TYPE_NAME_OBJECT; + case ES_TYPE_VARCHAR: + return ES_TYPE_NAME_VARCHAR; + default: + return ES_TYPE_NAME_UNSUPPORTED; + } +} + +Int4 /* Elasticsearch restriction */ +estype_attr_column_size(const ConnectionClass *conn, OID type, int atttypmod, + int adtsize_or_longest, int handle_unknown_size_as) { + UNUSED(handle_unknown_size_as, adtsize_or_longest, atttypmod, conn); + switch (type) { + case ES_TYPE_BOOL: + return 1; + case ES_TYPE_INT1: + return 3; + case ES_TYPE_INT2: + return 5; + case ES_TYPE_INT4: + return 10; + case ES_TYPE_INT8: + return 19; + case ES_TYPE_HALF_FLOAT: + return 7; + case ES_TYPE_FLOAT4: + return 7; + case ES_TYPE_FLOAT8: + return 15; + case ES_TYPE_SCALED_FLOAT: + return 15; + case ES_TYPE_KEYWORD: + return 256; + case ES_TYPE_TEXT: + return INT_MAX; + case ES_TYPE_NESTED: + return 0; + case ES_TYPE_DATETIME: + return 24; + case ES_TYPE_OBJECT: + return 0; + default: + return adtsize_or_longest; + } +} + 
+SQLSMALLINT +estype_attr_precision(const ConnectionClass *conn, OID type, int atttypmod, + int adtsize_or_longest, int handle_unknown_size_as) { + switch (type) { + case ES_TYPE_NUMERIC: + return (SQLSMALLINT)getNumericColumnSizeX(conn, type, atttypmod, + adtsize_or_longest, + handle_unknown_size_as); + case ES_TYPE_TIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + return getTimestampDecimalDigitsX(conn, type, atttypmod); +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + case ES_TYPE_INTERVAL: + return getIntervalDecimalDigits(type, atttypmod); +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + } + return -1; +} + +Int4 estype_attr_display_size(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as) { + int dsize; + + switch (type) { + case ES_TYPE_INT2: + return 6; + + case ES_TYPE_OID: + case ES_TYPE_XID: + return 10; + + case ES_TYPE_INT4: + return 11; + + case ES_TYPE_INT8: + return 20; /* signed: 19 digits + sign */ + + case ES_TYPE_NUMERIC: + dsize = getNumericColumnSizeX(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + return dsize <= 0 ? 
dsize : dsize + 2; + + case ES_TYPE_MONEY: + return 15; /* ($9,999,999.99) */ + + case ES_TYPE_FLOAT4: /* a sign, ES_REAL_DIGITS digits, a decimal point, + the letter E, a sign, and 2 digits */ + return (1 + ES_REAL_DIGITS + 1 + 1 + 3); + + case ES_TYPE_FLOAT8: /* a sign, ES_DOUBLE_DIGITS digits, a decimal + point, the letter E, a sign, and 3 digits */ + return (1 + ES_DOUBLE_DIGITS + 1 + 1 + 1 + 3); + + case ES_TYPE_MACADDR: + return 17; + case ES_TYPE_INET: + case ES_TYPE_CIDR: + return sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128"); + case ES_TYPE_UUID: + return 36; + case ES_TYPE_INTERVAL: + return 30; + + /* Character types use regular precision */ + default: + return estype_attr_column_size(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + } +} + +Int4 estype_attr_buffer_length(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as) { + int dsize; + + switch (type) { + case ES_TYPE_INT2: + return 2; /* sizeof(SQLSMALLINT) */ + + case ES_TYPE_OID: + case ES_TYPE_XID: + case ES_TYPE_INT4: + return 4; /* sizeof(SQLINTEGER) */ + + case ES_TYPE_INT8: + if (SQL_C_CHAR == estype_attr_to_ctype(conn, type, atttypmod)) + return 20; /* signed: 19 digits + sign */ + return 8; /* sizeof(SQLSBININT) */ + + case ES_TYPE_NUMERIC: + dsize = getNumericColumnSizeX(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + return dsize <= 0 ? 
dsize : dsize + 2; + + case ES_TYPE_FLOAT4: + case ES_TYPE_MONEY: + return 4; /* sizeof(SQLREAL) */ + + case ES_TYPE_FLOAT8: + return 8; /* sizeof(SQLFLOAT) */ + + case ES_TYPE_DATE: + case ES_TYPE_TIME: + return 6; /* sizeof(DATE(TIME)_STRUCT) */ + + case ES_TYPE_ABSTIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + return 16; /* sizeof(TIMESTAMP_STRUCT) */ + + case ES_TYPE_MACADDR: + return 17; + case ES_TYPE_INET: + case ES_TYPE_CIDR: + return sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128"); + case ES_TYPE_UUID: + return 16; /* sizeof(SQLGUID) */ + + /* Character types use the default precision */ + case ES_TYPE_VARCHAR: + case ES_TYPE_BPCHAR: { + int coef = 1; + Int4 prec = estype_attr_column_size(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as), + maxvarc; + if (SQL_NO_TOTAL == prec) + return prec; +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(conn)) + return prec * WCLEN; +#endif /* UNICODE_SUPPORT */ + coef = conn->mb_maxbyte_per_char; + if (coef < 2) + /* CR -> CR/LF */ + coef = 2; + if (coef == 1) + return prec; + maxvarc = MAX_VARCHAR_SIZE; + if (prec <= maxvarc && prec * coef > maxvarc) + return maxvarc; + return coef * prec; + } +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + case ES_TYPE_INTERVAL: + return sizeof(SQL_INTERVAL_STRUCT); +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + + default: + return estype_attr_column_size(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + } +} + +/* + */ +Int4 estype_attr_desclength(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as) { + int dsize; + + switch (type) { + case ES_TYPE_INT2: + return 2; + + case ES_TYPE_OID: + case ES_TYPE_XID: + case ES_TYPE_INT4: + return 4; + + case ES_TYPE_INT8: + return 20; /* signed: 19 digits + sign */ + + case ES_TYPE_NUMERIC: + dsize = getNumericColumnSizeX(conn, type, atttypmod, + adtsize_or_longestlen, + 
handle_unknown_size_as); + return dsize <= 0 ? dsize : dsize + 2; + + case ES_TYPE_FLOAT4: + case ES_TYPE_MONEY: + return 4; + + case ES_TYPE_FLOAT8: + return 8; + + case ES_TYPE_DATE: + case ES_TYPE_TIME: + case ES_TYPE_ABSTIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + case ES_TYPE_TIMESTAMP: + case ES_TYPE_VARCHAR: + case ES_TYPE_BPCHAR: + return estype_attr_column_size(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + default: + return estype_attr_column_size(conn, type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); + } +} + +Int2 estype_attr_decimal_digits(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int UNUSED_handle_unknown_size_as) { + switch (type) { + case ES_TYPE_INT2: + case ES_TYPE_OID: + case ES_TYPE_XID: + case ES_TYPE_INT4: + case ES_TYPE_INT8: + case ES_TYPE_FLOAT4: + case ES_TYPE_FLOAT8: + case ES_TYPE_MONEY: + case ES_TYPE_BOOL: + + /* + * Number of digits to the right of the decimal point in + * "yyyy-mm=dd hh:mm:ss[.f...]" + */ + case ES_TYPE_ABSTIME: + case ES_TYPE_TIMESTAMP: + return 0; + case ES_TYPE_TIME: + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + /* return 0; */ + return getTimestampDecimalDigitsX(conn, type, atttypmod); + + case ES_TYPE_NUMERIC: + return getNumericDecimalDigitsX(conn, type, atttypmod, + adtsize_or_longestlen, + UNUSED_handle_unknown_size_as); + +#ifdef ES_INTERVAL_AS_SQL_INTERVAL + case ES_TYPE_INTERVAL: + return getIntervalDecimalDigits(type, atttypmod); +#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ + + default: + return -1; + } +} + +Int2 estype_attr_scale(const ConnectionClass *conn, OID type, int atttypmod, + int adtsize_or_longestlen, + int UNUSED_handle_unknown_size_as) { + switch (type) { + case ES_TYPE_NUMERIC: + return getNumericDecimalDigitsX(conn, type, atttypmod, + adtsize_or_longestlen, + UNUSED_handle_unknown_size_as); + } + return -1; +} + +Int4 estype_attr_transfer_octet_length(const 
ConnectionClass *conn, OID type, + int atttypmod, + int handle_unknown_size_as) { + int coef = 1; + Int4 maxvarc, column_size; + + switch (type) { + case ES_TYPE_VARCHAR: + case ES_TYPE_BPCHAR: + case ES_TYPE_TEXT: + case ES_TYPE_UNKNOWN: + column_size = estype_attr_column_size( + conn, type, atttypmod, ES_ADT_UNSET, handle_unknown_size_as); + if (SQL_NO_TOTAL == column_size) + return column_size; +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(conn)) + return column_size * WCLEN; +#endif /* UNICODE_SUPPORT */ + coef = conn->mb_maxbyte_per_char; + if (coef < 2) + /* CR -> CR/LF */ + coef = 2; + if (coef == 1) + return column_size; + maxvarc = MAX_VARCHAR_SIZE; + if (column_size <= maxvarc && column_size * coef > maxvarc) + return maxvarc; + return coef * column_size; + case ES_TYPE_BYTEA: + return estype_attr_column_size(conn, type, atttypmod, ES_ADT_UNSET, + handle_unknown_size_as); + default: + if (type == (OID)conn->lobj_type) + return estype_attr_column_size(conn, type, atttypmod, + ES_ADT_UNSET, + handle_unknown_size_as); + } + return -1; +} + +/* + * Casting parameters e.g. ?::timestamp is much more flexible + * than specifying parameter datatype oids determined by + * sqltype_to_bind_estype() via parse message. 
+ */ +const char *sqltype_to_escast(const ConnectionClass *conn, + SQLSMALLINT fSqlType) { + const char *esCast = NULL_STRING; + + switch (fSqlType) { + case SQL_BINARY: + case SQL_VARBINARY: + esCast = "::bytea"; + break; + case SQL_TYPE_DATE: + case SQL_DATE: + esCast = "::date"; + break; + case SQL_DECIMAL: + case SQL_NUMERIC: + esCast = "::numeric"; + break; + case SQL_BIGINT: + esCast = "::int8"; + break; + case SQL_INTEGER: + esCast = "::int4"; + break; + case SQL_REAL: + esCast = "::float4"; + break; + case SQL_SMALLINT: + case SQL_TINYINT: + esCast = "::int2"; + break; + case SQL_TIME: + case SQL_TYPE_TIME: + esCast = "::time"; + break; + case SQL_TIMESTAMP: + case SQL_TYPE_TIMESTAMP: + esCast = "::timestamp"; + break; + case SQL_GUID: + if (ES_VERSION_GE(conn, 8.3)) + esCast = "::uuid"; + break; + case SQL_INTERVAL_MONTH: + case SQL_INTERVAL_YEAR: + case SQL_INTERVAL_YEAR_TO_MONTH: + case SQL_INTERVAL_DAY: + case SQL_INTERVAL_HOUR: + case SQL_INTERVAL_MINUTE: + case SQL_INTERVAL_SECOND: + case SQL_INTERVAL_DAY_TO_HOUR: + case SQL_INTERVAL_DAY_TO_MINUTE: + case SQL_INTERVAL_DAY_TO_SECOND: + case SQL_INTERVAL_HOUR_TO_MINUTE: + case SQL_INTERVAL_HOUR_TO_SECOND: + case SQL_INTERVAL_MINUTE_TO_SECOND: + esCast = "::interval"; + break; + } + + return esCast; +} + +OID sqltype_to_estype(const ConnectionClass *conn, SQLSMALLINT fSqlType) { + OID esType = 0; + switch (fSqlType) { + case SQL_BINARY: + esType = ES_TYPE_BYTEA; + break; + + case SQL_CHAR: + esType = ES_TYPE_BPCHAR; + break; + +#ifdef UNICODE_SUPPORT + case SQL_WCHAR: + esType = ES_TYPE_BPCHAR; + break; +#endif /* UNICODE_SUPPORT */ + + case SQL_BIT: + esType = ES_TYPE_BOOL; + break; + + case SQL_TYPE_DATE: + case SQL_DATE: + esType = ES_TYPE_DATE; + break; + + case SQL_DOUBLE: + case SQL_FLOAT: + esType = ES_TYPE_FLOAT8; + break; + + case SQL_DECIMAL: + case SQL_NUMERIC: + esType = ES_TYPE_NUMERIC; + break; + + case SQL_BIGINT: + esType = ES_TYPE_INT8; + break; + + case SQL_INTEGER: + esType = 
ES_TYPE_INT4; + break; + + case SQL_LONGVARBINARY: + esType = conn->lobj_type; + break; + + case SQL_LONGVARCHAR: + esType = ES_TYPE_VARCHAR; + break; + +#ifdef UNICODE_SUPPORT + case SQL_WLONGVARCHAR: + esType = ES_TYPE_VARCHAR; + break; +#endif /* UNICODE_SUPPORT */ + + case SQL_REAL: + esType = ES_TYPE_FLOAT4; + break; + + case SQL_SMALLINT: + case SQL_TINYINT: + esType = ES_TYPE_INT2; + break; + + case SQL_TIME: + case SQL_TYPE_TIME: + esType = ES_TYPE_TIME; + break; + + case SQL_TIMESTAMP: + case SQL_TYPE_TIMESTAMP: + esType = ES_TYPE_DATETIME; + break; + + case SQL_VARBINARY: + esType = ES_TYPE_BYTEA; + break; + + case SQL_VARCHAR: + esType = ES_TYPE_VARCHAR; + break; + +#ifdef UNICODE_SUPPORT + case SQL_WVARCHAR: + esType = ES_TYPE_VARCHAR; + break; +#endif /* UNICODE_SUPPORT */ + + case SQL_GUID: + if (ES_VERSION_GE(conn, 8.3)) + esType = ES_TYPE_UUID; + break; + + case SQL_INTERVAL_MONTH: + case SQL_INTERVAL_YEAR: + case SQL_INTERVAL_YEAR_TO_MONTH: + case SQL_INTERVAL_DAY: + case SQL_INTERVAL_HOUR: + case SQL_INTERVAL_MINUTE: + case SQL_INTERVAL_SECOND: + case SQL_INTERVAL_DAY_TO_HOUR: + case SQL_INTERVAL_DAY_TO_MINUTE: + case SQL_INTERVAL_DAY_TO_SECOND: + case SQL_INTERVAL_HOUR_TO_MINUTE: + case SQL_INTERVAL_HOUR_TO_SECOND: + case SQL_INTERVAL_MINUTE_TO_SECOND: + esType = ES_TYPE_INTERVAL; + break; + } + + return esType; +} + +static int getAtttypmodEtc(const StatementClass *stmt, int col, + int *adtsize_or_longestlen) { + int atttypmod = -1; + + if (NULL != adtsize_or_longestlen) + *adtsize_or_longestlen = ES_ADT_UNSET; + if (col >= 0) { + const QResultClass *res; + + if (res = SC_get_Curres(stmt), NULL != res) { + atttypmod = QR_get_atttypmod(res, col); + if (NULL != adtsize_or_longestlen) { + if (stmt->catalog_result) + *adtsize_or_longestlen = QR_get_fieldsize(res, col); + else { + *adtsize_or_longestlen = QR_get_display_size(res, col); + if (ES_TYPE_NUMERIC == QR_get_field_type(res, col) + && atttypmod < 0 && *adtsize_or_longestlen > 0) { + SQLULEN 
i; + size_t sval, maxscale = 0; + const char *tval, *sptr; + + for (i = 0; i < res->num_cached_rows; i++) { + tval = QR_get_value_backend_text(res, i, col); + if (NULL != tval) { + sptr = strchr(tval, '.'); + if (NULL != sptr) { + sval = strlen(tval) - (sptr + 1 - tval); + if (sval > maxscale) + maxscale = sval; + } + } + } + *adtsize_or_longestlen += (int)(maxscale << 16); + } + } + } + } + } + return atttypmod; +} + +/* + * There are two ways of calling this function: + * + * 1. When going through the supported ES types (SQLGetTypeInfo) + * + * 2. When taking any type id (SQLColumns, SQLGetData) + * + * The first type will always work because all the types defined are returned + *here. The second type will return a default based on global parameter when it + *does not know. This allows for supporting types that are unknown. All + *other es routines in here return a suitable default. + */ +SQLSMALLINT +estype_to_concise_type(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_to_concise_type(SC_get_conn(stmt), type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); +} + +SQLSMALLINT +estype_to_sqldesctype(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as) { + int adtsize_or_longestlen; + int atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + + return estype_attr_to_sqldesctype(SC_get_conn(stmt), type, atttypmod, + adtsize_or_longestlen, + handle_unknown_size_as); +} + +const char *estype_to_name(const StatementClass *stmt, OID type, int col, + BOOL auto_increment) { + int atttypmod = getAtttypmodEtc(stmt, col, NULL); + + return estype_attr_to_name(SC_get_conn(stmt), type, atttypmod, + auto_increment); +} + +/* + * This corresponds to "precision" in ODBC 2.x. 
+ * + * For ES_TYPE_VARCHAR, ES_TYPE_BPCHAR, ES_TYPE_NUMERIC, SQLColumns will + * override this length with the atttypmod length from es_attribute . + * + * If col >= 0, then will attempt to get the info from the result set. + * This is used for functions SQLDescribeCol and SQLColAttributes. + */ +Int4 /* Elasticsearch restriction */ +estype_column_size(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_column_size( + SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, + stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); +} + +/* + * precision in ODBC 3.x. + */ +SQLSMALLINT +estype_precision(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_precision( + SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, + stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); +} + +Int4 estype_display_size(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_display_size( + SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, + stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); +} + +/* + * The length in bytes of data transferred on an SQLGetData, SQLFetch, + * or SQLFetchScroll operation if SQL_C_DEFAULT is specified. 
+ */ +Int4 estype_buffer_length(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_buffer_length( + SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, + stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); +} + +/* + */ +Int4 estype_desclength(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_desclength( + SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, + stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); +} + +#ifdef NOT_USED +/* + * Transfer octet length. + */ +Int4 estype_transfer_octet_length(const StatementClass *stmt, OID type, + int column_size) { + ConnectionClass *conn = SC_get_conn(stmt); + + int coef = 1; + Int4 maxvarc; + switch (type) { + case ES_TYPE_VARCHAR: + case ES_TYPE_BPCHAR: + case ES_TYPE_TEXT: + if (SQL_NO_TOTAL == column_size) + return column_size; +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(conn)) + return column_size * WCLEN; +#endif /* UNICODE_SUPPORT */ + coef = conn->mb_maxbyte_per_char; + if (coef < 2 && (conn->connInfo).lf_conversion) + /* CR -> CR/LF */ + coef = 2; + if (coef == 1) + return column_size; + maxvarc = conn->connInfo.drivers.max_varchar_size; + if (column_size <= maxvarc && column_size * coef > maxvarc) + return maxvarc; + return coef * column_size; + case ES_TYPE_BYTEA: + return column_size; + default: + if (type == conn->lobj_type) + return column_size; + } + return -1; +} +#endif /* NOT_USED */ + +/* + * corrsponds to "min_scale" in ODBC 2.x. + */ +Int2 estype_min_decimal_digits(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return -1; +} + +/* + * corrsponds to "max_scale" in ODBC 2.x. 
+ */ +Int2 estype_max_decimal_digits(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return -1; +} + +/* + * corrsponds to "scale" in ODBC 2.x. + */ +Int2 estype_decimal_digits(const StatementClass *stmt, OID type, int col) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_decimal_digits(SC_get_conn(stmt), type, atttypmod, + adtsize_or_longestlen, + UNUSED_HANDLE_UNKNOWN_SIZE_AS); +} + +/* + * "scale" in ODBC 3.x. + */ +Int2 estype_scale(const StatementClass *stmt, OID type, int col) { + int atttypmod, adtsize_or_longestlen; + + atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); + return estype_attr_scale(SC_get_conn(stmt), type, atttypmod, + adtsize_or_longestlen, + UNUSED_HANDLE_UNKNOWN_SIZE_AS); +} + +Int2 estype_radix(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return 10; +} + +Int2 estype_nullable(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return SQL_NULLABLE_UNKNOWN; /* everything should be nullable unknown */ +} + +Int2 estype_auto_increment(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return SQL_FALSE; +} + +Int2 estype_case_sensitive(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + switch (type) { + case ES_TYPE_KEYWORD: + case ES_TYPE_TEXT: + return SQL_TRUE; + + default: + return SQL_FALSE; + } +} + +Int2 estype_money(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return SQL_FALSE; +} + +Int2 estype_searchable(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return SQL_SEARCHABLE; +} + +Int2 estype_unsigned(const ConnectionClass *conn, OID type) { + UNUSED(conn); + switch (type) { + case ES_TYPE_BOOL: + case ES_TYPE_KEYWORD: + case ES_TYPE_TEXT: + case ES_TYPE_NESTED: + case ES_TYPE_DATETIME: + case ES_TYPE_OBJECT: + return SQL_TRUE; + + case ES_TYPE_INT1: + case ES_TYPE_INT2: + case ES_TYPE_INT4: + case ES_TYPE_INT8: + case 
ES_TYPE_HALF_FLOAT: + case ES_TYPE_FLOAT4: + case ES_TYPE_FLOAT8: + case ES_TYPE_SCALED_FLOAT: + return SQL_FALSE; + + default: + return -1; + } +} + +const char *estype_literal_prefix(const ConnectionClass *conn, OID type) { + UNUSED(conn); + switch (type) { + case ES_TYPE_KEYWORD: + case ES_TYPE_TEXT: + case ES_TYPE_NESTED: + case ES_TYPE_OBJECT: + return "\""; + default: + return ""; + } +} + +const char *estype_literal_suffix(const ConnectionClass *conn, OID type) { + UNUSED(conn); + switch (type) { + case ES_TYPE_KEYWORD: + case ES_TYPE_TEXT: + case ES_TYPE_NESTED: + case ES_TYPE_OBJECT: + return "\""; + default: + return ""; + } +} + +const char *estype_create_params(const ConnectionClass *conn, OID type) { + UNUSED(conn, type); + return NULL; +} + +SQLSMALLINT +sqltype_to_default_ctype(const ConnectionClass *conn, SQLSMALLINT sqltype) { + /* + * from the table on page 623 of ODBC 2.0 Programmer's Reference + * (Appendix D) + */ + switch (sqltype) { + case SQL_CHAR: + case SQL_VARCHAR: + case SQL_LONGVARCHAR: + case SQL_DECIMAL: + case SQL_NUMERIC: + return SQL_C_CHAR; + case SQL_BIGINT: + return ALLOWED_C_BIGINT; + +#ifdef UNICODE_SUPPORT + case SQL_WCHAR: + case SQL_WVARCHAR: + case SQL_WLONGVARCHAR: + return ansi_to_wtype(conn, SQL_C_CHAR); +#endif /* UNICODE_SUPPORT */ + + case SQL_BIT: + return SQL_C_BIT; + + case SQL_TINYINT: + return SQL_C_STINYINT; + + case SQL_SMALLINT: + return SQL_C_SSHORT; + + case SQL_INTEGER: + return SQL_C_SLONG; + + case SQL_REAL: + return SQL_C_FLOAT; + + case SQL_FLOAT: + case SQL_DOUBLE: + return SQL_C_DOUBLE; + + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + return SQL_C_BINARY; + + case SQL_DATE: + return SQL_C_DATE; + + case SQL_TIME: + return SQL_C_TIME; + + case SQL_TIMESTAMP: + return SQL_C_TIMESTAMP; + + case SQL_TYPE_DATE: + return SQL_C_TYPE_DATE; + + case SQL_TYPE_TIME: + return SQL_C_TYPE_TIME; + + case SQL_TYPE_TIMESTAMP: + return SQL_C_TYPE_TIMESTAMP; + + case SQL_GUID: + if (conn->ms_jet) 
+ return SQL_C_CHAR; + else + return SQL_C_GUID; + + default: + /* should never happen */ + return SQL_C_CHAR; + } +} + +Int4 ctype_length(SQLSMALLINT ctype) { + switch (ctype) { + case SQL_C_SSHORT: + case SQL_C_SHORT: + return sizeof(SWORD); + + case SQL_C_USHORT: + return sizeof(UWORD); + + case SQL_C_SLONG: + case SQL_C_LONG: + return sizeof(SDWORD); + + case SQL_C_ULONG: + return sizeof(UDWORD); + + case SQL_C_FLOAT: + return sizeof(SFLOAT); + + case SQL_C_DOUBLE: + return sizeof(SDOUBLE); + + case SQL_C_BIT: + return sizeof(UCHAR); + + case SQL_C_STINYINT: + case SQL_C_TINYINT: + return sizeof(SCHAR); + + case SQL_C_UTINYINT: + return sizeof(UCHAR); + + case SQL_C_DATE: + case SQL_C_TYPE_DATE: + return sizeof(DATE_STRUCT); + + case SQL_C_TIME: + case SQL_C_TYPE_TIME: + return sizeof(TIME_STRUCT); + + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_TIMESTAMP: + return sizeof(TIMESTAMP_STRUCT); + + case SQL_C_GUID: + return sizeof(SQLGUID); + case SQL_C_INTERVAL_YEAR: + case SQL_C_INTERVAL_MONTH: + case SQL_C_INTERVAL_YEAR_TO_MONTH: + case SQL_C_INTERVAL_DAY: + case SQL_C_INTERVAL_HOUR: + case SQL_C_INTERVAL_DAY_TO_HOUR: + case SQL_C_INTERVAL_MINUTE: + case SQL_C_INTERVAL_DAY_TO_MINUTE: + case SQL_C_INTERVAL_HOUR_TO_MINUTE: + case SQL_C_INTERVAL_SECOND: + case SQL_C_INTERVAL_DAY_TO_SECOND: + case SQL_C_INTERVAL_HOUR_TO_SECOND: + case SQL_C_INTERVAL_MINUTE_TO_SECOND: + return sizeof(SQL_INTERVAL_STRUCT); + case SQL_C_NUMERIC: + return sizeof(SQL_NUMERIC_STRUCT); + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + return sizeof(SQLBIGINT); + + case SQL_C_BINARY: + case SQL_C_CHAR: +#ifdef UNICODE_SUPPORT + case SQL_C_WCHAR: +#endif /* UNICODE_SUPPORT */ + return 0; + + default: /* should never happen */ + return 0; + } +} diff --git a/sql-odbc/src/odfesqlodbc/es_types.h b/sql-odbc/src/odfesqlodbc/es_types.h new file mode 100644 index 0000000000..596534017d --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_types.h @@ -0,0 +1,333 @@ +/* + * Copyright <2019> Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef ES_TYPES +#define ES_TYPES + +#include "dlg_specific.h" +#include "es_odbc.h" +#ifdef __cplusplus +extern "C" { +#endif + +/* the type numbers are defined by the OID's of the types' rows */ +/* in table es_type */ + +#ifdef NOT_USED +#define ES_TYPE_LO ? ? ? ? /* waiting for permanent type */ +#endif + +#define ES_TYPE_NAME_BOOLEAN "boolean"; +#define ES_TYPE_NAME_BYTE "byte"; +#define ES_TYPE_NAME_SHORT "short"; +#define ES_TYPE_NAME_INTEGER "integer"; +#define ES_TYPE_NAME_LONG "long"; +#define ES_TYPE_NAME_HALF_FLOAT "half_float"; +#define ES_TYPE_NAME_FLOAT "float"; +#define ES_TYPE_NAME_DOUBLE "double"; +#define ES_TYPE_NAME_SCALED_FLOAT "scaled_float"; +#define ES_TYPE_NAME_KEYWORD "keyword"; +#define ES_TYPE_NAME_TEXT "text"; +#define ES_TYPE_NAME_NESTED "nested"; +#define ES_TYPE_NAME_DATE "date"; +#define ES_TYPE_NAME_OBJECT "object"; +#define ES_TYPE_NAME_VARCHAR "varchar"; +#define ES_TYPE_NAME_UNSUPPORTED "unsupported"; + +#define MS_ACCESS_SERIAL "int identity" +#define ES_TYPE_BOOL 16 +#define ES_TYPE_BYTEA 17 +#define ES_TYPE_CHAR 18 +#define ES_TYPE_NAME 19 +#define ES_TYPE_INT8 20 +#define ES_TYPE_INT2 21 +#define ES_TYPE_INT2VECTOR 22 +#define ES_TYPE_INT4 23 +#define ES_TYPE_REGPROC 24 +#define ES_TYPE_TEXT 25 +#define ES_TYPE_OID 26 +#define ES_TYPE_TID 27 +#define ES_TYPE_XID 28 +#define ES_TYPE_CID 29 +#define ES_TYPE_OIDVECTOR 30 +#define ES_TYPE_INT1 31 +#define 
ES_TYPE_HALF_FLOAT 32 +#define ES_TYPE_SCALED_FLOAT 33 +#define ES_TYPE_KEYWORD 34 +#define ES_TYPE_NESTED 35 +#define ES_TYPE_OBJECT 36 +#define ES_TYPE_XML 142 +#define ES_TYPE_XMLARRAY 143 +#define ES_TYPE_CIDR 650 +#define ES_TYPE_FLOAT4 700 +#define ES_TYPE_FLOAT8 701 +#define ES_TYPE_ABSTIME 702 +#define ES_TYPE_UNKNOWN 705 +#define ES_TYPE_MONEY 790 +#define ES_TYPE_MACADDR 829 +#define ES_TYPE_INET 869 +#define ES_TYPE_TEXTARRAY 1009 +#define ES_TYPE_BPCHARARRAY 1014 +#define ES_TYPE_VARCHARARRAY 1015 +#define ES_TYPE_BPCHAR 1042 +#define ES_TYPE_VARCHAR 1043 +#define ES_TYPE_DATE 1082 +#define ES_TYPE_TIME 1083 +#define ES_TYPE_TIMESTAMP_NO_TMZONE 1114 /* since 7.2 */ +#define ES_TYPE_DATETIME 1184 /* timestamptz */ +#define ES_TYPE_INTERVAL 1186 +#define ES_TYPE_TIME_WITH_TMZONE 1266 /* since 7.1 */ +#define ES_TYPE_TIMESTAMP 1296 /* deprecated since 7.0 */ +#define ES_TYPE_BIT 1560 +#define ES_TYPE_NUMERIC 1700 +#define ES_TYPE_REFCURSOR 1790 +#define ES_TYPE_RECORD 2249 +#define ES_TYPE_ANY 2276 +#define ES_TYPE_VOID 2278 +#define ES_TYPE_UUID 2950 +#define INTERNAL_ASIS_TYPE (-9999) + +#define TYPE_MAY_BE_ARRAY(type) \ + ((type) == ES_TYPE_XMLARRAY || ((type) >= 1000 && (type) <= 1041)) +/* extern Int4 es_types_defined[]; */ +extern SQLSMALLINT sqlTypes[]; + +/* Defines for estype_precision */ +#define ES_ATP_UNSET (-3) /* atttypmod */ +#define ES_ADT_UNSET (-3) /* adtsize_or_longestlen */ +#define ES_UNKNOWNS_UNSET 0 /* UNKNOWNS_AS_MAX */ +#define ES_WIDTH_OF_BOOLS_AS_CHAR 5 + +/* + * SQL_INTERVAL support is disabled because I found + * some applications which are unhappy with it. 
+ * +#define ES_INTERVAL_AS_SQL_INTERVAL + */ + +OID es_true_type(const ConnectionClass *, OID, OID); +OID sqltype_to_estype(const ConnectionClass *conn, SQLSMALLINT fSqlType); +const char *sqltype_to_escast(const ConnectionClass *conn, + SQLSMALLINT fSqlType); + +SQLSMALLINT estype_to_concise_type(const StatementClass *stmt, OID type, + int col, int handle_unknown_size_as); +SQLSMALLINT estype_to_sqldesctype(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as); +const char *estype_to_name(const StatementClass *stmt, OID type, int col, + BOOL auto_increment); + +SQLSMALLINT estype_attr_to_concise_type(const ConnectionClass *conn, OID type, + int typmod, int adtsize_or_longestlen, + int handle_unknown_size_as); +SQLSMALLINT estype_attr_to_sqldesctype(const ConnectionClass *conn, OID type, + int typmod, int adtsize_or_longestlen, + int handle_unknown_size_as); +SQLSMALLINT estype_attr_to_datetime_sub(const ConnectionClass *conn, OID type, + int typmod); +SQLSMALLINT estype_attr_to_ctype(const ConnectionClass *conn, OID type, + int typmod); +const char *estype_attr_to_name(const ConnectionClass *conn, OID type, + int typmod, BOOL auto_increment); +Int4 estype_attr_column_size(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longest, + int handle_unknown_size_as); +Int4 estype_attr_buffer_length(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as); +Int4 estype_attr_display_size(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as); +Int2 estype_attr_decimal_digits(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as); +Int4 estype_attr_transfer_octet_length(const ConnectionClass *conn, OID type, + int atttypmod, + int handle_unknown_size_as); +SQLSMALLINT estype_attr_precision(const ConnectionClass *conn, OID type, + int atttypmod, int 
adtsize_or_longest, + int handle_unknown_size_as); +Int4 estype_attr_desclength(const ConnectionClass *conn, OID type, + int atttypmod, int adtsize_or_longestlen, + int handle_unknown_size_as); +Int2 estype_attr_scale(const ConnectionClass *conn, OID type, int atttypmod, + int adtsize_or_longestlen, int handle_unknown_size_as); + +/* These functions can use static numbers or result sets(col parameter) */ +Int4 estype_column_size( + const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as); /* corresponds to "precision" in ODBC 2.x */ +SQLSMALLINT estype_precision( + const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as); /* "precsion in ODBC 3.x */ +/* the following size/length are of Int4 due to ES restriction */ +Int4 estype_display_size(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as); +Int4 estype_buffer_length(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as); +Int4 estype_desclength(const StatementClass *stmt, OID type, int col, + int handle_unknown_size_as); +// Int4 estype_transfer_octet_length(const ConnectionClass *conn, OID type, +// int column_size); + +SQLSMALLINT estype_decimal_digits( + const StatementClass *stmt, OID type, + int col); /* corresponds to "scale" in ODBC 2.x */ +SQLSMALLINT estype_min_decimal_digits( + const ConnectionClass *conn, + OID type); /* corresponds to "min_scale" in ODBC 2.x */ +SQLSMALLINT estype_max_decimal_digits( + const ConnectionClass *conn, + OID type); /* corresponds to "max_scale" in ODBC 2.x */ +SQLSMALLINT estype_scale(const StatementClass *stmt, OID type, + int col); /* ODBC 3.x " */ +Int2 estype_radix(const ConnectionClass *conn, OID type); +Int2 estype_nullable(const ConnectionClass *conn, OID type); +Int2 estype_auto_increment(const ConnectionClass *conn, OID type); +Int2 estype_case_sensitive(const ConnectionClass *conn, OID type); +Int2 estype_money(const ConnectionClass *conn, OID type); +Int2 
estype_searchable(const ConnectionClass *conn, OID type); +Int2 estype_unsigned(const ConnectionClass *conn, OID type); +const char *estype_literal_prefix(const ConnectionClass *conn, OID type); +const char *estype_literal_suffix(const ConnectionClass *conn, OID type); +const char *estype_create_params(const ConnectionClass *conn, OID type); + +SQLSMALLINT sqltype_to_default_ctype(const ConnectionClass *stmt, + SQLSMALLINT sqltype); +Int4 ctype_length(SQLSMALLINT ctype); + +SQLSMALLINT ansi_to_wtype(const ConnectionClass *self, SQLSMALLINT ansitype); + +#ifdef __cplusplus +} +#endif + +typedef enum { + CONNECTION_OK, + CONNECTION_BAD, + /* Non-blocking mode only below here */ + + /* + * The existence of these should never be relied upon - they should only + * be used for user feedback or similar purposes. + */ + CONNECTION_STARTED, /* Waiting for connection to be made. */ + CONNECTION_MADE, /* Connection OK; waiting to send. */ + CONNECTION_AWAITING_RESPONSE, /* Waiting for a response from the postmaster. + */ + CONNECTION_AUTH_OK, /* Received authentication; waiting for backend startup. + */ + CONNECTION_SETENV, /* Negotiating environment. */ + CONNECTION_SSL_STARTUP, /* Negotiating SSL. */ + CONNECTION_NEEDED, /* Internal state: connect() needed */ + CONNECTION_CHECK_WRITABLE, /* Check if we could make a writable connection. + */ + CONNECTION_CONSUME, /* Wait for any pending message and consume them. */ + CONNECTION_GSS_STARTUP /* Negotiating GSSAPI. 
*/ +} ConnStatusType; + +// Only expose this to C++ code, this will be passed through the C interface as +// a void* +#ifdef __cplusplus +#include + +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif // __APPLE__ +#include "rabbit.hpp" +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + +#include +#include + +typedef struct authentication_options { + std::string auth_type; + std::string username; + std::string password; + std::string region; +} authentication_options; + +typedef struct encryption_options { + bool use_ssl; + bool verify_server; + std::string certificate_type; + std::string certificate; + std::string key; + std::string key_pw; +} encryption_options; + +typedef struct connection_options { + std::string server; + std::string port; + std::string timeout; + std::string fetch_size; +} connection_options; + +typedef struct runtime_options { + connection_options conn; + authentication_options auth; + encryption_options crypt; +} runtime_options; + +#define INVALID_OID 0 +#define KEYWORD_TYPE_OID 1043 +#define KEYWORD_TYPE_SIZE 255 +#define KEYWORD_DISPLAY_SIZE 255 +#define KEYWORD_LENGTH_OF_STR 255 + +// Copied from ColumnInfoClass's 'srvr_info' struct. Comments are the relevant +// name in 'srvr_info' +typedef struct ColumnInfo { + std::string field_name; // name + uint32_t type_oid; // adtid + int16_t type_size; // adtsize + int32_t display_size; // longest row + int32_t length_of_str; // the length of bpchar/varchar + uint32_t relation_id; // relid + int16_t attribute_number; // attid + ColumnInfo() { + field_name = ""; + type_oid = INVALID_OID; + type_size = 0; // ? + display_size = 0; // ? + length_of_str = 0; // ? + relation_id = INVALID_OID; + attribute_number = INVALID_OID; + } +} ColumnInfo; + +typedef struct ESResult { + uint32_t ref_count; // reference count. A ColumnInfo can be shared by + // several qresults. 
+ uint16_t num_fields; + std::vector< ColumnInfo > column_info; + std::string cursor; + std::string result_json; + std::string command_type; // SELECT / FETCH / etc + rabbit::document es_result_doc; + ESResult() { + ref_count = 0; + num_fields = 0; + result_json = ""; + command_type = ""; + } +} ESResult; + +#endif +#endif diff --git a/sql-odbc/src/odfesqlodbc/es_utility.cpp b/sql-odbc/src/odfesqlodbc/es_utility.cpp new file mode 100644 index 0000000000..1da446eaae --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_utility.cpp @@ -0,0 +1,126 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "es_utility.h" + +#include +#include +#include +#include +#include +#include + +#include + +// Used in the event that we run out of memory. 
This way we have a way of +// settings the buffer to point at an empty char array (because the buffer +// itself isn't const, we can't set this to const without having to later cast +// it away) +static char oom_buffer[1] = ""; +static char *oom_buffer_ptr = oom_buffer; + +static void MarkESExpBufferBroken(ESExpBuffer str) { + if (str->data != oom_buffer) + free(str->data); + str->data = oom_buffer_ptr; + str->len = 0; + str->maxlen = 0; +} + +static bool EnlargeESExpBuffer(ESExpBuffer str, size_t needed) { + if (ESExpBufferBroken(str)) + return 0; + + if (needed >= ((size_t)INT_MAX - str->len)) { + MarkESExpBufferBroken(str); + return false; + } + + needed += str->len + 1; + if (needed <= str->maxlen) + return true; + + size_t newlen = (str->maxlen > 0) ? (2 * str->maxlen) : 64; + while (needed > newlen) + newlen = 2 * newlen; + + if (newlen > (size_t)INT_MAX) + newlen = (size_t)INT_MAX; + + char *newdata = (char *)realloc(str->data, newlen); + if (newdata != NULL) { + str->data = newdata; + str->maxlen = newlen; + return true; + } + + MarkESExpBufferBroken(str); + return false; +} + +static bool AppendESExpBufferVA(ESExpBuffer str, const char *fmt, + va_list args) { + size_t needed = 32; + if (str->maxlen > (str->len + 16)) { + size_t avail = str->maxlen - str->len; + + int nprinted = vsnprintf(str->data + str->len, avail, fmt, args); + if ((nprinted < 0) || (nprinted > (INT_MAX - 1))) { + MarkESExpBufferBroken(str); + return true; + } else if ((size_t)nprinted < avail) { + str->len += nprinted; + return true; + } + needed = nprinted + 1; + } + return !EnlargeESExpBuffer(str, needed); +} + +void InitESExpBuffer(ESExpBuffer str) { + str->data = (char *)malloc(INITIAL_EXPBUFFER_SIZE); + if (str->data == NULL) { + str->data = oom_buffer_ptr; + str->maxlen = 0; + } else { + str->maxlen = INITIAL_EXPBUFFER_SIZE; + str->data[0] = '\0'; + } + str->len = 0; +} + +void AppendESExpBuffer(ESExpBuffer str, const char *fmt, ...) 
{ + if (ESExpBufferBroken(str)) + return; + + va_list args; + bool done = false; + int save_errno = errno; + do { + errno = save_errno; + va_start(args, fmt); + done = AppendESExpBufferVA(str, fmt, args); + va_end(args); + } while (!done); +} + +void TermESExpBuffer(ESExpBuffer str) { + if (str->data != oom_buffer) + free(str->data); + str->data = oom_buffer_ptr; + str->maxlen = 0; + str->len = 0; +} diff --git a/sql-odbc/src/odfesqlodbc/es_utility.h b/sql-odbc/src/odfesqlodbc/es_utility.h new file mode 100644 index 0000000000..0093a39ef0 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/es_utility.h @@ -0,0 +1,46 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef ES_UTILITY_H +#define ES_UTILITY_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ESExpBufferData { + char *data; + size_t len; + size_t maxlen; +} ESExpBufferData; + +typedef ESExpBufferData *ESExpBuffer; + +#define ESExpBufferBroken(str) ((str) == NULL || (str)->maxlen == 0) +#define ESExpBufferDataBroken(buf) ((buf).maxlen == 0) +#define INITIAL_EXPBUFFER_SIZE 256 + +void InitESExpBuffer(ESExpBuffer str); +void AppendESExpBuffer(ESExpBuffer str, const char *fmt, ...); +void TermESExpBuffer(ESExpBuffer str); + +#ifdef __cplusplus +} +#endif + +#endif /* ES_UTILITY_H */ diff --git a/sql-odbc/src/odfesqlodbc/execute.c b/sql-odbc/src/odfesqlodbc/execute.c new file mode 100644 index 0000000000..5d14b6dd65 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/execute.c @@ -0,0 +1,150 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include +#include + +#include "es_odbc.h" +#include "misc.h" + +#ifndef WIN32 +#include +#endif /* WIN32 */ + +#include "bind.h" +#include "convert.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_statement.h" +#include "es_types.h" +#include "qresult.h" +#include "statement.h" + +RETCODE SQL_API ESAPI_Prepare(HSTMT hstmt, const SQLCHAR *stmt_str, + SQLINTEGER stmt_sz) { + if (hstmt == NULL) + return SQL_ERROR; + + // We know cursor is not open at this point + StatementClass *stmt = (StatementClass *)hstmt; + + // PrepareStatement deallocates memory if necessary + RETCODE ret = PrepareStatement(stmt, stmt_str, stmt_sz); + if (ret != SQL_SUCCESS) + return ret; + + // Execute the statement + ret = ExecuteStatement(stmt, FALSE); + if (ret == SQL_SUCCESS) + stmt->prepared = PREPARED; + + return ret; +} + +RETCODE SQL_API ESAPI_Execute(HSTMT hstmt) { + if (hstmt == NULL) + return SQL_ERROR; + + // We know cursor is not open at this point + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE ret = SQL_ERROR; + switch (stmt->prepared) { + case PREPARED: + ret = AssignResult(stmt); + stmt->prepared = EXECUTED; + break; + case EXECUTED: + ret = RePrepareStatement(stmt); + if (ret != SQL_SUCCESS) + break; + ret = ExecuteStatement(stmt, TRUE); + if (ret != SQL_SUCCESS) + break; + stmt->prepared = EXECUTED; + break; + case NOT_PREPARED: + ret = SQL_ERROR; + break; + default: + break; + } + return ret; +} + +RETCODE SQL_API ESAPI_ExecDirect(HSTMT hstmt, const SQLCHAR *stmt_str, + SQLINTEGER stmt_sz, BOOL commit) { + if (hstmt == NULL) + return SQL_ERROR; + + // We know cursor is not open at this point + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE ret = PrepareStatement(stmt, stmt_str, stmt_sz); + if (ret != SQL_SUCCESS) + return ret; + + // Execute statement + ret = ExecuteStatement(hstmt, commit); + if (ret != SQL_SUCCESS) + return ret; + stmt->prepared = NOT_PREPARED; + return ret; +} + +/* + * 
Returns the SQL string as modified by the driver. + * Currently, just copy the input string without modification + * observing buffer limits and truncation. + */ +RETCODE SQL_API ESAPI_NativeSql(HDBC hdbc, const SQLCHAR *szSqlStrIn, + SQLINTEGER cbSqlStrIn, SQLCHAR *szSqlStr, + SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr) { + CSTR func = "ESAPI_NativeSql"; + size_t len = 0; + char *ptr; + ConnectionClass *conn = (ConnectionClass *)hdbc; + RETCODE result; + + MYLOG(ES_TRACE, "entering...cbSqlStrIn=" FORMAT_INTEGER "\n", cbSqlStrIn); + + ptr = (cbSqlStrIn == 0) ? "" : make_string(szSqlStrIn, cbSqlStrIn, NULL, 0); + if (!ptr) { + CC_set_error(conn, CONN_NO_MEMORY_ERROR, + "No memory available to store native sql string", func); + return SQL_ERROR; + } + + result = SQL_SUCCESS; + len = strlen(ptr); + + if (szSqlStr) { + strncpy_null((char *)szSqlStr, ptr, cbSqlStrMax); + + if (len >= (size_t)cbSqlStrMax) { + result = SQL_SUCCESS_WITH_INFO; + CC_set_error(conn, CONN_TRUNCATED, + "The buffer was too small for the NativeSQL.", func); + } + } + + if (pcbSqlStr) + *pcbSqlStr = (SQLINTEGER)len; + + if (cbSqlStrIn) + free(ptr); + + return result; +} diff --git a/sql-odbc/src/odfesqlodbc/info.c b/sql-odbc/src/odfesqlodbc/info.c new file mode 100644 index 0000000000..efae0d7cbf --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/info.c @@ -0,0 +1,1861 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "es_odbc.h" +#include "unicode_support.h" + +#include +#include + +#ifndef WIN32 +#include +#endif + +#include "dlg_specific.h" +#include "es_types.h" +#include "tuple.h" + +#include "bind.h" +#include "catfunc.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_info.h" +#include "es_types.h" +#include "misc.h" +#include "multibyte.h" +#include "qresult.h" +#include "statement.h" +#include "tuple.h" + +/* Trigger related stuff for SQLForeign Keys */ +#define TRIGGER_SHIFT 3 +#define TRIGGER_MASK 0x03 +#define TRIGGER_DELETE 0x01 +#define TRIGGER_UPDATE 0x02 + +RETCODE SQL_API ESAPI_GetInfo(HDBC hdbc, SQLUSMALLINT fInfoType, + PTR rgbInfoValue, SQLSMALLINT cbInfoValueMax, + SQLSMALLINT *pcbInfoValue) { + CSTR func = "ESAPI_GetInfo"; + ConnectionClass *conn = (ConnectionClass *)hdbc; + ConnInfo *ci; + const char *p = NULL; + char tmp[MAX_INFO_STRING]; + SQLULEN len = 0, value = 0; + RETCODE ret = SQL_ERROR; + char odbcver[16]; + + MYLOG(ES_TRACE, "entering...fInfoType=%d\n", fInfoType); + + if (!conn) { + CC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + + ci = &(conn->connInfo); + + switch (fInfoType) { + case SQL_ACCESSIBLE_PROCEDURES: /* ODBC 1.0 */ + p = "N"; + break; + + case SQL_ACCESSIBLE_TABLES: /* ODBC 1.0 */ + p = "N"; + break; + + case SQL_ACTIVE_CONNECTIONS: /* ODBC 1.0 */ + len = 2; + value = 0; + break; + + case SQL_ACTIVE_STATEMENTS: /* ODBC 1.0 */ + len = 2; + value = 0; + break; + + case SQL_ALTER_TABLE: /* ODBC 2.0 */ + len = 4; + value = SQL_AT_ADD_COLUMN | SQL_AT_DROP_COLUMN + | SQL_AT_ADD_COLUMN_SINGLE | SQL_AT_ADD_CONSTRAINT + | SQL_AT_ADD_TABLE_CONSTRAINT + | SQL_AT_CONSTRAINT_INITIALLY_DEFERRED + | SQL_AT_CONSTRAINT_INITIALLY_IMMEDIATE + | SQL_AT_CONSTRAINT_DEFERRABLE + | SQL_AT_DROP_TABLE_CONSTRAINT_RESTRICT + | SQL_AT_DROP_TABLE_CONSTRAINT_CASCADE + | SQL_AT_DROP_COLUMN_RESTRICT | SQL_AT_DROP_COLUMN_CASCADE; + break; + + case SQL_BOOKMARK_PERSISTENCE: /* 
ODBC 2.0 */ + /* very simple bookmark support */ + len = 4; + value = SQL_BP_SCROLL | SQL_BP_DELETE | SQL_BP_UPDATE + | SQL_BP_TRANSACTION; + break; + + case SQL_COLUMN_ALIAS: /* ODBC 2.0 */ + p = "Y"; + break; + + case SQL_CONCAT_NULL_BEHAVIOR: /* ODBC 1.0 */ + len = 2; + value = SQL_CB_NULL; + break; + + case SQL_CONVERT_GUID: + case SQL_CONVERT_INTEGER: + case SQL_CONVERT_SMALLINT: + case SQL_CONVERT_TINYINT: + case SQL_CONVERT_BIT: + case SQL_CONVERT_VARCHAR: + case SQL_CONVERT_BIGINT: + case SQL_CONVERT_DECIMAL: + case SQL_CONVERT_DOUBLE: + case SQL_CONVERT_FLOAT: + case SQL_CONVERT_NUMERIC: + case SQL_CONVERT_REAL: + case SQL_CONVERT_DATE: + case SQL_CONVERT_TIME: + case SQL_CONVERT_TIMESTAMP: + case SQL_CONVERT_BINARY: + case SQL_CONVERT_LONGVARBINARY: + case SQL_CONVERT_VARBINARY: /* ODBC 1.0 */ + case SQL_CONVERT_CHAR: + case SQL_CONVERT_LONGVARCHAR: +#ifdef UNICODE_SUPPORT + case SQL_CONVERT_WCHAR: + case SQL_CONVERT_WLONGVARCHAR: + case SQL_CONVERT_WVARCHAR: +#endif /* UNICODE_SUPPORT */ + len = sizeof(SQLUINTEGER); + value = 0; /* CONVERT is unavailable */ + break; + + case SQL_CONVERT_FUNCTIONS: /* ODBC 1.0 */ + len = sizeof(SQLUINTEGER); + value = SQL_FN_CVT_CAST; + MYLOG(ES_DEBUG, "CONVERT_FUNCTIONS=" FORMAT_ULEN "\n", value); + break; + + case SQL_CORRELATION_NAME: /* ODBC 1.0 */ + + /* + * Saying no correlation name makes Query not work right. + * value = SQL_CN_NONE; + */ + len = 2; + value = SQL_CN_ANY; + break; + + case SQL_CURSOR_COMMIT_BEHAVIOR: /* ODBC 1.0 */ + len = 2; + value = SQL_CB_CLOSE; + break; + + case SQL_CURSOR_ROLLBACK_BEHAVIOR: /* ODBC 1.0 */ + len = 2; + value = SQL_CB_PRESERVE; + break; + + case SQL_DATA_SOURCE_NAME: /* ODBC 1.0 */ + p = CC_get_DSN(conn); + break; + + case SQL_DATA_SOURCE_READ_ONLY: /* ODBC 1.0 */ + p = "Y"; + break; + + case SQL_DATABASE_NAME: /* Support for old ODBC 1.0 Apps */ + + /* + * Returning the database name causes problems in MS Query. 
It + * generates query like: "SELECT DISTINCT a FROM byronnbad3 + * bad3" + * + * p = CC_get_database(conn); + */ + p = CurrCatString(conn); + break; + + case SQL_DBMS_NAME: /* ODBC 1.0 */ + p = "Elasticsearch"; + break; + + case SQL_DBMS_VER: /* ODBC 1.0 */ + STRCPY_FIXED(tmp, conn->es_version); + p = tmp; + break; + + case SQL_DEFAULT_TXN_ISOLATION: /* ODBC 1.0 */ + len = 4; + if (0 == conn->default_isolation) + conn->isolation = CC_get_isolation(conn); + value = conn->default_isolation; + break; + + case SQL_DRIVER_NAME: /* ODBC 1.0 */ + p = DRIVER_FILE_NAME; + break; + + case SQL_DRIVER_ODBC_VER: + SPRINTF_FIXED(odbcver, "%02x.%02x", ODBCVER / 256, ODBCVER % 256); + /* p = DRIVER_ODBC_VER; */ + p = odbcver; + break; + + case SQL_DRIVER_VER: /* ODBC 1.0 */ + p = ELASTICSEARCHDRIVERVERSION; + break; + + case SQL_EXPRESSIONS_IN_ORDERBY: /* ODBC 1.0 */ + p = "Y"; + break; + + case SQL_FETCH_DIRECTION: /* ODBC 1.0 */ + len = 4; + value = (SQL_FD_FETCH_NEXT | SQL_FD_FETCH_FIRST | SQL_FD_FETCH_LAST + | SQL_FD_FETCH_PRIOR | SQL_FD_FETCH_ABSOLUTE + | SQL_FD_FETCH_RELATIVE | SQL_FD_FETCH_BOOKMARK); + break; + + case SQL_FILE_USAGE: /* ODBC 2.0 */ + len = 2; + value = SQL_FILE_NOT_SUPPORTED; + break; + + case SQL_GETDATA_EXTENSIONS: /* ODBC 2.0 */ + len = 4; + value = (SQL_GD_ANY_COLUMN | SQL_GD_ANY_ORDER | SQL_GD_BOUND + | SQL_GD_BLOCK); + break; + + case SQL_GROUP_BY: /* ODBC 2.0 */ + len = 2; + value = SQL_GB_GROUP_BY_EQUALS_SELECT; + break; + + case SQL_IDENTIFIER_CASE: /* ODBC 1.0 */ + + /* + * are identifiers case-sensitive (yes, but only when quoted. 
+ * If not quoted, they default to lowercase) + */ + len = 2; + value = SQL_IC_LOWER; + break; + + case SQL_IDENTIFIER_QUOTE_CHAR: /* ODBC 1.0 */ + /* the character used to quote "identifiers" */ + p = "`"; + break; + + case SQL_KEYWORDS: /* ODBC 2.0 */ + p = NULL_STRING; + break; + + case SQL_LIKE_ESCAPE_CLAUSE: /* ODBC 2.0 */ + p = "Y"; + break; + + case SQL_LOCK_TYPES: /* ODBC 2.0 */ + len = 4; + value = SQL_LCK_NO_CHANGE; + break; + + case SQL_MAX_BINARY_LITERAL_LEN: /* ODBC 2.0 */ + len = 4; + value = 0; + break; + + case SQL_MAX_CHAR_LITERAL_LEN: /* ODBC 2.0 */ + len = 4; + value = 0; + break; + + case SQL_MAX_COLUMN_NAME_LEN: /* ODBC 1.0 */ + len = 2; + value = CC_get_max_idlen(conn); + if (0 == value) + value = NAMEDATALEN_V73 - 1; + break; + + case SQL_MAX_COLUMNS_IN_GROUP_BY: /* ODBC 2.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_COLUMNS_IN_INDEX: /* ODBC 2.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_COLUMNS_IN_ORDER_BY: /* ODBC 2.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_COLUMNS_IN_SELECT: /* ODBC 2.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_COLUMNS_IN_TABLE: /* ODBC 2.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_CURSOR_NAME_LEN: /* ODBC 1.0 */ + len = 2; + value = MAX_CURSOR_LEN; + break; + + case SQL_MAX_INDEX_SIZE: /* ODBC 2.0 */ + len = 4; + value = 0; + break; + + case SQL_MAX_OWNER_NAME_LEN: /* ODBC 1.0 */ + len = 2; + value = 0; + if (ES_VERSION_GT(conn, 7.4)) + value = CC_get_max_idlen(conn); +#ifdef MAX_SCHEMA_LEN + else + value = MAX_SCHEMA_LEN; +#endif /* MAX_SCHEMA_LEN */ + if (0 == value) + value = NAMEDATALEN_V73 - 1; + break; + + case SQL_MAX_PROCEDURE_NAME_LEN: /* ODBC 1.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_QUALIFIER_NAME_LEN: /* ODBC 1.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_ROW_SIZE: /* ODBC 2.0 */ + len = 4; + /* No limit with tuptoaster in 7.1+ */ + value = 0; + break; + + case SQL_MAX_STATEMENT_LEN: /* ODBC 2.0 */ + len = 4; + value = 0; + break; 
+ + case SQL_MAX_TABLE_NAME_LEN: /* ODBC 1.0 */ + len = 2; + if (ES_VERSION_GT(conn, 7.4)) + value = CC_get_max_idlen(conn); +#ifdef MAX_TABLE_LEN + else + value = MAX_TABLE_LEN; +#endif /* MAX_TABLE_LEN */ + if (0 == value) + value = NAMEDATALEN_V73 - 1; + break; + + case SQL_MAX_TABLES_IN_SELECT: /* ODBC 2.0 */ + len = 2; + value = 0; + break; + + case SQL_MAX_USER_NAME_LEN: + len = 2; + value = 0; + break; + + case SQL_MULT_RESULT_SETS: /* ODBC 1.0 */ + /* Don't support multiple result sets but say yes anyway? */ + p = "Y"; + break; + + case SQL_MULTIPLE_ACTIVE_TXN: /* ODBC 1.0 */ + p = "Y"; + break; + + case SQL_NEED_LONG_DATA_LEN: /* ODBC 2.0 */ + + /* + * Don't need the length, SQLPutData can handle any size and + * multiple calls + */ + p = "N"; + break; + + case SQL_NON_NULLABLE_COLUMNS: /* ODBC 1.0 */ + len = 2; + value = SQL_NNC_NON_NULL; + break; + + case SQL_NULL_COLLATION: /* ODBC 2.0 */ + /* where are nulls sorted? */ + len = 2; + value = SQL_NC_HIGH; + break; + + case SQL_NUMERIC_FUNCTIONS: /* ODBC 1.0 */ + len = 4; + value = SQL_FN_NUM_ABS | SQL_FN_NUM_ATAN | SQL_FN_NUM_ATAN2 + | SQL_FN_NUM_COS | SQL_FN_NUM_COT | SQL_FN_NUM_DEGREES + | SQL_FN_NUM_FLOOR | SQL_FN_NUM_LOG | SQL_FN_NUM_LOG10 + | SQL_FN_NUM_PI | SQL_FN_NUM_POWER | SQL_FN_NUM_RADIANS + | SQL_FN_NUM_ROUND | SQL_FN_NUM_SIGN | SQL_FN_NUM_SIN + | SQL_FN_NUM_SQRT | SQL_FN_NUM_TAN; + break; + + case SQL_ODBC_API_CONFORMANCE: /* ODBC 1.0 */ + len = 2; + value = SQL_OAC_LEVEL1; + break; + + case SQL_ODBC_SAG_CLI_CONFORMANCE: /* ODBC 1.0 */ + len = 2; + value = SQL_OSCC_NOT_COMPLIANT; + break; + + case SQL_ODBC_SQL_CONFORMANCE: /* ODBC 1.0 */ + len = 2; + value = SQL_OSC_CORE; + break; + + case SQL_ODBC_SQL_OPT_IEF: /* ODBC 1.0 */ + p = "N"; + break; + + case SQL_OJ_CAPABILITIES: /* ODBC 2.01 */ + len = 4; + value = SQL_OJ_LEFT | SQL_OJ_RIGHT | SQL_OJ_NOT_ORDERED + | SQL_OJ_ALL_COMPARISON_OPS; + break; + + case SQL_ORDER_BY_COLUMNS_IN_SELECT: /* ODBC 2.0 */ + p = "Y"; + break; + + case 
SQL_OUTER_JOINS: /* ODBC 1.0 */ + p = "Y"; + break; + + case SQL_OWNER_TERM: /* ODBC 1.0 */ + p = ""; + break; + + case SQL_OWNER_USAGE: /* ODBC 2.0 */ + // Elasticsearch does not support schemas. + // This will disable showing an empty schema box in Tableau. + len = 4; + value = 0; + break; + + case SQL_POS_OPERATIONS: /* ODBC 2.0 */ + len = 4; + value = (SQL_POS_POSITION | SQL_POS_REFRESH); + break; + + case SQL_POSITIONED_STATEMENTS: /* ODBC 2.0 */ + len = 4; + value = 0; + break; + + case SQL_PROCEDURE_TERM: /* ODBC 1.0 */ + p = "procedure"; + break; + + case SQL_PROCEDURES: /* ODBC 1.0 */ + p = "Y"; + break; + + case SQL_QUALIFIER_LOCATION: /* ODBC 2.0 */ + len = 2; + value = SQL_QL_START; + break; + + case SQL_QUALIFIER_NAME_SEPARATOR: /* ODBC 1.0 */ + p = "."; + break; + + case SQL_QUALIFIER_TERM: /* ODBC 1.0 */ + p = "cluster"; + break; + + case SQL_QUALIFIER_USAGE: /* ODBC 2.0 */ + len = 4; + value = SQL_CU_DML_STATEMENTS; + break; + + case SQL_QUOTED_IDENTIFIER_CASE: /* ODBC 2.0 */ + /* are "quoted" identifiers case-sensitive? YES! 
*/ + len = 2; + value = SQL_IC_SENSITIVE; + break; + + case SQL_ROW_UPDATES: /* ODBC 1.0 */ + + /* + * Driver doesn't support keyset-driven or mixed cursors, so + * not much point in saying row updates are supported + */ + p = "N"; + break; + + case SQL_SCROLL_CONCURRENCY: /* ODBC 1.0 */ + len = 4; + value = SQL_SCCO_READ_ONLY; + break; + + case SQL_SCROLL_OPTIONS: /* ODBC 1.0 */ + len = 4; + value = SQL_SO_FORWARD_ONLY | SQL_SO_STATIC; + break; + + case SQL_SEARCH_PATTERN_ESCAPE: /* ODBC 1.0 */ + p = ""; + break; + + case SQL_SERVER_NAME: /* ODBC 1.0 */ + p = CC_get_server(conn); + break; + + case SQL_SPECIAL_CHARACTERS: /* ODBC 2.0 */ + p = "_"; + break; + + case SQL_STATIC_SENSITIVITY: /* ODBC 2.0 */ + len = 4; + value = 0; + break; + + case SQL_STRING_FUNCTIONS: /* ODBC 1.0 */ + len = 4; + value = SQL_FN_STR_ASCII | SQL_FN_STR_LENGTH | SQL_FN_STR_LTRIM + | SQL_FN_STR_REPLACE | SQL_FN_STR_RTRIM + | SQL_FN_STR_SUBSTRING; + break; + + case SQL_SUBQUERIES: /* ODBC 2.0 */ + len = 4; + value = (SQL_SQ_QUANTIFIED | SQL_SQ_IN | SQL_SQ_EXISTS + | SQL_SQ_COMPARISON); + break; + + case SQL_SYSTEM_FUNCTIONS: /* ODBC 1.0 */ + len = 4; + value = SQL_FN_SYS_IFNULL; + break; + + case SQL_TABLE_TERM: /* ODBC 1.0 */ + p = "table"; + break; + + case SQL_TIMEDATE_ADD_INTERVALS: /* ODBC 2.0 */ + len = 4; + value = 0; + break; + + case SQL_TIMEDATE_DIFF_INTERVALS: /* ODBC 2.0 */ + len = 4; + value = 0; + break; + + case SQL_TIMEDATE_FUNCTIONS: /* ODBC 1.0 */ + len = 4; + value = SQL_FN_TD_CURDATE | SQL_FN_TD_DAYOFMONTH | SQL_FN_TD_MONTH + | SQL_FN_TD_MONTHNAME | SQL_FN_TD_NOW | SQL_FN_TD_YEAR; + break; + + case SQL_TXN_CAPABLE: /* ODBC 1.0 */ + /* + * Elasticsearch does not support transactions. 
+ */ + len = 2; + value = SQL_TC_NONE; + break; + + case SQL_TXN_ISOLATION_OPTION: /* ODBC 1.0 */ + len = 4; + value = SQL_TXN_READ_UNCOMMITTED | SQL_TXN_READ_COMMITTED + | SQL_TXN_REPEATABLE_READ | SQL_TXN_SERIALIZABLE; + break; + + case SQL_UNION: /* ODBC 2.0 */ + len = 4; + value = (SQL_U_UNION | SQL_U_UNION_ALL); + break; + + case SQL_USER_NAME: /* ODBC 1.0 */ + p = CC_get_username(conn); + break; + + /* Keys for ODBC 3.0 */ + case SQL_DYNAMIC_CURSOR_ATTRIBUTES1: + len = 4; + value = 0; + break; + case SQL_DYNAMIC_CURSOR_ATTRIBUTES2: + len = 4; + value = 0; + break; + case SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1: + len = 4; + value = SQL_CA1_NEXT; /* others aren't allowed in ODBC spec */ + break; + case SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2: + len = 4; + value = SQL_CA2_READ_ONLY_CONCURRENCY | SQL_CA2_CRC_EXACT; + break; + case SQL_KEYSET_CURSOR_ATTRIBUTES1: + len = 4; + value = 0; + break; + case SQL_KEYSET_CURSOR_ATTRIBUTES2: + len = 4; + value = 0; + break; + + case SQL_STATIC_CURSOR_ATTRIBUTES1: + len = 4; + value = SQL_CA1_NEXT | SQL_CA1_ABSOLUTE | SQL_CA1_RELATIVE + | SQL_CA1_BOOKMARK | SQL_CA1_LOCK_NO_CHANGE + | SQL_CA1_POS_POSITION | SQL_CA1_POS_REFRESH; + break; + case SQL_STATIC_CURSOR_ATTRIBUTES2: + len = 4; + value = SQL_CA2_READ_ONLY_CONCURRENCY | SQL_CA2_CRC_EXACT; + break; + + case SQL_ODBC_INTERFACE_CONFORMANCE: + len = 4; + value = SQL_OIC_CORE; + break; + case SQL_ACTIVE_ENVIRONMENTS: + len = 2; + value = 0; + break; + case SQL_AGGREGATE_FUNCTIONS: + len = 4; + value = SQL_AF_ALL; + break; + case SQL_ALTER_DOMAIN: + len = 4; + value = 0; + break; + case SQL_ASYNC_MODE: + len = 4; + value = SQL_AM_NONE; + break; + case SQL_BATCH_ROW_COUNT: + len = 4; + value = SQL_BRC_EXPLICIT; + break; + case SQL_BATCH_SUPPORT: + len = 4; + value = SQL_BS_SELECT_EXPLICIT | SQL_BS_ROW_COUNT_EXPLICIT; + break; + case SQL_CATALOG_NAME: + if (CurrCat(conn)) + p = "Y"; + else + p = "N"; + break; + case SQL_COLLATION_SEQ: + p = ""; + break; + case SQL_CREATE_ASSERTION: + 
len = 4; + value = 0; + break; + case SQL_CREATE_CHARACTER_SET: + len = 4; + value = 0; + break; + case SQL_CREATE_COLLATION: + len = 4; + value = 0; + break; + case SQL_CREATE_DOMAIN: + len = 4; + value = 0; + break; + case SQL_CREATE_SCHEMA: + len = 4; + value = SQL_CS_CREATE_SCHEMA | SQL_CS_AUTHORIZATION; + break; + case SQL_CREATE_TABLE: + len = 4; + value = SQL_CT_CREATE_TABLE | SQL_CT_COLUMN_CONSTRAINT + | SQL_CT_COLUMN_DEFAULT | SQL_CT_GLOBAL_TEMPORARY + | SQL_CT_TABLE_CONSTRAINT + | SQL_CT_CONSTRAINT_NAME_DEFINITION + | SQL_CT_CONSTRAINT_INITIALLY_DEFERRED + | SQL_CT_CONSTRAINT_INITIALLY_IMMEDIATE + | SQL_CT_CONSTRAINT_DEFERRABLE; + break; + case SQL_CREATE_TRANSLATION: + len = 4; + value = 0; + break; + case SQL_CREATE_VIEW: + len = 4; + value = SQL_CV_CREATE_VIEW; + break; + case SQL_DDL_INDEX: + len = 4; + value = SQL_DI_CREATE_INDEX | SQL_DI_DROP_INDEX; + break; + case SQL_DESCRIBE_PARAMETER: + p = "N"; + break; + case SQL_DROP_ASSERTION: + len = 4; + value = 0; + break; + case SQL_DROP_CHARACTER_SET: + len = 4; + value = 0; + break; + case SQL_DROP_COLLATION: + len = 4; + value = 0; + break; + case SQL_DROP_DOMAIN: + len = 4; + value = 0; + break; + case SQL_DROP_SCHEMA: + len = 4; + value = SQL_DS_DROP_SCHEMA | SQL_DS_RESTRICT | SQL_DS_CASCADE; + break; + case SQL_DROP_TABLE: + len = 4; + value = SQL_DT_DROP_TABLE; + value |= (SQL_DT_RESTRICT | SQL_DT_CASCADE); + break; + case SQL_DROP_TRANSLATION: + len = 4; + value = 0; + break; + case SQL_DROP_VIEW: + len = 4; + value = SQL_DV_DROP_VIEW; + value |= (SQL_DV_RESTRICT | SQL_DV_CASCADE); + break; + case SQL_INDEX_KEYWORDS: + len = 4; + value = SQL_IK_NONE; + break; + case SQL_INFO_SCHEMA_VIEWS: + len = 4; + value = 0; + break; + case SQL_INSERT_STATEMENT: + len = 4; + value = SQL_IS_INSERT_LITERALS | SQL_IS_INSERT_SEARCHED + | SQL_IS_SELECT_INTO; + break; + case SQL_MAX_IDENTIFIER_LEN: + len = 2; + value = CC_get_max_idlen(conn); + if (0 == value) + value = NAMEDATALEN_V73 - 1; + break; + case 
SQL_MAX_ROW_SIZE_INCLUDES_LONG: + p = "Y"; + break; + case SQL_PARAM_ARRAY_ROW_COUNTS: + len = 4; + value = SQL_PARC_BATCH; + break; + case SQL_PARAM_ARRAY_SELECTS: + len = 4; + value = SQL_PAS_BATCH; + break; + case SQL_SQL_CONFORMANCE: + // SQL plugin currently does not support this level, + // but Tableau requires at least Entry level reported for retrieving + // row data + len = 4; + value = SQL_SC_SQL92_ENTRY; + break; + case SQL_SQL92_DATETIME_FUNCTIONS: + len = 4; + value = 0; + break; + case SQL_SQL92_FOREIGN_KEY_DELETE_RULE: + len = 4; + value = SQL_SFKD_CASCADE | SQL_SFKD_NO_ACTION | SQL_SFKD_SET_DEFAULT + | SQL_SFKD_SET_NULL; + break; + case SQL_SQL92_FOREIGN_KEY_UPDATE_RULE: + len = 4; + value = SQL_SFKU_CASCADE | SQL_SFKU_NO_ACTION | SQL_SFKU_SET_DEFAULT + | SQL_SFKU_SET_NULL; + break; + case SQL_SQL92_GRANT: + len = 4; + value = SQL_SG_DELETE_TABLE | SQL_SG_INSERT_TABLE + | SQL_SG_REFERENCES_TABLE | SQL_SG_SELECT_TABLE + | SQL_SG_UPDATE_TABLE; + break; + case SQL_SQL92_NUMERIC_VALUE_FUNCTIONS: + len = 4; + value = 0; + break; + case SQL_SQL92_PREDICATES: + len = 4; + value = SQL_SP_BETWEEN | SQL_SP_COMPARISON | SQL_SP_IN + | SQL_SP_ISNULL | SQL_SP_LIKE; + break; + case SQL_SQL92_RELATIONAL_JOIN_OPERATORS: + len = 4; + value = SQL_SRJO_CROSS_JOIN | SQL_SRJO_INNER_JOIN + | SQL_SRJO_LEFT_OUTER_JOIN | SQL_SRJO_RIGHT_OUTER_JOIN; + break; + case SQL_SQL92_REVOKE: + len = 4; + value = SQL_SR_DELETE_TABLE | SQL_SR_INSERT_TABLE + | SQL_SR_REFERENCES_TABLE | SQL_SR_SELECT_TABLE + | SQL_SR_UPDATE_TABLE; + break; + case SQL_SQL92_ROW_VALUE_CONSTRUCTOR: + len = 4; + value = SQL_SRVC_VALUE_EXPRESSION | SQL_SRVC_NULL; + break; + case SQL_SQL92_STRING_FUNCTIONS: + len = 4; + value = SQL_SSF_LOWER | SQL_SSF_UPPER; + break; + case SQL_SQL92_VALUE_EXPRESSIONS: + len = 4; + value = SQL_SVE_CASE | SQL_SVE_CAST; + break; +#ifdef SQL_DTC_TRANSACTION_COST + case SQL_DTC_TRANSACTION_COST: +#else + case 1750: +#endif + len = 4; + break; + case SQL_DATETIME_LITERALS: + case 
SQL_DRIVER_HDESC: + case SQL_MAX_ASYNC_CONCURRENT_STATEMENTS: + case SQL_STANDARD_CLI_CONFORMANCE: + case SQL_CONVERT_INTERVAL_DAY_TIME: + len = 4; + value = 0; + break; + case SQL_DM_VER: + case SQL_XOPEN_CLI_YEAR: + len = 0; + value = 0; + break; + + default: + /* unrecognized key */ + CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, + "Unrecognized key passed to ESAPI_GetInfo.", NULL); + goto cleanup; + } + + ret = SQL_SUCCESS; + + MYLOG(ES_DEBUG, "p='%s', len=" FORMAT_ULEN ", value=" FORMAT_ULEN ", cbMax=%d\n", + p ? p : "", len, value, cbInfoValueMax); + + /* + * NOTE, that if rgbInfoValue is NULL, then no warnings or errors + * should result and just pcbInfoValue is returned, which indicates + * what length would be required if a real buffer had been passed in. + */ + if (p) { + /* char/binary data */ + len = strlen(p); + + if (rgbInfoValue) { +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(conn)) { + len = utf8_to_ucs2(p, len, (SQLWCHAR *)rgbInfoValue, + cbInfoValueMax / WCLEN); + len *= WCLEN; + } else +#endif /* UNICODE_SUPPORT */ + strncpy_null((char *)rgbInfoValue, p, (size_t)cbInfoValueMax); + + if (len >= (SQLULEN)cbInfoValueMax) { + ret = SQL_SUCCESS_WITH_INFO; + CC_set_error(conn, CONN_TRUNCATED, + "The buffer was too small for the InfoValue.", + func); + } + } +#ifdef UNICODE_SUPPORT + else if (CC_is_in_unicode_driver(conn)) + len *= WCLEN; +#endif /* UNICODE_SUPPORT */ + } else { + /* numeric data */ + if (rgbInfoValue) { + if (len == sizeof(SQLSMALLINT)) + *((SQLUSMALLINT *)rgbInfoValue) = (SQLUSMALLINT)value; + else if (len == sizeof(SQLINTEGER)) + *((SQLUINTEGER *)rgbInfoValue) = (SQLUINTEGER)value; + } + } + + if (pcbInfoValue) + *pcbInfoValue = (SQLSMALLINT)len; +cleanup: + + return ret; +} + +/* + * macros for estype_xxxx() calls which have ES_ATP_UNSET parameters + */ +#define ESTYPE_COLUMN_SIZE(conn, esType) \ + estype_attr_column_size(conn, esType, ES_ATP_UNSET, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_TO_CONCISE_TYPE(conn, 
esType) \ + estype_attr_to_concise_type(conn, esType, ES_ATP_UNSET, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_TO_SQLDESCTYPE(conn, esType) \ + estype_attr_to_sqldesctype(conn, esType, ES_ATP_UNSET, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_BUFFER_LENGTH(conn, esType) \ + estype_attr_buffer_length(conn, esType, ES_ATP_UNSET, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_DECIMAL_DIGITS(conn, esType) \ + estype_attr_decimal_digits(conn, esType, ES_ATP_UNSET, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_TRANSFER_OCTET_LENGTH(conn, esType) \ + estype_attr_transfer_octet_length(conn, esType, ES_ATP_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_TO_NAME(conn, esType, auto_increment) \ + estype_attr_to_name(conn, esType, ES_ATP_UNSET, auto_increment) + +RETCODE SQL_API ESAPI_GetFunctions(HDBC hdbc, SQLUSMALLINT fFunction, + SQLUSMALLINT *pfExists) { + UNUSED(hdbc); + MYLOG(ES_TRACE, "entering...%u\n", fFunction); + + if (fFunction == SQL_API_ALL_FUNCTIONS) { + memset(pfExists, 0, sizeof(pfExists[0]) * 100); + + /* ODBC core functions */ + pfExists[SQL_API_SQLALLOCCONNECT] = TRUE; + pfExists[SQL_API_SQLALLOCENV] = TRUE; + pfExists[SQL_API_SQLALLOCSTMT] = TRUE; + pfExists[SQL_API_SQLBINDCOL] = TRUE; + pfExists[SQL_API_SQLCANCEL] = TRUE; + pfExists[SQL_API_SQLCOLATTRIBUTES] = TRUE; + pfExists[SQL_API_SQLCONNECT] = TRUE; + pfExists[SQL_API_SQLDESCRIBECOL] = TRUE; /* partial */ + pfExists[SQL_API_SQLDISCONNECT] = TRUE; + pfExists[SQL_API_SQLERROR] = TRUE; + pfExists[SQL_API_SQLEXECDIRECT] = TRUE; + pfExists[SQL_API_SQLEXECUTE] = TRUE; + pfExists[SQL_API_SQLFETCH] = TRUE; + pfExists[SQL_API_SQLFREECONNECT] = TRUE; + pfExists[SQL_API_SQLFREEENV] = TRUE; + pfExists[SQL_API_SQLFREESTMT] = TRUE; + pfExists[SQL_API_SQLGETCURSORNAME] = TRUE; + pfExists[SQL_API_SQLNUMRESULTCOLS] = TRUE; + pfExists[SQL_API_SQLPREPARE] = TRUE; /* complete? 
*/ + pfExists[SQL_API_SQLROWCOUNT] = TRUE; + pfExists[SQL_API_SQLSETCURSORNAME] = TRUE; + pfExists[SQL_API_SQLSETPARAM] = FALSE; /* odbc 1.0 */ + pfExists[SQL_API_SQLTRANSACT] = TRUE; + + /* ODBC level 1 functions */ + pfExists[SQL_API_SQLBINDPARAMETER] = TRUE; + pfExists[SQL_API_SQLCOLUMNS] = TRUE; + pfExists[SQL_API_SQLDRIVERCONNECT] = TRUE; + pfExists[SQL_API_SQLGETCONNECTOPTION] = TRUE; /* partial */ + pfExists[SQL_API_SQLGETDATA] = TRUE; + pfExists[SQL_API_SQLGETFUNCTIONS] = TRUE; + pfExists[SQL_API_SQLGETINFO] = TRUE; + pfExists[SQL_API_SQLGETSTMTOPTION] = TRUE; /* partial */ + pfExists[SQL_API_SQLGETTYPEINFO] = TRUE; + pfExists[SQL_API_SQLPARAMDATA] = TRUE; + pfExists[SQL_API_SQLPUTDATA] = TRUE; + pfExists[SQL_API_SQLSETCONNECTOPTION] = TRUE; /* partial */ + pfExists[SQL_API_SQLSETSTMTOPTION] = TRUE; + pfExists[SQL_API_SQLSPECIALCOLUMNS] = TRUE; + pfExists[SQL_API_SQLSTATISTICS] = TRUE; + pfExists[SQL_API_SQLTABLES] = TRUE; + + /* ODBC level 2 functions */ + pfExists[SQL_API_SQLBROWSECONNECT] = FALSE; + pfExists[SQL_API_SQLCOLUMNPRIVILEGES] = FALSE; + pfExists[SQL_API_SQLDATASOURCES] = FALSE; /* only implemented by + * DM */ + if (SUPPORT_DESCRIBE_PARAM(ci)) + pfExists[SQL_API_SQLDESCRIBEPARAM] = TRUE; + else + pfExists[SQL_API_SQLDESCRIBEPARAM] = FALSE; /* not properly + * implemented */ + pfExists[SQL_API_SQLDRIVERS] = FALSE; /* only implemented by + * DM */ + pfExists[SQL_API_SQLEXTENDEDFETCH] = TRUE; + pfExists[SQL_API_SQLFOREIGNKEYS] = TRUE; + pfExists[SQL_API_SQLMORERESULTS] = TRUE; + pfExists[SQL_API_SQLNATIVESQL] = TRUE; + pfExists[SQL_API_SQLNUMPARAMS] = TRUE; + pfExists[SQL_API_SQLPARAMOPTIONS] = TRUE; + pfExists[SQL_API_SQLPRIMARYKEYS] = TRUE; + pfExists[SQL_API_SQLPROCEDURECOLUMNS] = TRUE; + pfExists[SQL_API_SQLPROCEDURES] = TRUE; + pfExists[SQL_API_SQLSETPOS] = TRUE; + pfExists[SQL_API_SQLSETSCROLLOPTIONS] = TRUE; /* odbc 1.0 */ + pfExists[SQL_API_SQLTABLEPRIVILEGES] = TRUE; + pfExists[SQL_API_SQLBULKOPERATIONS] = FALSE; + } else { + switch 
(fFunction) { + case SQL_API_SQLBINDCOL: + *pfExists = TRUE; + break; + case SQL_API_SQLCANCEL: + *pfExists = TRUE; + break; + case SQL_API_SQLCOLATTRIBUTE: + *pfExists = TRUE; + break; + case SQL_API_SQLCONNECT: + *pfExists = TRUE; + break; + case SQL_API_SQLDESCRIBECOL: + *pfExists = TRUE; + break; /* partial */ + case SQL_API_SQLDISCONNECT: + *pfExists = TRUE; + break; + case SQL_API_SQLEXECDIRECT: + *pfExists = TRUE; + break; + case SQL_API_SQLEXECUTE: + *pfExists = TRUE; + break; + case SQL_API_SQLFETCH: + *pfExists = TRUE; + break; + case SQL_API_SQLFREESTMT: + *pfExists = TRUE; + break; + case SQL_API_SQLGETCURSORNAME: + *pfExists = TRUE; + break; + case SQL_API_SQLNUMRESULTCOLS: + *pfExists = TRUE; + break; + case SQL_API_SQLPREPARE: + *pfExists = TRUE; + break; + case SQL_API_SQLROWCOUNT: + *pfExists = TRUE; + break; + case SQL_API_SQLSETCURSORNAME: + *pfExists = TRUE; + break; + + /* ODBC level 1 functions */ + case SQL_API_SQLBINDPARAMETER: + *pfExists = TRUE; + break; + case SQL_API_SQLCOLUMNS: + *pfExists = TRUE; + break; + case SQL_API_SQLDRIVERCONNECT: + *pfExists = TRUE; + break; + case SQL_API_SQLGETDATA: + *pfExists = TRUE; + break; + case SQL_API_SQLGETFUNCTIONS: + *pfExists = TRUE; + break; + case SQL_API_SQLGETINFO: + *pfExists = TRUE; + break; + case SQL_API_SQLGETTYPEINFO: + *pfExists = TRUE; + break; + case SQL_API_SQLPARAMDATA: + *pfExists = TRUE; + break; + case SQL_API_SQLPUTDATA: + *pfExists = TRUE; + break; + case SQL_API_SQLSPECIALCOLUMNS: + *pfExists = TRUE; + break; + case SQL_API_SQLSTATISTICS: + *pfExists = TRUE; + break; + case SQL_API_SQLTABLES: + *pfExists = TRUE; + break; + + /* ODBC level 2 functions */ + case SQL_API_SQLBROWSECONNECT: + *pfExists = FALSE; + break; + case SQL_API_SQLCOLUMNPRIVILEGES: + *pfExists = FALSE; + break; + case SQL_API_SQLDATASOURCES: + *pfExists = FALSE; + break; /* only implemented by DM */ + case SQL_API_SQLDESCRIBEPARAM: + if (SUPPORT_DESCRIBE_PARAM(ci)) + *pfExists = TRUE; + else + *pfExists = 
FALSE; + break; /* not properly implemented */ + case SQL_API_SQLDRIVERS: + *pfExists = FALSE; + break; /* only implemented by DM */ + case SQL_API_SQLEXTENDEDFETCH: + *pfExists = TRUE; + break; + case SQL_API_SQLFOREIGNKEYS: + *pfExists = TRUE; + break; + case SQL_API_SQLMORERESULTS: + *pfExists = TRUE; + break; + case SQL_API_SQLNATIVESQL: + *pfExists = TRUE; + break; + case SQL_API_SQLNUMPARAMS: + *pfExists = TRUE; + break; + case SQL_API_SQLPRIMARYKEYS: + *pfExists = TRUE; + break; + case SQL_API_SQLPROCEDURECOLUMNS: + *pfExists = TRUE; + break; + case SQL_API_SQLPROCEDURES: + *pfExists = TRUE; + break; + case SQL_API_SQLSETPOS: + *pfExists = TRUE; + break; + case SQL_API_SQLTABLEPRIVILEGES: + *pfExists = TRUE; + break; + case SQL_API_SQLBULKOPERATIONS: /* 24 */ + case SQL_API_SQLALLOCHANDLE: /* 1001 */ + case SQL_API_SQLBINDPARAM: /* 1002 */ + case SQL_API_SQLCLOSECURSOR: /* 1003 */ + case SQL_API_SQLENDTRAN: /* 1005 */ + case SQL_API_SQLFETCHSCROLL: /* 1021 */ + case SQL_API_SQLFREEHANDLE: /* 1006 */ + case SQL_API_SQLGETCONNECTATTR: /* 1007 */ + case SQL_API_SQLGETDESCFIELD: /* 1008 */ + case SQL_API_SQLGETDIAGFIELD: /* 1010 */ + case SQL_API_SQLGETDIAGREC: /* 1011 */ + case SQL_API_SQLGETENVATTR: /* 1012 */ + case SQL_API_SQLGETSTMTATTR: /* 1014 */ + case SQL_API_SQLSETCONNECTATTR: /* 1016 */ + case SQL_API_SQLSETDESCFIELD: /* 1017 */ + case SQL_API_SQLSETENVATTR: /* 1019 */ + case SQL_API_SQLSETSTMTATTR: /* 1020 */ + *pfExists = TRUE; + break; + case SQL_API_SQLGETDESCREC: /* 1009 */ + case SQL_API_SQLSETDESCREC: /* 1018 */ + case SQL_API_SQLCOPYDESC: /* 1004 */ + *pfExists = FALSE; + break; + default: + *pfExists = FALSE; + break; + } + } + return SQL_SUCCESS; +} + +char *identifierEscape(const SQLCHAR *src, SQLLEN srclen, + const ConnectionClass *conn, char *buf, size_t bufsize, + BOOL double_quote) { + int i; + size_t outlen; + UCHAR tchar; + char *dest = NULL, escape_ch = CC_get_escape(conn); + encoded_str encstr; + + if (!src || srclen == 
SQL_NULL_DATA) + return dest; + else if (srclen == SQL_NTS) + srclen = (SQLLEN)strlen((char *)src); + if (srclen <= 0) + return dest; + MYLOG(ES_TRACE, "entering in=%s(" FORMAT_LEN ")\n", src, srclen); + if (NULL != buf && bufsize > 0) + dest = buf; + else { + bufsize = 2 * srclen + 1; + dest = malloc(bufsize); + } + if (!dest) + return NULL; + encoded_str_constr(&encstr, conn->ccsc, (char *)src); + outlen = 0; + if (double_quote) + dest[outlen++] = IDENTIFIER_QUOTE; + for (i = 0, tchar = (UCHAR)encoded_nextchar(&encstr); + i < srclen && outlen < bufsize - 1; + i++, tchar = (UCHAR)encoded_nextchar(&encstr)) { + if (MBCS_NON_ASCII(encstr)) { + dest[outlen++] = tchar; + continue; + } + if (LITERAL_QUOTE == tchar || escape_ch == tchar) + dest[outlen++] = tchar; + else if (double_quote && IDENTIFIER_QUOTE == tchar) + dest[outlen++] = tchar; + dest[outlen++] = tchar; + } + if (double_quote) + dest[outlen++] = IDENTIFIER_QUOTE; + dest[outlen] = '\0'; + MYLOG(ES_TRACE, "leaving output=%s(%d)\n", dest, (int)outlen); + return dest; +} + +#define CSTR_SYS_TABLE "SYSTEM TABLE" +#define CSTR_TABLE "TABLE" +#define CSTR_VIEW "VIEW" +#define CSTR_FOREIGN_TABLE "FOREIGN TABLE" +#define CSTR_MATVIEW "MATVIEW" + +#define IS_VALID_NAME(str) ((str) && (str)[0]) +#define TABLE_IN_RELKIND "('r', 'v', 'm', 'f', 'p')" + +/* + * macros for estype_attr_xxxx() calls which have + * ES_ADT_UNSET or ES_UNKNOWNS_UNSET parameters + */ +#define ESTYPE_ATTR_COLUMN_SIZE(conn, esType, atttypmod) \ + estype_attr_column_size(conn, esType, atttypmod, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_ATTR_TO_CONCISE_TYPE(conn, esType, atttypmod) \ + estype_attr_to_concise_type(conn, esType, atttypmod, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_ATTR_TO_SQLDESCTYPE(conn, esType, atttypmod) \ + estype_attr_to_sqldesctype(conn, esType, atttypmod, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_ATTR_DISPLAY_SIZE(conn, esType, atttypmod) \ + estype_attr_display_size(conn, esType, atttypmod, 
ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_ATTR_BUFFER_LENGTH(conn, esType, atttypmod) \ + estype_attr_buffer_length(conn, esType, atttypmod, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_ATTR_DECIMAL_DIGITS(conn, esType, atttypmod) \ + estype_attr_decimal_digits(conn, esType, atttypmod, ES_ADT_UNSET, \ + ES_UNKNOWNS_UNSET) +#define ESTYPE_ATTR_TRANSFER_OCTET_LENGTH(conn, esType, atttypmod) \ + estype_attr_transfer_octet_length(conn, esType, atttypmod, \ + ES_UNKNOWNS_UNSET) + +RETCODE SQL_API ESAPI_SpecialColumns( + HSTMT hstmt, SQLUSMALLINT fColType, const SQLCHAR *szTableQualifier, + SQLSMALLINT cbTableQualifier, const SQLCHAR *szTableOwner, /* OA E*/ + SQLSMALLINT cbTableOwner, const SQLCHAR *szTableName, /* OA(R) E*/ + SQLSMALLINT cbTableName, SQLUSMALLINT fScope, SQLUSMALLINT fNullable) { + UNUSED(fColType, szTableQualifier, cbTableQualifier, szTableOwner, + cbTableOwner, szTableName, cbTableName, fScope, fNullable); + CSTR func = "ESAPI_SpecialColumns"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE result; + if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) + return result; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (!res) { + SC_set_error( + stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_SpecialColumns result.", func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_SPECOLS_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_SPECOLS_FIELDS); + QR_set_field_info_v(res, SPECOLS_SCOPE, "SCOPE", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, SPECOLS_COLUMN_NAME, "COLUMN_NAME", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, SPECOLS_DATA_TYPE, "DATA_TYPE", ES_TYPE_INT2, 2); + 
QR_set_field_info_v(res, SPECOLS_TYPE_NAME, "TYPE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, SPECOLS_COLUMN_SIZE, "COLUMN_SIZE", ES_TYPE_INT4, + 4); + QR_set_field_info_v(res, SPECOLS_BUFFER_LENGTH, "BUFFER_LENGTH", + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, SPECOLS_DECIMAL_DIGITS, "DECIMAL_DIGITS", + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, SPECOLS_PSEUDO_COLUMN, "PSEUDO_COLUMN", + ES_TYPE_INT2, 2); + + // Set result to okay and adjust fields if keys exist + QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->status = STMT_FINISHED; + stmt->currTuple = -1; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return SQL_SUCCESS; +} + +#define INDOPTION_DESC 0x0001 /* values are in reverse order */ +RETCODE SQL_API ESAPI_Statistics( + HSTMT hstmt, const SQLCHAR *szTableQualifier, /* OA X*/ + SQLSMALLINT cbTableQualifier, const SQLCHAR *szTableOwner, /* OA E*/ + SQLSMALLINT cbTableOwner, const SQLCHAR *szTableName, /* OA(R) E*/ + SQLSMALLINT cbTableName, SQLUSMALLINT fUnique, SQLUSMALLINT fAccuracy) { + UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner, + szTableName, cbTableName, fUnique, fAccuracy); + CSTR func = "ESAPI_Statistics"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE result; + if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) + return result; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (!res) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_Statistics result.", + func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), 
NUM_OF_STATS_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_STATS_FIELDS); + QR_set_field_info_v(res, STATS_CATALOG_NAME, "TABLE_QUALIFIER", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, STATS_SCHEMA_NAME, "TABLE_OWNER", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, STATS_TABLE_NAME, "TABLE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, STATS_NON_UNIQUE, "NON_UNIQUE", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, STATS_INDEX_QUALIFIER, "INDEX_QUALIFIER", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, STATS_INDEX_NAME, "INDEX_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, STATS_TYPE, "TYPE", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, STATS_SEQ_IN_INDEX, "SEQ_IN_INDEX", ES_TYPE_INT2, + 2); + QR_set_field_info_v(res, STATS_COLUMN_NAME, "COLUMN_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, STATS_COLLATION, "COLLATION", ES_TYPE_CHAR, 1); + QR_set_field_info_v(res, STATS_CARDINALITY, "CARDINALITY", ES_TYPE_INT4, 4); + QR_set_field_info_v(res, STATS_PAGES, "PAGES", ES_TYPE_INT4, 4); + QR_set_field_info_v(res, STATS_FILTER_CONDITION, "FILTER_CONDITION", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + + // Set result to okay and adjust fields if keys exist + QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->status = STMT_FINISHED; + stmt->currTuple = -1; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return SQL_SUCCESS; +} + +RETCODE SQL_API ESAPI_ColumnPrivileges( + HSTMT hstmt, const SQLCHAR *szTableQualifier, /* OA X*/ + SQLSMALLINT cbTableQualifier, const SQLCHAR *szTableOwner, /* OA E*/ + SQLSMALLINT cbTableOwner, const SQLCHAR *szTableName, /* OA(R) E*/ + SQLSMALLINT cbTableName, const SQLCHAR *szColumnName, /* PV E*/ + SQLSMALLINT 
cbColumnName, UWORD flag) { + UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner, + szTableName, cbTableName, szColumnName, cbColumnName, flag); + CSTR func = "ESAPI_ColumnPrivileges"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE result; + if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) + return result; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (!res) { + SC_set_error( + stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_ColumnPrivileges result.", + func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_COLPRIV_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_COLPRIV_FIELDS); + QR_set_field_info_v(res, COLPRIV_TABLE_CAT, "TABLE_CAT", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLPRIV_TABLE_SCHEM, "TABLE_SCHEM", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, COLPRIV_TABLE_NAME, "TABLE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLPRIV_COLUMN_NAME, "COLUMN_NAME", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, COLPRIV_GRANTOR, "GRANTOR", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLPRIV_GRANTEE, "GRANTEE", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLPRIV_PRIVILEGE, "PRIVILEGE", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, COLPRIV_IS_GRANTABLE, "IS_GRANTABLE", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + + // Set result to okay and adjust fields if keys exist + QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->status 
= STMT_FINISHED; + stmt->currTuple = -1; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return SQL_SUCCESS; +} + +/* + * SQLPrimaryKeys() + * + * Retrieve the primary key columns for the specified table. + */ +RETCODE SQL_API ESAPI_PrimaryKeys(HSTMT hstmt, + const SQLCHAR *szTableQualifier, /* OA X*/ + SQLSMALLINT cbTableQualifier, + const SQLCHAR *szTableOwner, /* OA E*/ + SQLSMALLINT cbTableOwner, + const SQLCHAR *szTableName, /* OA(R) E*/ + SQLSMALLINT cbTableName, OID reloid) { + UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner, + szTableName, cbTableName, reloid); + CSTR func = "ESAPI_PrimaryKeys"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE ret = SC_initialize_and_recycle(stmt); + if (ret != SQL_SUCCESS) + return ret; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (res == NULL) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_PrimaryKeys result.", + func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_PKS_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_PKS_FIELDS); + QR_set_field_info_v(res, PKS_TABLE_CAT, "TABLE_QUALIFIER", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PKS_TABLE_SCHEM, "TABLE_OWNER", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PKS_TABLE_NAME, "TABLE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PKS_COLUMN_NAME, "COLUMN_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PKS_KEY_SQ, "KEY_SEQ", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, PKS_PK_NAME, "PK_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + + // Set result to okay and adjust fields if keys exist + 
QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->currTuple = -1; + stmt->status = STMT_FINISHED; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return ret; +} + +RETCODE SQL_API ESAPI_ForeignKeys( + HSTMT hstmt, const SQLCHAR *szPkTableQualifier, /* OA X*/ + SQLSMALLINT cbPkTableQualifier, const SQLCHAR *szPkTableOwner, /* OA E*/ + SQLSMALLINT cbPkTableOwner, const SQLCHAR *szPkTableName, /* OA(R) E*/ + SQLSMALLINT cbPkTableName, const SQLCHAR *szFkTableQualifier, /* OA X*/ + SQLSMALLINT cbFkTableQualifier, const SQLCHAR *szFkTableOwner, /* OA E*/ + SQLSMALLINT cbFkTableOwner, const SQLCHAR *szFkTableName, /* OA(R) E*/ + SQLSMALLINT cbFkTableName) { + UNUSED(szPkTableQualifier, cbPkTableQualifier, szPkTableOwner, + cbPkTableOwner, szPkTableName, cbPkTableName, szFkTableQualifier, + cbFkTableQualifier, szFkTableOwner, cbFkTableOwner, szFkTableName, + cbFkTableName); + CSTR func = "ESAPI_ForeignKeys"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE result; + if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) + return result; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (!res) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_ForeignKeys result.", + func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_FKS_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_FKS_FIELDS); + QR_set_field_info_v(res, FKS_PKTABLE_CAT, "PKTABLE_QUALIFIER", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_PKTABLE_SCHEM, "PKTABLE_OWNER", + 
ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_PKTABLE_NAME, "PKTABLE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_PKCOLUMN_NAME, "PKCOLUMN_NAME", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_FKTABLE_CAT, "FKTABLE_QUALIFIER", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_FKTABLE_SCHEM, "FKTABLE_OWNER", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_FKTABLE_NAME, "FKTABLE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_FKCOLUMN_NAME, "FKCOLUMN_NAME", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_KEY_SEQ, "KEY_SEQ", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, FKS_UPDATE_RULE, "UPDATE_RULE", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, FKS_DELETE_RULE, "DELETE_RULE", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, FKS_FK_NAME, "FK_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_PK_NAME, "PK_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, FKS_DEFERRABILITY, "DEFERRABILITY", ES_TYPE_INT2, + 2); + QR_set_field_info_v(res, FKS_TRIGGER_NAME, "TRIGGER_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + + // Set result to okay and adjust fields if keys exist + QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->status = STMT_FINISHED; + stmt->currTuple = -1; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return SQL_SUCCESS; +} + +#define PRORET_COUNT +#define DISPLAY_ARGNAME + +RETCODE SQL_API ESAPI_ProcedureColumns( + HSTMT hstmt, const SQLCHAR *szProcQualifier, /* OA X*/ + SQLSMALLINT cbProcQualifier, const SQLCHAR *szProcOwner, /* PV E*/ + SQLSMALLINT cbProcOwner, const SQLCHAR *szProcName, /* PV E*/ + SQLSMALLINT cbProcName, const SQLCHAR *szColumnName, /* PV X*/ + SQLSMALLINT cbColumnName, UWORD flag) 
{ + UNUSED(szProcQualifier, cbProcQualifier, szProcOwner, cbProcOwner, + szProcName, cbProcName, szColumnName, cbColumnName, flag); + CSTR func = "ESAPI_ProcedureColumns"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE ret = SC_initialize_and_recycle(stmt); + if (ret != SQL_SUCCESS) + return ret; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (res == NULL) { + SC_set_error( + stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_ProcedureColumns result.", + func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_PROCOLS_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_PROCOLS_FIELDS); + QR_set_field_info_v(res, PROCOLS_PROCEDURE_CAT, "PROCEDURE_CAT", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, PROCOLS_PROCEDURE_SCHEM, "PROCEDURE_SCHEM", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, PROCOLS_PROCEDURE_NAME, "PROCEDURE_NAME", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, PROCOLS_COLUMN_NAME, "COLUMN_NAME", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, PROCOLS_COLUMN_TYPE, "COLUMN_TYPE", ES_TYPE_INT2, + 2); + QR_set_field_info_v(res, PROCOLS_DATA_TYPE, "DATA_TYPE", ES_TYPE_INT2, 2); + QR_set_field_info_v(res, PROCOLS_TYPE_NAME, "TYPE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PROCOLS_COLUMN_SIZE, "COLUMN_SIZE", ES_TYPE_INT4, + 4); + QR_set_field_info_v(res, PROCOLS_BUFFER_LENGTH, "BUFFER_LENGTH", + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, PROCOLS_DECIMAL_DIGITS, "DECIMAL_DIGITS", + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, PROCOLS_NUM_PREC_RADIX, "NUM_PREC_RADIX", + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, PROCOLS_NULLABLE, "NULLABLE", 
ES_TYPE_INT2, 2); + QR_set_field_info_v(res, PROCOLS_REMARKS, "REMARKS", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PROCOLS_COLUMN_DEF, "COLUMN_DEF", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PROCOLS_SQL_DATA_TYPE, "SQL_DATA_TYPE", + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, PROCOLS_SQL_DATETIME_SUB, "SQL_DATETIME_SUB", + ES_TYPE_INT2, 2); + QR_set_field_info_v(res, PROCOLS_CHAR_OCTET_LENGTH, "CHAR_OCTET_LENGTH", + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, PROCOLS_ORDINAL_POSITION, "ORDINAL_POSITION", + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, PROCOLS_IS_NULLABLE, "IS_NULLABLE", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + + // Set result to okay and adjust fields if keys exist + QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->currTuple = -1; + stmt->status = STMT_FINISHED; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return ret; +} + +RETCODE SQL_API ESAPI_Procedures(HSTMT hstmt, + const SQLCHAR *szProcQualifier, /* OA X*/ + SQLSMALLINT cbProcQualifier, + const SQLCHAR *szProcOwner, /* PV E*/ + SQLSMALLINT cbProcOwner, + const SQLCHAR *szProcName, /* PV E*/ + SQLSMALLINT cbProcName, UWORD flag) { + UNUSED(szProcQualifier, cbProcQualifier, szProcOwner, cbProcOwner, + szProcName, cbProcName, flag); + CSTR func = "ESAPI_Procedures"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE ret = SC_initialize_and_recycle(stmt); + if (ret != SQL_SUCCESS) + return ret; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (res == NULL) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_Procedures result.", + func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set 
number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_PRO_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_PRO_FIELDS); + QR_set_field_info_v(res, PRO_PROCEDURE_CAT, "PROCEDURE_CAT", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, PRO_PROCEDURE_SCHEM, "PROCEDURE_SCHEM", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, PRO_PROCEDURE_NAME, "PROCEDURE_NAME", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, PRO_NUM_INPUT_PARAMS, "NUM_INPUT_PARAMS", + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, PRO_NUM_OUTPUT_PARAMS, "NUM_OUTPUT_PARAMS", + ES_TYPE_INT4, 4); + QR_set_field_info_v(res, PRO_RESULT_SETS, "NUM_RESULT_SETS", ES_TYPE_INT4, + 4); + QR_set_field_info_v(res, PRO_REMARKS, "REMARKS", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, PRO_PROCEDURE_TYPE, "PROCEDURE_TYPE", + ES_TYPE_INT2, 2); + + // Set result to okay and adjust fields if keys exist + QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->currTuple = -1; + stmt->status = STMT_FINISHED; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return ret; +} + +#define ACLMAX 8 +#define ALL_PRIVILIGES "arwdRxt" + +RETCODE SQL_API ESAPI_TablePrivileges(HSTMT hstmt, + const SQLCHAR *szTableQualifier, /* OA X*/ + SQLSMALLINT cbTableQualifier, + const SQLCHAR *szTableOwner, /* PV E*/ + SQLSMALLINT cbTableOwner, + const SQLCHAR *szTableName, /* PV E*/ + SQLSMALLINT cbTableName, UWORD flag) { + UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner, + szTableName, cbTableName, flag); + CSTR func = "ESAPI_TablePrivileges"; + + // Initialize Statement + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE result; + if (result = SC_initialize_and_recycle(stmt), 
SQL_SUCCESS != result) + return result; + + // Initialize QResultClass + QResultClass *res = QR_Constructor(); + if (!res) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Couldn't allocate memory for ESAPI_TablePrivileges result.", + func); + return SQL_ERROR; + } + + // Link QResultClass to statement and connection + QR_set_conn(res, SC_get_conn(stmt)); + SC_set_Result(stmt, res); + + // Set number of fields and declare as catalog result + extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_TABPRIV_FIELDS); + stmt->catalog_result = TRUE; + + // Setup fields + QR_set_num_fields(res, NUM_OF_TABPRIV_FIELDS); + QR_set_field_info_v(res, TABPRIV_TABLE_CAT, "TABLE_CAT", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, TABPRIV_TABLE_SCHEM, "TABLE_SCHEM", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + QR_set_field_info_v(res, TABPRIV_TABLE_NAME, "TABLE_NAME", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, TABPRIV_GRANTOR, "GRANTOR", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, TABPRIV_GRANTEE, "GRANTEE", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, TABPRIV_PRIVILEGE, "PRIVILEGE", ES_TYPE_VARCHAR, + MAX_INFO_STRING); + QR_set_field_info_v(res, TABPRIV_IS_GRANTABLE, "IS_GRANTABLE", + ES_TYPE_VARCHAR, MAX_INFO_STRING); + + // Set result to okay and adjust fields if keys exist + QR_set_rstatus(res, PORES_FIELDS_OK); + res->num_fields = CI_get_num_fields(QR_get_fields(res)); + if (QR_haskeyset(res)) + res->num_fields -= res->num_key_fields; + + // Finalize data + stmt->status = STMT_FINISHED; + stmt->currTuple = -1; + SC_set_rowset_start(stmt, -1, FALSE); + SC_set_current_col(stmt, -1); + + return SQL_SUCCESS; +} diff --git a/sql-odbc/src/odfesqlodbc/loadlib.c b/sql-odbc/src/odfesqlodbc/loadlib.c new file mode 100644 index 0000000000..628681aa9c --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/loadlib.c @@ -0,0 +1,274 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include +#include +#include +#ifndef WIN32 +#include +#endif /* WIN32 */ + +#include "elasticenlist.h" +#include "loadlib.h" +#include "misc.h" + +#ifdef WIN32 +#ifdef _MSC_VER +#pragma comment(lib, "Delayimp") +#ifdef _HANDLE_ENLIST_IN_DTC_ +#ifdef UNICODE_SUPPORT +#pragma comment(lib, "elasticenlist") +#else +#pragma comment(lib, "elasticenlista") +#endif /* UNICODE_SUPPORT */ +#endif /* _HANDLE_ENLIST_IN_DTC_ */ +// The followings works under VC++6.0 but doesn't work under VC++7.0. +// Please add the equivalent linker options using command line etc. 
+#if (_MSC_VER == 1200) && defined(DYNAMIC_LOAD) // VC6.0 +#ifdef UNICODE_SUPPORT +#pragma comment(linker, "/Delayload:elasticenlist.dll") +#else +#pragma comment(linker, "/Delayload:elasticenlista.dll") +#endif /* UNICODE_SUPPORT */ +#pragma comment(linker, "/Delay:UNLOAD") +#endif /* _MSC_VER */ +#endif /* _MSC_VER */ + +#if defined(DYNAMIC_LOAD) +#define WIN_DYN_LOAD +#ifdef UNICODE_SUPPORT +CSTR elasticenlist = "elasticenlist"; +CSTR elasticenlistdll = "elasticenlist.dll"; +CSTR elasticodbc = "odfesqlodbc35w"; +CSTR elasticodbcdll = "odfesqlodbc35w.dll"; +#else +CSTR elasticenlist = "elasticenlista"; +CSTR elasticenlistdll = "elasticenlista.dll"; +CSTR elasticodbc = "odfesqlodbc30a"; +CSTR elasticodbcdll = "odfesqlodbc30a.dll"; +#endif /* UNICODE_SUPPORT */ +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +#define _MSC_DELAY_LOAD_IMPORT +#endif /* MSC_VER */ +#endif /* DYNAMIC_LOAD */ +#endif /* WIN32 */ + +#if defined(_MSC_DELAY_LOAD_IMPORT) +/* + * Error hook function for delay load import. + * Try to load a DLL based on elasticodbc path. 
+ */ +#if (_MSC_VER >= 1900) /* vc14 or later */ +#define TRY_DLI_HOOK __try { +#define RELEASE_NOTIFY_HOOK +#elif (_MSC_VER < 1300) /* vc6 */ +extern PfnDliHook __pfnDliFailureHook; +extern PfnDliHook __pfnDliNotifyHook; +#define TRY_DLI_HOOK \ + __try { \ + __pfnDliFailureHook = DliErrorHook; \ + __pfnDliNotifyHook = DliErrorHook; +#define RELEASE_NOTIFY_HOOK __pfnDliNotifyHook = NULL; +#else /* vc7 ~ 12 */ +extern PfnDliHook __pfnDliFailureHook2; +extern PfnDliHook __pfnDliNotifyHook2; +#define TRY_DLI_HOOK \ + __try { \ + __pfnDliFailureHook2 = DliErrorHook; \ + __pfnDliNotifyHook2 = DliErrorHook; +#define RELEASE_NOTIFY_HOOK __pfnDliNotifyHook2 = NULL; +#endif /* _MSC_VER */ +#else +#define TRY_DLI_HOOK __try { +#define RELEASE_NOTIFY_HOOK +#endif /* _MSC_DELAY_LOAD_IMPORT */ + +#if defined(_MSC_DELAY_LOAD_IMPORT) +static BOOL loaded_elasticenlist = FALSE; +static HMODULE enlist_module = NULL; +static BOOL loaded_elasticodbc = FALSE; +/* + * Load a DLL based on elasticodbc path. + */ +HMODULE MODULE_load_from_elasticodbc_path(const char *module_name) { + extern HINSTANCE s_hModule; + HMODULE hmodule = NULL; + char szFileName[MAX_PATH]; + + if (GetModuleFileName(s_hModule, szFileName, sizeof(szFileName)) > 0) { + char drive[_MAX_DRIVE], dir[_MAX_DIR], sysdir[MAX_PATH]; + + _splitpath(szFileName, drive, dir, NULL, NULL); + GetSystemDirectory(sysdir, MAX_PATH); + SPRINTF_FIXED(szFileName, "%s%s%s.dll", drive, dir, module_name); + if (_strnicmp(szFileName, sysdir, strlen(sysdir)) != 0) { + hmodule = + LoadLibraryEx(szFileName, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + MYLOG(ES_DEBUG, "elasticodbc path based %s loaded module=%p\n", + module_name, hmodule); + } + } + return hmodule; +} + +static FARPROC WINAPI DliErrorHook(unsigned dliNotify, PDelayLoadInfo pdli) { + HMODULE hmodule = NULL; + const char *call_module = NULL; + + MYLOG(ES_DEBUG, "Dli%sHook %s Notify=%d\n", + (dliFailLoadLib == dliNotify || dliFailGetProc == dliNotify) + ? 
"Error" + : "Notify", + NULL != pdli->szDll ? pdli->szDll : pdli->dlp.szProcName, dliNotify); + switch (dliNotify) { + case dliNotePreLoadLibrary: + case dliFailLoadLib: + RELEASE_NOTIFY_HOOK + if (_strnicmp(pdli->szDll, elasticodbc, strlen(elasticodbc)) == 0) + call_module = elasticodbc; + if (call_module) { + if (hmodule = MODULE_load_from_elasticodbc_path(call_module), + NULL == hmodule) + hmodule = LoadLibrary(call_module); + if (NULL != hmodule) { + if (elasticenlist == call_module) + loaded_elasticenlist = TRUE; + else if (elasticodbc == call_module) + loaded_elasticodbc = TRUE; + } + } + break; + } + return (FARPROC)hmodule; +} + +void AlreadyLoadedElasticsearchodbc(void) { + loaded_elasticodbc = TRUE; +} + +/* + * unload delay loaded libraries. + */ + +typedef BOOL(WINAPI *UnloadFunc)(LPCSTR); +void CleanupDelayLoadedDLLs(void) { + BOOL success; +#if (_MSC_VER < 1300) /* VC6 DELAYLOAD IMPORT */ + UnloadFunc func = __FUnloadDelayLoadedDLL; +#else + UnloadFunc func = __FUnloadDelayLoadedDLL2; +#endif + /* The dll names are case sensitive for the unload helper */ + if (loaded_elasticenlist) { + if (enlist_module != NULL) { + MYLOG(ES_DEBUG, "Freeing Library %s\n", elasticenlistdll); + FreeLibrary(enlist_module); + } + MYLOG(ES_DEBUG, "%s unloading\n", elasticenlistdll); + success = (*func)(elasticenlistdll); + MYLOG(ES_DEBUG, "%s unloaded success=%d\n", elasticenlistdll, success); + loaded_elasticenlist = FALSE; + } + if (loaded_elasticodbc) { + MYLOG(ES_DEBUG, "%s unloading\n", elasticodbcdll); + success = (*func)(elasticodbcdll); + MYLOG(ES_DEBUG, "%s unloaded success=%d\n", elasticodbcdll, success); + loaded_elasticodbc = FALSE; + } + return; +} +#else +void CleanupDelayLoadedDLLs(void) { + return; +} +#endif /* _MSC_DELAY_LOAD_IMPORT */ + +#ifdef _HANDLE_ENLIST_IN_DTC_ +RETCODE CALL_EnlistInDtc(ConnectionClass *conn, void *pTra, int method) { + RETCODE ret; + BOOL loaded = TRUE; + +#if defined(_MSC_DELAY_LOAD_IMPORT) + if (!loaded_elasticenlist) { + 
TRY_DLI_HOOK + ret = EnlistInDtc(conn, pTra, method); + } + __except ((GetExceptionCode() & 0xffff) == ERROR_MOD_NOT_FOUND + ? EXCEPTION_EXECUTE_HANDLER + : EXCEPTION_CONTINUE_SEARCH) { + if (enlist_module = MODULE_load_from_elasticodbc_path(elasticenlist), + NULL == enlist_module) + loaded = FALSE; + else + ret = EnlistInDtc(conn, pTra, method); + } + if (loaded) + loaded_elasticenlist = TRUE; + RELEASE_NOTIFY_HOOK +} +else ret = EnlistInDtc(conn, pTra, method); +#else + ret = EnlistInDtc(conn, pTra, method); + loaded_elasticenlist = TRUE; +#endif /* _MSC_DELAY_LOAD_IMPORT */ +return ret; +} +RETCODE CALL_DtcOnDisconnect(ConnectionClass *conn) { + if (loaded_elasticenlist) + return DtcOnDisconnect(conn); + return FALSE; +} +RETCODE CALL_IsolateDtcConn(ConnectionClass *conn, BOOL continueConnection) { + if (loaded_elasticenlist) + return IsolateDtcConn(conn, continueConnection); + return FALSE; +} + +void *CALL_GetTransactionObject(HRESULT *hres) { + void *ret = NULL; + BOOL loaded = TRUE; + +#if defined(_MSC_DELAY_LOAD_IMPORT) + if (!loaded_elasticenlist) { + TRY_DLI_HOOK + ret = GetTransactionObject(hres); + } + __except ((GetExceptionCode() & 0xffff) == ERROR_MOD_NOT_FOUND + ? 
EXCEPTION_EXECUTE_HANDLER + : EXCEPTION_CONTINUE_SEARCH) { + if (enlist_module = MODULE_load_from_elasticodbc_path(elasticenlist), + NULL == enlist_module) + loaded = FALSE; + else + ret = GetTransactionObject(hres); + } + if (loaded) + loaded_elasticenlist = TRUE; + RELEASE_NOTIFY_HOOK +} +else ret = GetTransactionObject(hres); +#else + ret = GetTransactionObject(hres); + loaded_elasticenlist = TRUE; +#endif /* _MSC_DELAY_LOAD_IMPORT */ +return ret; +} +void CALL_ReleaseTransactionObject(void *pObj) { + if (loaded_elasticenlist) + ReleaseTransactionObject(pObj); + return; +} +#endif /* _HANDLE_ENLIST_IN_DTC_ */ diff --git a/sql-odbc/src/odfesqlodbc/loadlib.h b/sql-odbc/src/odfesqlodbc/loadlib.h new file mode 100644 index 0000000000..718e78140f --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/loadlib.h @@ -0,0 +1,51 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef __LOADLIB_H__ +#define __LOADLIB_H__ + +#include "es_odbc.h" +#ifdef HAVE_LIBLTDL +#include +#else +#ifdef HAVE_DLFCN_H +#include +#endif /* HAVE_DLFCN_H */ +#endif /* HAVE_LIBLTDL */ + +#include +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _HANDLE_ENLIST_IN_DTC_ +RETCODE CALL_EnlistInDtc(ConnectionClass *conn, void *pTra, int method); +RETCODE CALL_DtcOnDisconnect(ConnectionClass *); +RETCODE CALL_IsolateDtcConn(ConnectionClass *, BOOL); +void *CALL_GetTransactionObject(HRESULT *); +void CALL_ReleaseTransactionObject(void *); +#endif /* _HANDLE_ENLIST_IN_DTC_ */ +/* void UnloadDelayLoadedDLLs(BOOL); */ +void CleanupDelayLoadedDLLs(void); +#ifdef WIN32 +HMODULE MODULE_load_from_elasticodbc_path(const char *module_name); +void AlreadyLoadedElasticsearchodbc(void); +#endif /* WIN32 */ + +#ifdef __cplusplus +} +#endif +#endif /* __LOADLIB_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/misc.c b/sql-odbc/src/odfesqlodbc/misc.c new file mode 100644 index 0000000000..c6a8b606f2 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/misc.c @@ -0,0 +1,217 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "es_odbc.h" +#include "misc.h" + +#include +#include +#include +#include +#include +// clang-format on + +#ifndef WIN32 +#include +#include +#include +#else +#include /* Byron: is this where Windows keeps def. + * of getpid ? 
*/ +#endif + +/* + * returns STRCPY_FAIL, STRCPY_TRUNCATED, or #bytes copied + * (not including null term) + */ +ssize_t my_strcpy(char *dst, ssize_t dst_len, const char *src, + ssize_t src_len) { + if (dst_len <= 0) + return STRCPY_FAIL; + + if (src_len == SQL_NULL_DATA) { + dst[0] = '\0'; + return STRCPY_NULL; + } else if (src_len == SQL_NTS) + src_len = strlen(src); + + if (src_len <= 0) + return STRCPY_FAIL; + else { + if (src_len < dst_len) { + memcpy(dst, src, src_len); + dst[src_len] = '\0'; + } else { + memcpy(dst, src, dst_len - 1); + dst[dst_len - 1] = '\0'; /* truncated */ + return STRCPY_TRUNCATED; + } + } + + return strlen(dst); +} + +/* + * strncpy copies up to len characters, and doesn't terminate + * the destination string if src has len characters or more. + * instead, I want it to copy up to len-1 characters and always + * terminate the destination string. + */ +size_t strncpy_null(char *dst, const char *src, ssize_t len) { + int i; + + if (NULL != dst && len > 0) { + for (i = 0; src[i] && i < len - 1; i++) + dst[i] = src[i]; + + dst[i] = '\0'; + } else + return 0; + if (src[i]) + return strlen(src); + return i; +} + +/*------ + * Create a null terminated string (handling the SQL_NTS thing): + * 1. If buf is supplied, place the string in there + * (assumes enough space) and return buf. + * 2. If buf is not supplied, malloc space and return this string + *------ + */ +char *make_string(const SQLCHAR *s, SQLINTEGER len, char *buf, size_t bufsize) { + size_t length; + char *str; + + if (!s || SQL_NULL_DATA == len) + return NULL; + if (len >= 0) + length = len; + else if (SQL_NTS == len) + length = strlen((char *)s); + else { + MYLOG(ES_DEBUG, "invalid length=" FORMAT_INTEGER "\n", len); + return NULL; + } + if (buf) { + strncpy_null(buf, (char *)s, bufsize > length ? 
length + 1 : bufsize); + return buf; + } + + MYLOG(ES_DEBUG, "malloc size=" FORMAT_SIZE_T "\n", length); + str = malloc(length + 1); + MYLOG(ES_DEBUG, "str=%p\n", str); + if (!str) + return NULL; + + strncpy_null(str, (char *)s, length + 1); + return str; +} + +/* + * snprintfcat is a extension to snprintf + * It add format to buf at given pos + */ +#ifdef POSIX_SNPRINTF_REQUIRED +static posix_vsnprintf(char *str, size_t size, const char *format, va_list ap); +#define vsnprintf posix_vsnprintf +#endif /* POSIX_SNPRINTF_REQUIRED */ + +int snprintfcat(char *buf, size_t size, const char *format, ...) { + int len; + size_t pos = strlen(buf); + va_list arglist; + + va_start(arglist, format); + len = vsnprintf(buf + pos, size - pos, format, arglist); + va_end(arglist); + return len + (int)pos; +} + +/* + * Windows doesn't have snprintf(). It has _snprintf() which is similar, + * but it behaves differently wrt. truncation. This is a compatibility + * function that uses _snprintf() to provide POSIX snprintf() behavior. + * + * Our strategy, if the output doesn't fit, is to create a temporary buffer + * and call _snprintf() on that. If it still doesn't fit, enlarge the buffer + * and repeat. + */ +#ifdef POSIX_SNPRINTF_REQUIRED +static int posix_vsnprintf(char *str, size_t size, const char *format, + va_list ap) { + int len; + char *tmp; + size_t newsize; + + len = _vsnprintf(str, size, format, ap); + if (len < 0) { + if (size == 0) + newsize = 100; + else + newsize = size; + do { + newsize *= 2; + tmp = malloc(newsize); + if (!tmp) + return -1; + len = _vsnprintf(tmp, newsize, format, ap); + if (len >= 0) + memcpy(str, tmp, size); + free(tmp); + } while (len < 0); + } + if (len >= size && size > 0) { + /* Ensure the buffer is NULL-terminated */ + str[size - 1] = '\0'; + } + return len; +} + +int posix_snprintf(char *buf, size_t size, const char *format, ...) 
{ + int len; + va_list arglist; + + va_start(arglist, format); + len = posix_vsnprintf(buf, size, format, arglist); + va_end(arglist); + return len; +} +#endif /* POSIX_SNPRINTF_REQUIRED */ + +#ifndef HAVE_STRLCAT +size_t strlcat(char *dst, const char *src, size_t size) { + size_t ttllen; + char *pd = dst; + const char *ps = src; + + for (ttllen = 0; ttllen < size; ttllen++, pd++) { + if (0 == *pd) + break; + } + if (ttllen >= size - 1) + return ttllen + strlen(src); + for (; ttllen < size - 1; ttllen++, pd++, ps++) { + if (0 == (*pd = *ps)) + return ttllen; + } + *pd = 0; + for (; *ps; ttllen++, ps++) + ; + return ttllen; +} +#endif /* HAVE_STRLCAT */ diff --git a/sql-odbc/src/odfesqlodbc/misc.h b/sql-odbc/src/odfesqlodbc/misc.h new file mode 100644 index 0000000000..62b7e7477a --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/misc.h @@ -0,0 +1,109 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __MISC_H__ +#define __MISC_H__ + +#include +#ifndef WIN32 +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +size_t strncpy_null(char *dst, const char *src, ssize_t len); +#ifndef HAVE_STRLCAT +size_t strlcat(char *, const char *, size_t); +#endif /* HAVE_STRLCAT */ +int snprintfcat(char *buf, size_t size, const char *format, ...) 
+ __attribute__((format(ES_PRINTF_ATTRIBUTE, 3, 4))); + +char *make_string(const SQLCHAR *s, SQLINTEGER len, char *buf, size_t bufsize); +/* #define GET_SCHEMA_NAME(nspname) (stricmp(nspname, "public") ? nspname : + * "") */ + +#define GET_SCHEMA_NAME(nspname) (nspname) + +/* defines for return value of my_strcpy */ +#define STRCPY_SUCCESS 1 +#define STRCPY_FAIL 0 +#define STRCPY_TRUNCATED (-1) +#define STRCPY_NULL (-2) + +ssize_t my_strcpy(char *dst, ssize_t dst_len, const char *src, ssize_t src_len); + +/* + * Macros to safely strcpy, strcat or sprintf to fixed arrays. + * + */ + +/* + * With GCC, the macro CHECK_NOT_CHAR_P() causes a compilation error + * when the target is pointer not a fixed array. + */ +#if ((__GNUC__ * 100) + __GNUC_MINOR__) >= 406 +#define FUNCTION_BEGIN_MACRO ({ +#define FUNCTION_END_MACRO \ + ; \ + }) +#define CHECK_NOT_CHAR_P(t) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wunused-variable\"") if (0) { \ + typeof(t) dummy_for_check = {}; \ + } \ + _Pragma("GCC diagnostic pop") +#else +#define FUNCTION_BEGIN_MACRO +#define FUNCTION_END_MACRO +#define CHECK_NOT_CHAR_P(t) +#endif + +/* macro to safely strcpy() to fixed arrays. */ +#define STRCPY_FIXED(to, from) \ + FUNCTION_BEGIN_MACRO \ + CHECK_NOT_CHAR_P(to) \ + strncpy_null((to), (from), sizeof(to)) FUNCTION_END_MACRO + +/* macro to safely strcat() to fixed arrays. */ +#define STRCAT_FIXED(to, from) \ + FUNCTION_BEGIN_MACRO \ + CHECK_NOT_CHAR_P(to) \ + strlcat((to), (from), sizeof(to)) FUNCTION_END_MACRO + +/* macro to safely sprintf() to fixed arrays. */ +#define SPRINTF_FIXED(to, ...) \ + FUNCTION_BEGIN_MACRO \ + CHECK_NOT_CHAR_P(to) \ + snprintf((to), sizeof(to), __VA_ARGS__) FUNCTION_END_MACRO + +/* macro to safely sprintf() & cat to fixed arrays. */ +#define SPRINTFCAT_FIXED(to, ...) 
\ + FUNCTION_BEGIN_MACRO \ + CHECK_NOT_CHAR_P(to) \ + snprintfcat((to), sizeof(to), __VA_ARGS__) FUNCTION_END_MACRO + +#define ITOA_FIXED(to, from) \ + FUNCTION_BEGIN_MACRO \ + CHECK_NOT_CHAR_P(to) \ + snprintf((to), sizeof(to), "%d", from) FUNCTION_END_MACRO + +#ifdef __cplusplus +} +#endif + +#endif /* __MISC_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/multibyte.c b/sql-odbc/src/odfesqlodbc/multibyte.c new file mode 100644 index 0000000000..9199f84a49 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/multibyte.c @@ -0,0 +1,369 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include +#include +#include +#include +#include "es_apifunc.h" +#include "es_connection.h" +#include "misc.h" +#include "multibyte.h" +#ifndef WIN32 +#include +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +typedef struct ES_CS { + char *name; + int code; +} ES_CS; + +static ES_CS CS_Table[] = { + {"SQL_ASCII", SQL_ASCII}, + {"EUC_JP", EUC_JP}, + {"EUC_CN", EUC_CN}, + {"EUC_KR", EUC_KR}, + {"EUC_TW", EUC_TW}, + {"JOHAB", JOHAB}, /* since 7.3 */ + {"UTF8", UTF8}, /* since 7.2 */ + {"MULE_INTERNAL", MULE_INTERNAL}, + {"LATIN1", LATIN1}, + {"LATIN2", LATIN2}, + {"LATIN3", LATIN3}, + {"LATIN4", LATIN4}, + {"LATIN5", LATIN5}, + {"LATIN6", LATIN6}, + {"LATIN7", LATIN7}, + {"LATIN8", LATIN8}, + {"LATIN9", LATIN9}, + {"LATIN10", LATIN10}, + {"WIN1256", WIN1256}, /* Arabic since 7.3 */ + {"WIN1258", WIN1258}, /* Vietnamese since 8.1 */ + {"WIN866", WIN866}, /* since 8.1 */ + {"WIN874", WIN874}, /* Thai since 7.3 */ + {"KOI8", KOI8R}, + {"WIN1251", WIN1251}, /* Cyrillic */ + {"WIN1252", WIN1252}, /* Western Europe since 8.1 */ + {"ISO_8859_5", ISO_8859_5}, + {"ISO_8859_6", ISO_8859_6}, + {"ISO_8859_7", ISO_8859_7}, + {"ISO_8859_8", ISO_8859_8}, + {"WIN1250", WIN1250}, /* Central Europe */ + {"WIN1253", WIN1253}, /* Greek since 8.2 */ + {"WIN1254", WIN1254}, /* Turkish since 8.2 */ + {"WIN1255", WIN1255}, /* Hebrew since 8.2 */ + {"WIN1257", WIN1257}, /* Baltic(North Europe) since 8.2 */ + + {"EUC_JIS_2004", + EUC_JIS_2004}, /* EUC for SHIFT-JIS-2004 Japanese, since 8.3 */ + {"SJIS", SJIS}, + {"BIG5", BIG5}, + {"GBK", GBK}, /* since 7.3 */ + {"UHC", UHC}, /* since 7.3 */ + {"GB18030", GB18030}, /* since 7.3 */ + {"SHIFT_JIS_2004", SHIFT_JIS_2004}, /* SHIFT-JIS-2004 Japanese, standard JIS + X 0213, since 8.3 */ + {"OTHER", OTHER}}; + +static ES_CS CS_Alias[] = {{"UNICODE", UTF8}, {"TCVN", WIN1258}, + {"ALT", WIN866}, {"WIN", WIN1251}, + {"KOI8R", KOI8R}, {"OTHER", OTHER}}; + +int es_CS_code(const char *characterset_string) { + int i, c = -1; + + for (i = 0; 
CS_Table[i].code != OTHER; i++) { + if (0 == stricmp(characterset_string, CS_Table[i].name)) { + c = CS_Table[i].code; + break; + } + } + if (c < 0) { + for (i = 0; CS_Alias[i].code != OTHER; i++) { + if (0 == stricmp(characterset_string, CS_Alias[i].name)) { + c = CS_Alias[i].code; + break; + } + } + } + if (c < 0) + c = OTHER; + return (c); +} + +int es_mb_maxlen(int characterset_code) { + switch (characterset_code) { + case UTF8: + return 4; + case EUC_TW: + return 4; + case EUC_JIS_2004: + case EUC_JP: + case GB18030: + return 3; + case SHIFT_JIS_2004: + case SJIS: + case BIG5: + case GBK: + case UHC: + case EUC_CN: + case EUC_KR: + case JOHAB: + return 2; + default: + return 1; + } +} + +static int es_CS_stat(int stat, unsigned int character, int characterset_code) { + if (character == 0) + stat = 0; + switch (characterset_code) { + case UTF8: { + if (stat < 2 && character >= 0x80) { + if (character >= 0xfc) + stat = 6; + else if (character >= 0xf8) + stat = 5; + else if (character >= 0xf0) + stat = 4; + else if (character >= 0xe0) + stat = 3; + else if (character >= 0xc0) + stat = 2; + } else if (stat >= 2 && character > 0x7f) + stat--; + else + stat = 0; + } break; + /* SHIFT_JIS_2004 Support. */ + case SHIFT_JIS_2004: { + if (stat < 2 && character >= 0x81 && character <= 0x9f) + stat = 2; + else if (stat < 2 && character >= 0xe0 && character <= 0xef) + stat = 2; + else if (stat < 2 && character >= 0xf0 && character <= 0xfc) + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } break; + /* Shift-JIS Support. */ + case SJIS: { + if (stat < 2 && character > 0x80 + && !(character > 0x9f && character < 0xe0)) + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } break; + /* Chinese Big5 Support. */ + case BIG5: { + if (stat < 2 && character > 0xA0) + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } break; + /* Chinese GBK Support. 
*/ + case GBK: { + if (stat < 2 && character > 0x7F) + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } break; + + /* Korian UHC Support. */ + case UHC: { + if (stat < 2 && character > 0x7F) + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } break; + + case EUC_JIS_2004: + /* 0x8f is JIS X 0212 + JIS X 0213(2) 3 byte */ + /* 0x8e is JIS X 0201 2 byte */ + /* 0xa0-0xff is JIS X 0213(1) 2 byte */ + case EUC_JP: + /* 0x8f is JIS X 0212 3 byte */ + /* 0x8e is JIS X 0201 2 byte */ + /* 0xa0-0xff is JIS X 0208 2 byte */ + { + if (stat < 3 && character == 0x8f) /* JIS X 0212 */ + stat = 3; + else if (stat != 2 + && (character == 0x8e + || character > 0xa0)) /* Half Katakana HighByte & + Kanji HighByte */ + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } + break; + + /* EUC_CN, EUC_KR, JOHAB Support */ + case EUC_CN: + case EUC_KR: + case JOHAB: { + if (stat < 2 && character > 0xa0) + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } break; + case EUC_TW: { + if (stat < 4 && character == 0x8e) + stat = 4; + else if (stat == 4 && character > 0xa0) + stat = 3; + else if ((stat == 3 || stat < 2) && character > 0xa0) + stat = 2; + else if (stat == 2) + stat = 1; + else + stat = 0; + } break; + /*Chinese GB18030 support.Added by Bill Huang + * */ + case GB18030: { + if (stat < 2 && character > 0x80) + stat = 2; + else if (stat == 2) { + if (character >= 0x30 && character <= 0x39) + stat = 3; + else + stat = 1; + } else if (stat == 3) { + if (character >= 0x30 && character <= 0x39) + stat = 1; + else + stat = 3; + } else + stat = 0; + } break; + default: { + stat = 0; + } break; + } + return stat; +} + +/* + * This function is used to know the encoding corresponding to + * the current locale. 
+ */ +const char *derive_locale_encoding(const char *dbencoding) { + UNUSED(dbencoding); + const char *wenc = NULL; +#ifdef WIN32 + int acp; +#endif /* WIN32 */ + + if (wenc = getenv("ESCLIENTENCODING"), + NULL != wenc) /* environmnt variable */ + return wenc; +#ifdef WIN32 + acp = GetACP(); + if (acp >= 1251 && acp <= 1258) { + if (stricmp(dbencoding, "SQL_ASCII") == 0) + return wenc; + } + switch (acp) { + case 932: + wenc = "SJIS"; + break; + case 936: + wenc = "GBK"; + break; + case 949: + wenc = "UHC"; + break; + case 950: + wenc = "BIG5"; + break; + case 1250: + wenc = "WIN1250"; + break; + case 1251: + wenc = "WIN1251"; + break; + case 1256: + wenc = "WIN1256"; + break; + case 1252: + if (strnicmp(dbencoding, "LATIN", 5) == 0) + break; + wenc = "WIN1252"; + break; + case 1258: + wenc = "WIN1258"; + break; + case 1253: + wenc = "WIN1253"; + break; + case 1254: + wenc = "WIN1254"; + break; + case 1255: + wenc = "WIN1255"; + break; + case 1257: + wenc = "WIN1257"; + break; + } +#else + // TODO #34 - Investigate locale handling on Mac +#endif /* WIN32 */ + return wenc; +} + +void encoded_str_constr(encoded_str *encstr, int ccsc, const char *str) { + encstr->ccsc = ccsc; + encstr->encstr = (const UCHAR *)str; + encstr->pos = -1; + encstr->ccst = 0; +} +int encoded_nextchar(encoded_str *encstr) { + int chr; + + if (encstr->pos >= 0 && !encstr->encstr[encstr->pos]) + return 0; + chr = encstr->encstr[++encstr->pos]; + encstr->ccst = es_CS_stat(encstr->ccst, (unsigned int)chr, encstr->ccsc); + return chr; +} + +int encoded_byte_check(encoded_str *encstr, size_t abspos) { + int chr; + + chr = encstr->encstr[encstr->pos = abspos]; + encstr->ccst = es_CS_stat(encstr->ccst, (unsigned int)chr, encstr->ccsc); + return chr; +} diff --git a/sql-odbc/src/odfesqlodbc/multibyte.h b/sql-odbc/src/odfesqlodbc/multibyte.h new file mode 100644 index 0000000000..c3251231a7 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/multibyte.h @@ -0,0 +1,140 @@ +/* + * Copyright <2019> Amazon.com, 
Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __MULTIBUYTE_H__ +#define __MULTIBUYTE_H__ + +/* + * + * Multibyte library header + * + */ +#include "es_odbc.h" +#include "qresult.h" + +/* Elastic client encoding */ +enum { + SQL_ASCII = 0 /* SQL/ASCII */ + , + EUC_JP /* EUC for Japanese */ + , + EUC_CN /* EUC for Chinese */ + , + EUC_KR /* EUC for Korean */ + , + EUC_TW /* EUC for Taiwan */ + , + JOHAB, + UTF8 /* Unicode UTF-8 */ + , + MULE_INTERNAL /* Mule internal code */ + , + LATIN1 /* ISO-8859 Latin 1 */ + , + LATIN2 /* ISO-8859 Latin 2 */ + , + LATIN3 /* ISO-8859 Latin 3 */ + , + LATIN4 /* ISO-8859 Latin 4 */ + , + LATIN5 /* ISO-8859 Latin 5 */ + , + LATIN6 /* ISO-8859 Latin 6 */ + , + LATIN7 /* ISO-8859 Latin 7 */ + , + LATIN8 /* ISO-8859 Latin 8 */ + , + LATIN9 /* ISO-8859 Latin 9 */ + , + LATIN10 /* ISO-8859 Latin 10 */ + , + WIN1256 /* Arabic Windows */ + , + WIN1258 /* Vietnamese Windows */ + , + WIN866 /* Alternativny Variant (MS-DOS CP866) */ + , + WIN874 /* Thai Windows */ + , + KOI8R /* KOI8-R/U */ + , + WIN1251 /* Cyrillic Windows */ + , + WIN1252 /* Western Europe Windows */ + , + ISO_8859_5 /* ISO-8859-5 */ + , + ISO_8859_6 /* ISO-8859-6 */ + , + ISO_8859_7 /* ISO-8859-7 */ + , + ISO_8859_8 /* ISO-8859-8 */ + , + WIN1250 /* Central Europe Windows */ + , + WIN1253 /* Greek Windows */ + , + WIN1254 /* Turkish Windows */ + , + WIN1255 /* Hebrew Windows */ + , + WIN1257 /* Baltic(North Europe) Windows */ + , + EUC_JIS_2004 
/* EUC for SHIFT-JIS-2004 Japanese */ + , + SJIS /* Shift JIS */ + , + BIG5 /* Big5 */ + , + GBK /* GBK */ + , + UHC /* UHC */ + , + GB18030 /* GB18030 */ + , + SHIFT_JIS_2004 /* SHIFT-JIS-2004 Japanese, JIS X 0213 */ + , + OTHER = -1 +}; + +/* Old Type Compatible */ +typedef struct { + int ccsc; + const UCHAR *encstr; + ssize_t pos; + int ccst; +} encoded_str; + +#ifdef __cplusplus +extern "C" { +#endif +int es_CS_code(const char *stat_string); +int encoded_nextchar(encoded_str *encstr); +int encoded_byte_check(encoded_str *encstr, size_t abspos); +const char *derive_locale_encoding(const char *dbencoding); +int es_mb_maxlen(int characterset_code); +#ifdef __cplusplus +} +#endif +#define ENCODE_STATUS(enc) ((enc).ccst) +#define ENCODE_PTR(enc) ((enc).encstr + (enc).pos) +#define MBCS_NON_ASCII(enc) (0 != (enc).ccst || (enc).encstr[(enc).pos] >= 0x80) + +void encoded_str_constr(encoded_str *encstr, int ccsc, const char *str); +#define make_encoded_str(encstr, conn, str) \ + encoded_str_constr(encstr, conn->ccsc, str) +#endif /* __MULTIBUYTE_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/mylog.c b/sql-odbc/src/odfesqlodbc/mylog.c new file mode 100644 index 0000000000..1e50b7b140 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/mylog.c @@ -0,0 +1,541 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#define _MYLOG_FUNCS_IMPLEMENT_ +#include +#include +#include +#include +#include +#include + +#include "dlg_specific.h" +#include "es_helper.h" +#include "es_odbc.h" +#include "misc.h" + +#ifndef WIN32 +#include +#include +#include +#include +#define GENERAL_ERRNO (errno) +#define GENERAL_ERRNO_SET(e) (errno = e) +#else +#define GENERAL_ERRNO (GetLastError()) +#define GENERAL_ERRNO_SET(e) SetLastError(e) +#include /* Byron: is this where Windows keeps def. + * of getpid ? */ +#endif + +#ifdef WIN32 +#define DIRSEPARATOR "\\" +#define ES_BINARY O_BINARY +#define ES_BINARY_R "rb" +#define ES_BINARY_W "wb" +#define ES_BINARY_A "ab" +#else +#define DIRSEPARATOR "/" +#define ES_BINARY 0 +#define ES_BINARY_R "r" +#define ES_BINARY_W "w" +#define ES_BINARY_A "a" +#endif /* WIN32 */ + +static char *logdir = NULL; + +void generate_filename(const char *dirname, const char *prefix, char *filename, + size_t filenamelen) { + const char *exename = GetExeProgramName(); +#ifdef WIN32 + int pid; + + pid = _getpid(); +#else + pid_t pid; + struct passwd *ptr; + + ptr = getpwuid(getuid()); + pid = getpid(); +#endif + if (dirname == 0 || filename == 0) + return; + + snprintf(filename, filenamelen, "%s%s", dirname, DIRSEPARATOR); + if (prefix != 0) + strlcat(filename, prefix, filenamelen); + if (exename[0]) + snprintfcat(filename, filenamelen, "%s_", exename); +#ifndef WIN32 + if (ptr) + strlcat(filename, ptr->pw_name, filenamelen); +#endif + snprintfcat(filename, filenamelen, "%u%s", pid, ".log"); + return; +} + +static void generate_homefile(const char *prefix, char *filename, + size_t filenamelen) { + char dir[PATH_MAX]; +#ifdef WIN32 + const char *ptr; + + dir[0] = '\0'; + if (ptr = getenv("HOMEDRIVE"), NULL != ptr) + strlcat(dir, ptr, filenamelen); + if (ptr = getenv("HOMEPATH"), NULL != ptr) + strlcat(dir, ptr, filenamelen); +#else + STRCPY_FIXED(dir, "~"); +#endif /* WIN32 */ + generate_filename(dir, prefix, filename, filenamelen); + + return; +} + +#ifdef WIN32 
+static char exename[_MAX_FNAME]; +#elif defined MAXNAMELEN +static char exename[MAXNAMELEN]; +#else +static char exename[256]; +#endif + +const char *GetExeProgramName() { + static int init = 1; + + if (init) { + UCHAR *p; +#ifdef WIN32 + char pathname[_MAX_PATH]; + + if (GetModuleFileName(NULL, pathname, sizeof(pathname)) > 0) + _splitpath(pathname, NULL, NULL, exename, NULL); +#else + CSTR flist[] = {"/proc/self/exe", "/proc/curproc/file", + "/proc/curproc/exe"}; + unsigned long i; + char path_name[256]; + + for (i = 0; i < sizeof(flist) / sizeof(flist[0]); i++) { + if (readlink(flist[i], path_name, sizeof(path_name)) > 0) { + /* fprintf(stderr, "i=%d pathname=%s\n", i, path_name); */ + STRCPY_FIXED(exename, po_basename(path_name)); + break; + } + } +#endif /* WIN32 */ + for (p = (UCHAR *)exename; '\0' != *p; p++) { + if (isalnum(*p)) + continue; + switch (*p) { + case '_': + case '-': + continue; + } + *p = '\0'; /* avoid multi bytes for safety */ + break; + } + init = 0; + } + return exename; +} + +static void *qlog_cs, *mylog_cs; + +static int mylog_on = ES_WARNING, qlog_on = ES_WARNING; + +#define INIT_QLOG_CS XPlatformInitializeCriticalSection(&qlog_cs) +#define ENTER_QLOG_CS XPlatformEnterCriticalSection(qlog_cs) +#define LEAVE_QLOG_CS XPlatformLeaveCriticalSection(qlog_cs) +#define DELETE_QLOG_CS XPlatformDeleteCriticalSection(&qlog_cs) +#define INIT_MYLOG_CS XPlatformInitializeCriticalSection(&mylog_cs) +#define ENTER_MYLOG_CS XPlatformEnterCriticalSection(mylog_cs) +#define LEAVE_MYLOG_CS XPlatformLeaveCriticalSection(mylog_cs) +#define DELETE_MYLOG_CS XPlatformDeleteCriticalSection(&mylog_cs) + +#define MYLOGFILE "mylog_" +#ifndef WIN32 +#define MYLOGDIR "/tmp" +#else +#define MYLOGDIR "c:" +#endif /* WIN32 */ + +#define QLOGFILE "elasticodbc_" +#ifndef WIN32 +#define QLOGDIR "/tmp" +#else +#define QLOGDIR "c:" +#endif /* WIN32 */ + +int get_mylog(void) { + return mylog_on; +} +int get_qlog(void) { + return qlog_on; +} + +const char *po_basename(const 
char *path) { + char *p; + + if (p = strrchr(path, DIRSEPARATOR[0]), NULL != p) + return p + 1; + return path; +} + +void logs_on_off(int cnopen, int mylog_onoff, int qlog_onoff) { + static int mylog_on_count = 0, mylog_off_count = 0, qlog_on_count = 0, + qlog_off_count = 0; + + ENTER_MYLOG_CS; + if (mylog_onoff) + mylog_on_count += cnopen; + else + mylog_off_count += cnopen; + if (mylog_on_count > 0) { + if (mylog_onoff > mylog_on) + mylog_on = mylog_onoff; + else if (mylog_on < 1) + mylog_on = 1; + } else if (mylog_off_count > 0) + mylog_on = 0; + else if (getGlobalDebug() > 0) + mylog_on = getGlobalDebug(); + LEAVE_MYLOG_CS; + + ENTER_QLOG_CS; + if (qlog_onoff) + qlog_on_count += cnopen; + else + qlog_off_count += cnopen; + if (qlog_on_count > 0) { + if (qlog_onoff > qlog_on) + qlog_on = qlog_onoff; + else if (qlog_on < 1) + qlog_on = 1; + } else if (qlog_off_count > 0) + qlog_on = 0; + else if (getGlobalCommlog() > 0) + qlog_on = getGlobalCommlog(); + LEAVE_QLOG_CS; + MYLOG(ES_DEBUG, "mylog_on=%d qlog_on=%d\n", mylog_on, qlog_on); +} + +#ifdef WIN32 +#define LOGGING_PROCESS_TIME +#include +#endif /* WIN32 */ +#ifdef LOGGING_PROCESS_TIME +#include +static DWORD start_time = 0; +#endif /* LOGGING_PROCESS_TIME */ +static FILE *MLOGFP = NULL; + +static void MLOG_open() { + char filebuf[80], errbuf[160]; + BOOL open_error = FALSE; + + if (MLOGFP) + return; + + generate_filename(logdir ? 
logdir : MYLOGDIR, MYLOGFILE, filebuf, + sizeof(filebuf)); + MLOGFP = fopen(filebuf, ES_BINARY_A); + if (!MLOGFP) { + int lasterror = GENERAL_ERRNO; + + open_error = TRUE; + SPRINTF_FIXED(errbuf, "%s open error %d\n", filebuf, lasterror); + generate_homefile(MYLOGFILE, filebuf, sizeof(filebuf)); + MLOGFP = fopen(filebuf, ES_BINARY_A); + } + if (MLOGFP) { + if (open_error) + fputs(errbuf, MLOGFP); + } +} + +static int mylog_misc(unsigned int option, const char *fmt, va_list args) { + // va_list args; + int gerrno; + BOOL log_threadid = option; + + gerrno = GENERAL_ERRNO; + ENTER_MYLOG_CS; +#ifdef LOGGING_PROCESS_TIME + if (!start_time) + start_time = timeGetTime(); +#endif /* LOGGING_PROCESS_TIME */ + // va_start(args, fmt); + + if (!MLOGFP) { + MLOG_open(); + if (!MLOGFP) + mylog_on = 0; + } + + if (MLOGFP) { + if (log_threadid) { +#ifdef WIN_MULTITHREAD_SUPPORT +#ifdef LOGGING_PROCESS_TIME + DWORD proc_time = timeGetTime() - start_time; + fprintf(MLOGFP, "[%u-%d.%03d]", GetCurrentThreadId(), + proc_time / 1000, proc_time % 1000); +#else + fprintf(MLOGFP, "[%u]", GetCurrentThreadId()); +#endif /* LOGGING_PROCESS_TIME */ +#endif /* WIN_MULTITHREAD_SUPPORT */ +#if defined(POSIX_MULTITHREAD_SUPPORT) + fprintf(MLOGFP, "[%lx]", (unsigned long int)pthread_self()); +#endif /* POSIX_MULTITHREAD_SUPPORT */ + } + vfprintf(MLOGFP, fmt, args); + fflush(MLOGFP); + } + + // va_end(args); + LEAVE_MYLOG_CS; + GENERAL_ERRNO_SET(gerrno); + + return 1; +} + +DLL_DECLARE int mylog(const char *fmt, ...) { + int ret = 0; + unsigned int option = 1; + va_list args; + + if (!mylog_on) + return ret; + + va_start(args, fmt); + ret = mylog_misc(option, fmt, args); + va_end(args); + return ret; +} + +DLL_DECLARE int myprintf(const char *fmt, ...) 
{ + int ret = 0; + va_list args; + + va_start(args, fmt); + ret = mylog_misc(0, fmt, args); + va_end(args); + return ret; +} + +static void mylog_initialize(void) { + INIT_MYLOG_CS; +} +static void mylog_finalize(void) { + mylog_on = 0; + if (MLOGFP) { + fclose(MLOGFP); + MLOGFP = NULL; + } + DELETE_MYLOG_CS; +} + +static FILE *QLOGFP = NULL; + +static int qlog_misc(unsigned int option, const char *fmt, va_list args) { + char filebuf[80]; + int gerrno; + + if (!qlog_on) + return 0; + + gerrno = GENERAL_ERRNO; + ENTER_QLOG_CS; +#ifdef LOGGING_PROCESS_TIME + if (!start_time) + start_time = timeGetTime(); +#endif /* LOGGING_PROCESS_TIME */ + + if (!QLOGFP) { + generate_filename(logdir ? logdir : QLOGDIR, QLOGFILE, filebuf, + sizeof(filebuf)); + QLOGFP = fopen(filebuf, ES_BINARY_A); + if (!QLOGFP) { + generate_homefile(QLOGFILE, filebuf, sizeof(filebuf)); + QLOGFP = fopen(filebuf, ES_BINARY_A); + } + if (!QLOGFP) + qlog_on = 0; + } + + if (QLOGFP) { + if (option) { +#ifdef LOGGING_PROCESS_TIME + DWORD proc_time = timeGetTime() - start_time; + fprintf(QLOGFP, "[%d.%03d]", proc_time / 1000, proc_time % 1000); +#endif /* LOGGING_PROCESS_TIME */ + } + vfprintf(QLOGFP, fmt, args); + fflush(QLOGFP); + } + + LEAVE_QLOG_CS; + GENERAL_ERRNO_SET(gerrno); + + return 1; +} +int qlog(const char *fmt, ...) { + int ret = 0; + unsigned int option = 1; + va_list args; + + if (!qlog_on) + return ret; + + va_start(args, fmt); + ret = qlog_misc(option, fmt, args); + va_end(args); + return ret; +} +int qprintf(char *fmt, ...) 
{ + int ret = 0; + va_list args; + + va_start(args, fmt); + ret = qlog_misc(0, fmt, args); + va_end(args); + return ret; +} + +static void qlog_initialize(void) { + INIT_QLOG_CS; +} +static void qlog_finalize(void) { + qlog_on = 0; + if (QLOGFP) { + fclose(QLOGFP); + QLOGFP = NULL; + } + DELETE_QLOG_CS; +} + +static int globalDebug = -1; +int getGlobalDebug() { + char temp[16]; + + if (globalDebug >= 0) + return globalDebug; + /* Debug is stored in the driver section */ + SQLGetPrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, "", temp, sizeof(temp), + ODBCINST_INI); + if (temp[0]) + globalDebug = atoi(temp); + else + globalDebug = DEFAULT_LOGLEVEL; + + return globalDebug; +} + +int setGlobalDebug(int val) { + return (globalDebug = val); +} + +static int globalCommlog = -1; +int getGlobalCommlog() { + char temp[16]; + + if (globalCommlog >= 0) + return globalCommlog; + /* Commlog is stored in the driver section */ + SQLGetPrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, "", temp, sizeof(temp), + ODBCINST_INI); + if (temp[0]) + globalCommlog = atoi(temp); + else + globalCommlog = DEFAULT_LOGLEVEL; + + return globalCommlog; +} + +int setGlobalCommlog(int val) { + return (globalCommlog = val); +} + +int writeGlobalLogs() { + char temp[10]; + + ITOA_FIXED(temp, globalDebug); + SQLWritePrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, temp, ODBCINST_INI); + ITOA_FIXED(temp, globalCommlog); + SQLWritePrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, temp, ODBCINST_INI); + return 0; +} + +void logInstallerError(int ret, const char *dir) { + DWORD err = (DWORD)ret; + char msg[SQL_MAX_MESSAGE_LENGTH] = ""; + msg[0] = '\0'; + ret = SQLInstallerError(1, &err, msg, sizeof(msg), NULL); + if (msg[0] != '\0') + MYLOG(ES_DEBUG, "Dir= %s ErrorMsg = %s\n", dir, msg); +} + +int getLogDir(char *dir, int dirmax) { + int ret = SQLGetPrivateProfileString(DBMS_NAME, INI_LOG_OUTPUT, "", + dir, dirmax, ODBCINST_INI); + if (!ret) + logInstallerError(ret, dir); + return ret; +} + +int setLogDir(const 
char *dir) { + int ret = SQLWritePrivateProfileString(DBMS_NAME, INI_LOG_OUTPUT, dir, + ODBCINST_INI); + if (!ret) + logInstallerError(ret, dir); + return ret; +} + +/* + * This function starts a logging out of connections according the ODBCINST.INI + * portion of the DBMS_NAME registry. + */ +static void start_logging() { + /* + * GlobalDebug or GlobalCommlog means whether take mylog or commlog + * out of the connection time or not but doesn't mean the default of + * ci->drivers.debug(commlog). + */ + logs_on_off(0, 0, 0); + mylog("\t%s:Global.debug&commlog=%d&%d\n", __FUNCTION__, getGlobalDebug(), + getGlobalCommlog()); +} + +void InitializeLogging(void) { + char dir[PATH_MAX]; + getLogDir(dir, sizeof(dir)); + if (dir[0]) + logdir = strdup(dir); + mylog_initialize(); + qlog_initialize(); + start_logging(); + MYLOG(ES_DEBUG, "Log Output Dir: %s\n", logdir); +} + +void FinalizeLogging(void) { + mylog_finalize(); + qlog_finalize(); + if (logdir) { + free(logdir); + logdir = NULL; + } +} diff --git a/sql-odbc/src/odfesqlodbc/mylog.h b/sql-odbc/src/odfesqlodbc/mylog.h new file mode 100644 index 0000000000..cb1f13c63c --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/mylog.h @@ -0,0 +1,158 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef __MYLOG_H__ +#define __MYLOG_H__ + +#undef DLL_DECLARE +#ifdef WIN32 +#ifdef _MYLOG_FUNCS_IMPLEMENT_ +#define DLL_DECLARE _declspec(dllexport) +#else +#ifdef _MYLOG_FUNCS_IMPORT_ +#define DLL_DECLARE _declspec(dllimport) +#else +#define DLL_DECLARE +#endif /* _MYLOG_FUNCS_IMPORT_ */ +#endif /* _MYLOG_FUNCS_IMPLEMENT_ */ +#else +#define DLL_DECLARE +#endif /* WIN32 */ + +#include +#ifndef WIN32 +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef __GNUC__ +#define __attribute__(x) +#endif + +DLL_DECLARE int mylog(const char *fmt, ...) + __attribute__((format(ES_PRINTF_ATTRIBUTE, 1, 2))); +DLL_DECLARE int myprintf(const char *fmt, ...) + __attribute__((format(ES_PRINTF_ATTRIBUTE, 1, 2))); + +extern int qlog(const char *fmt, ...) + __attribute__((format(ES_PRINTF_ATTRIBUTE, 1, 2))); +extern int qprintf(char *fmt, ...) + __attribute__((format(ES_PRINTF_ATTRIBUTE, 1, 2))); + +const char *po_basename(const char *path); + +#define PREPEND_FMT "%10.10s[%s]%d: " +#define PREPEND_ITEMS , po_basename(__FILE__), __FUNCTION__, __LINE__ +#define QLOG_MARK "[QLOG]" + +#if defined(__GNUC__) && !defined(__APPLE__) +#define MYLOG(level, fmt, ...) \ + (level < get_mylog() ? mylog(PREPEND_FMT fmt PREPEND_ITEMS, ##__VA_ARGS__) \ + : 0) +#define MYPRINTF(level, fmt, ...) \ + (level < get_mylog() ? myprintf((fmt), ##__VA_ARGS__) : 0) +#define QLOG(level, fmt, ...) \ + ((level < get_qlog() ? qlog((fmt), ##__VA_ARGS__) : 0), \ + MYLOG(level, QLOG_MARK fmt, ##__VA_ARGS__)) +#define QPRINTF(level, fmt, ...) \ + ((level < get_qlog() ? qprintf((fmt), ##__VA_ARGS__) : 0), \ + MYPRINTF(level, (fmt), ##__VA_ARGS__)) +#elif defined WIN32 /* && _MSC_VER > 1800 */ +#define MYLOG(level, fmt, ...) \ + ((int)level <= get_mylog() \ + ? mylog(PREPEND_FMT fmt PREPEND_ITEMS, __VA_ARGS__) \ + : (printf || printf((fmt), __VA_ARGS__))) +#define MYPRINTF(level, fmt, ...) \ + ((int)level <= get_mylog() ? 
myprintf(fmt, __VA_ARGS__) \ + : (printf || printf((fmt), __VA_ARGS__))) +#define QLOG(level, fmt, ...) \ + (((int)level <= get_qlog() ? qlog((fmt), __VA_ARGS__) \ + : (printf || printf(fmt, __VA_ARGS__))), \ + MYLOG(level, QLOG_MARK fmt, __VA_ARGS__)) +#define QPRINTF(level, fmt, ...) \ + (((int)level <= get_qlog() ? qprintf(fmt, __VA_ARGS__) \ + : (printf || printf((fmt), __VA_ARGS__))), \ + MYPRINTF(level, (fmt), __VA_ARGS__)) +#else +#define MYLOG(level, ...) \ + do { \ + _Pragma("clang diagnostic push"); \ + _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ + (level < get_mylog() \ + ? (mylog(PREPEND_FMT PREPEND_ITEMS), myprintf(__VA_ARGS__)) \ + : 0); \ + _Pragma("clang diagnostic pop"); \ + } while (0) +#define MYPRINTF(level, ...) \ + do { \ + _Pragma("clang diagnostic push"); \ + _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ + (level < get_mylog() ? myprintf(__VA_ARGS__) : 0); \ + _Pragma("clang diagnostic pop"); \ + } while (0) +#define QLOG(level, ...) \ + do { \ + _Pragma("clang diagnostic push"); \ + _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ + (level < get_qlog() ? qlog(__VA_ARGS__) : 0); \ + MYLOG(level, QLOG_MARK); \ + MYPRINTF(level, __VA_ARGS__); \ + _Pragma("clang diagnostic pop"); \ + } while (0) +#define QPRINTF(level, ...) \ + do { \ + _Pragma("clang diagnostic push"); \ + _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ + (level < get_qlog() ? 
qprintf(__VA_ARGS__) : 0); \ + MYPRINTF(level, __VA_ARGS__); \ + _Pragma("clang diagnostic pop"); \ + } while (0) +#endif /* __GNUC__ */ + +enum ESLogLevel { + // Prefixing with ES_ because C does not support namespaces and we may get a + // collision, given how common these names are + ES_OFF, + ES_FATAL, + ES_ERROR, + ES_WARNING, + ES_INFO, + ES_DEBUG, + ES_TRACE, + ES_ALL +}; + +int get_qlog(void); +int get_mylog(void); + +int getGlobalDebug(); +int setGlobalDebug(int val); +int getGlobalCommlog(); +int setGlobalCommlog(int val); +int writeGlobalLogs(); +int getLogDir(char *dir, int dirmax); +int setLogDir(const char *dir); + +void InitializeLogging(void); +void FinalizeLogging(void); + +#ifdef __cplusplus +} +#endif +#endif /* __MYLOG_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/odbcapi.c b/sql-odbc/src/odfesqlodbc/odbcapi.c new file mode 100644 index 0000000000..a2d1490411 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/odbcapi.c @@ -0,0 +1,1293 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include +#include + +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_driver_connect.h" +#include "es_info.h" +#include "es_odbc.h" +#include "es_statement.h" +#include "loadlib.h" +#include "misc.h" +#include "qresult.h" +#include "statement.h" + +BOOL SC_connection_lost_check(StatementClass *stmt, const char *funcname) { + ConnectionClass *conn = SC_get_conn(stmt); + char message[64]; + + if (NULL != conn->esconn) + return FALSE; + SC_clear_error(stmt); + SPRINTF_FIXED(message, "%s unable due to the connection lost", funcname); + SC_set_error(stmt, STMT_COMMUNICATION_ERROR, message, funcname); + return TRUE; +} + +RETCODE SQL_API SQLBindCol(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, + SQLSMALLINT TargetType, PTR TargetValue, + SQLLEN BufferLength, SQLLEN *StrLen_or_Ind) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_BindCol(StatementHandle, ColumnNumber, TargetType, TargetValue, + BufferLength, StrLen_or_Ind); + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLCancel(HSTMT StatementHandle) { + MYLOG(ES_TRACE, "entering\n"); + if (!StatementHandle) + return SQL_INVALID_HANDLE; + if (SC_connection_lost_check((StatementClass *)StatementHandle, + __FUNCTION__)) + return SQL_ERROR; + return ESAPI_Cancel(StatementHandle); +} + +static BOOL theResultIsEmpty(const StatementClass *stmt) { + QResultClass *res = SC_get_Result(stmt); + if (NULL == res) + return FALSE; + return (0 == QR_get_num_total_tuples(res)); +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLColumns(HSTMT StatementHandle, SQLCHAR *CatalogName, + SQLSMALLINT NameLength1, SQLCHAR *SchemaName, + SQLSMALLINT NameLength2, SQLCHAR *TableName, + SQLSMALLINT NameLength3, SQLCHAR *ColumnName, + SQLSMALLINT NameLength4) { + CSTR func = "SQLColumns"; + RETCODE ret; + StatementClass *stmt = (StatementClass 
*)StatementHandle; + SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName, + *clName = ColumnName; + UWORD flag = PODBC_SEARCH_PUBLIC_SCHEMA; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Columns(StatementHandle, ctName, NameLength1, scName, + NameLength2, tbName, NameLength3, clName, + NameLength4, flag, 0, 0); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL, *newCl = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, SchemaName, NameLength2, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newTb = + make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), + NULL != newTb) { + tbName = newTb; + reexec = TRUE; + } + if (newCl = make_lstring_ifneeded(conn, ColumnName, NameLength4, + ifallupper), + NULL != newCl) { + clName = newCl; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_Columns(StatementHandle, ctName, NameLength1, scName, + NameLength2, tbName, NameLength3, clName, + NameLength4, flag, 0, 0); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newTb) + free(newTb); + if (newCl) + free(newCl); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLConnect(HDBC ConnectionHandle, SQLCHAR *ServerName, + SQLSMALLINT NameLength1, SQLCHAR *UserName, + SQLSMALLINT NameLength2, SQLCHAR *Authentication, + SQLSMALLINT NameLength3) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + + MYLOG(ES_TRACE, "entering\n"); + 
ENTER_CONN_CS(conn); + CC_clear_error(conn); + ret = ESAPI_Connect(ConnectionHandle, ServerName, NameLength1, UserName, + NameLength2, Authentication, NameLength3); + LEAVE_CONN_CS(conn); + return ret; +} + +RETCODE SQL_API SQLDriverConnect(HDBC hdbc, HWND hwnd, SQLCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, SQLCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut, + SQLUSMALLINT fDriverCompletion) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + ret = + ESAPI_DriverConnect(hdbc, hwnd, szConnStrIn, cbConnStrIn, szConnStrOut, + cbConnStrOutMax, pcbConnStrOut, fDriverCompletion); + LEAVE_CONN_CS(conn); + return ret; +} +RETCODE SQL_API SQLBrowseConnect(HDBC hdbc, SQLCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, SQLCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + ret = ESAPI_BrowseConnect(hdbc, szConnStrIn, cbConnStrIn, szConnStrOut, + cbConnStrOutMax, pcbConnStrOut); + LEAVE_CONN_CS(conn); + return ret; +} + +RETCODE SQL_API SQLDataSources(HENV EnvironmentHandle, SQLUSMALLINT Direction, + SQLCHAR *ServerName, SQLSMALLINT BufferLength1, + SQLSMALLINT *NameLength1, SQLCHAR *Description, + SQLSMALLINT BufferLength2, + SQLSMALLINT *NameLength2) { + UNUSED(EnvironmentHandle, Direction, ServerName, BufferLength1, NameLength1, + Description, BufferLength2, NameLength2); + MYLOG(ES_TRACE, "entering\n"); + return SQL_ERROR; +} + +RETCODE SQL_API SQLDescribeCol(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, + SQLCHAR *ColumnName, SQLSMALLINT BufferLength, + SQLSMALLINT *NameLength, SQLSMALLINT *DataType, + SQLULEN *ColumnSize, SQLSMALLINT *DecimalDigits, + SQLSMALLINT *Nullable) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + 
MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_DescribeCol(StatementHandle, ColumnNumber, ColumnName, + BufferLength, NameLength, DataType, ColumnSize, + DecimalDigits, Nullable); + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLDisconnect(HDBC ConnectionHandle) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + + MYLOG(ES_TRACE, "entering for %p\n", ConnectionHandle); +#ifdef _HANDLE_ENLIST_IN_DTC_ + if (CC_is_in_global_trans(conn)) + CALL_DtcOnDisconnect(conn); +#endif /* _HANDLE_ENLIST_IN_DTC_ */ + ENTER_CONN_CS(conn); + CC_clear_error(conn); + ret = ESAPI_Disconnect(ConnectionHandle); + LEAVE_CONN_CS(conn); + return ret; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLExecDirect(HSTMT StatementHandle, SQLCHAR *StatementText, + SQLINTEGER TextLength) { + if(StatementHandle == NULL) + return SQL_ERROR; + StatementClass *stmt = (StatementClass *)StatementHandle; + + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + // Enter critical + ENTER_STMT_CS(stmt); + + // Clear error and rollback + SC_clear_error(stmt); + + // Execute statement if statement is ready + RETCODE ret = SQL_ERROR; + if (!SC_opencheck(stmt, "SQLExecDirect")) + ret = ESAPI_ExecDirect(StatementHandle, StatementText, TextLength, 1); + + // Exit critical + LEAVE_STMT_CS(stmt); + + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLExecute(HSTMT StatementHandle) { + if(StatementHandle == NULL) + return SQL_ERROR; + + StatementClass *stmt = (StatementClass *)StatementHandle; + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + // Enter critical + ENTER_STMT_CS(stmt); + + // Clear error and rollback + SC_clear_error(stmt); + RETCODE ret = SQL_ERROR; + if (!SC_opencheck(stmt, "SQLExecute")) + ret = 
ESAPI_Execute(StatementHandle); + + // Exit critical + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLFetch(HSTMT StatementHandle) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + IRDFields *irdopts = SC_get_IRDF(stmt); + ARDFields *ardopts = SC_get_ARDF(stmt); + SQLUSMALLINT *rowStatusArray = irdopts->rowStatusArray; + SQLULEN *pcRow = irdopts->rowsFetched; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_ExtendedFetch(StatementHandle, SQL_FETCH_NEXT, 0, pcRow, + rowStatusArray, 0, ardopts->size_of_rowset); + stmt->transition_status = STMT_TRANSITION_FETCH_SCROLL; + + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLFreeStmt(HSTMT StatementHandle, SQLUSMALLINT Option) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + ConnectionClass *conn = NULL; + + MYLOG(ES_TRACE, "entering\n"); + + if (stmt) { + if (Option == SQL_DROP) { + conn = stmt->hdbc; + if (conn) + ENTER_CONN_CS(conn); + } else + ENTER_STMT_CS(stmt); + } + + ret = ESAPI_FreeStmt(StatementHandle, Option); + + if (stmt) { + if (Option == SQL_DROP) { + if (conn) + LEAVE_CONN_CS(conn); + } else + LEAVE_STMT_CS(stmt); + } + + return ret; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLGetCursorName(HSTMT StatementHandle, SQLCHAR *CursorName, + SQLSMALLINT BufferLength, + SQLSMALLINT *NameLength) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_GetCursorName(StatementHandle, CursorName, BufferLength, + NameLength); + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLGetData(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, + SQLSMALLINT TargetType, PTR TargetValue, + SQLLEN BufferLength, SQLLEN *StrLen_or_Ind) { + RETCODE ret; + StatementClass 
*stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_GetData(StatementHandle, ColumnNumber, TargetType, TargetValue, + BufferLength, StrLen_or_Ind); + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLGetFunctions(HDBC ConnectionHandle, SQLUSMALLINT FunctionId, + SQLUSMALLINT *Supported) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + if (FunctionId == SQL_API_ODBC3_ALL_FUNCTIONS) + ret = ESAPI_GetFunctions30(ConnectionHandle, FunctionId, Supported); + else + ret = ESAPI_GetFunctions(ConnectionHandle, FunctionId, Supported); + + LEAVE_CONN_CS(conn); + return ret; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLGetInfo(HDBC ConnectionHandle, SQLUSMALLINT InfoType, + PTR InfoValue, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + + ENTER_CONN_CS(conn); + CC_clear_error(conn); + MYLOG(ES_TRACE, "entering\n"); + if ((ret = ESAPI_GetInfo(ConnectionHandle, InfoType, InfoValue, + BufferLength, StringLength)) + == SQL_ERROR) + CC_log_error("SQLGetInfo(30)", "", conn); + LEAVE_CONN_CS(conn); + return ret; +} + +RETCODE SQL_API SQLGetTypeInfo(HSTMT StatementHandle, SQLSMALLINT DataType) { + CSTR func = "SQLGetTypeInfo"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check((StatementClass *)StatementHandle, + __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_GetTypeInfo(StatementHandle, DataType); + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLNumResultCols(HSTMT 
StatementHandle, + SQLSMALLINT *ColumnCount) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_NumResultCols(StatementHandle, ColumnCount); + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLParamData(HSTMT StatementHandle, PTR *Value) { + UNUSED(Value); + StatementClass *stmt = (StatementClass *)StatementHandle; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Elasticsearch does not support parameters.", "SQLParamData"); + return SQL_ERROR; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLPrepare(HSTMT StatementHandle, SQLCHAR *StatementText, + SQLINTEGER TextLength) { + if(StatementHandle == NULL) + return SQL_ERROR; + + CSTR func = "SQLPrepare"; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + // Enter critical + ENTER_STMT_CS(stmt); + + // Clear error and rollback + SC_clear_error(stmt); + + // Prepare statement if statement is ready + RETCODE ret = SQL_ERROR; + if (!SC_opencheck(stmt, func)) + ret = ESAPI_Prepare(StatementHandle, StatementText, TextLength); + + // Exit critical + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLPutData(HSTMT StatementHandle, PTR Data, + SQLLEN StrLen_or_Ind) { + UNUSED(Data, StrLen_or_Ind); + StatementClass *stmt = (StatementClass *)StatementHandle; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Elasticsearch does not support parameters.", "SQLPutData"); + return SQL_ERROR; +} + +RETCODE SQL_API SQLRowCount(HSTMT StatementHandle, SQLLEN *RowCount) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + 
+ MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_RowCount(StatementHandle, RowCount); + LEAVE_STMT_CS(stmt); + return ret; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLSetCursorName(HSTMT StatementHandle, SQLCHAR *CursorName, + SQLSMALLINT NameLength) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_SetCursorName(StatementHandle, CursorName, NameLength); + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLSetParam(HSTMT StatementHandle, SQLUSMALLINT ParameterNumber, + SQLSMALLINT ValueType, SQLSMALLINT ParameterType, + SQLULEN LengthPrecision, SQLSMALLINT ParameterScale, + PTR ParameterValue, SQLLEN *StrLen_or_Ind) { + UNUSED(ParameterNumber, ValueType, ParameterType, LengthPrecision, + ParameterScale, ParameterValue, StrLen_or_Ind); + StatementClass *stmt = (StatementClass *)StatementHandle; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Elasticsearch does not support parameters.", "SQLSetParam"); + return SQL_ERROR; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLSpecialColumns(HSTMT StatementHandle, + SQLUSMALLINT IdentifierType, + SQLCHAR *CatalogName, SQLSMALLINT NameLength1, + SQLCHAR *SchemaName, SQLSMALLINT NameLength2, + SQLCHAR *TableName, SQLSMALLINT NameLength3, + SQLUSMALLINT Scope, SQLUSMALLINT Nullable) { + CSTR func = "SQLSpecialColumns"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + 
else + ret = ESAPI_SpecialColumns(StatementHandle, IdentifierType, ctName, + NameLength1, scName, NameLength2, tbName, + NameLength3, Scope, Nullable); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, SchemaName, NameLength2, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newTb = + make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), + NULL != newTb) { + tbName = newTb; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_SpecialColumns(StatementHandle, IdentifierType, ctName, + NameLength1, scName, NameLength2, tbName, + NameLength3, Scope, Nullable); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newTb) + free(newTb); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLStatistics(HSTMT StatementHandle, SQLCHAR *CatalogName, + SQLSMALLINT NameLength1, SQLCHAR *SchemaName, + SQLSMALLINT NameLength2, SQLCHAR *TableName, + SQLSMALLINT NameLength3, SQLUSMALLINT Unique, + SQLUSMALLINT Reserved) { + CSTR func = "SQLStatistics"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Statistics(StatementHandle, ctName, NameLength1, scName, + NameLength2, tbName, NameLength3, Unique, + Reserved); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; + 
ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, SchemaName, NameLength2, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newTb = + make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), + NULL != newTb) { + tbName = newTb; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_Statistics(StatementHandle, ctName, NameLength1, scName, + NameLength2, tbName, NameLength3, Unique, + Reserved); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newTb) + free(newTb); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLTables(HSTMT StatementHandle, SQLCHAR *CatalogName, + SQLSMALLINT NameLength1, SQLCHAR *SchemaName, + SQLSMALLINT NameLength2, SQLCHAR *TableName, + SQLSMALLINT NameLength3, SQLCHAR *TableType, + SQLSMALLINT NameLength4) { + CSTR func = "SQLTables"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Tables(StatementHandle, ctName, NameLength1, scName, + NameLength2, tbName, NameLength3, TableType, + NameLength4, flag); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, SchemaName, 
NameLength2, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newTb = + make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), + NULL != newTb) { + tbName = newTb; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_Tables(StatementHandle, ctName, NameLength1, scName, + NameLength2, tbName, NameLength3, TableType, + NameLength4, flag); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newTb) + free(newTb); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLColumnPrivileges( + HSTMT hstmt, SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, + SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, SQLCHAR *szTableName, + SQLSMALLINT cbTableName, SQLCHAR *szColumnName, SQLSMALLINT cbColumnName) { + CSTR func = "SQLColumnPrivileges"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, + *tbName = szTableName, *clName = szColumnName; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_ColumnPrivileges(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, tbName, cbTableName, clName, + cbColumnName, flag); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL, *newCl = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newTb = make_lstring_ifneeded(conn, szTableName, cbTableName, + ifallupper), + 
NULL != newTb) { + tbName = newTb; + reexec = TRUE; + } + if (newCl = make_lstring_ifneeded(conn, szColumnName, cbColumnName, + ifallupper), + NULL != newCl) { + clName = newCl; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_ColumnPrivileges(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, tbName, cbTableName, + clName, cbColumnName, flag); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newTb) + free(newTb); + if (newCl) + free(newCl); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLDescribeParam(HSTMT hstmt, SQLUSMALLINT ipar, + SQLSMALLINT *pfSqlType, SQLULEN *pcbParamDef, + SQLSMALLINT *pibScale, + SQLSMALLINT *pfNullable) { + UNUSED(ipar, pfSqlType, pcbParamDef, pibScale, pfNullable); + StatementClass *stmt = (StatementClass *)hstmt; + SC_clear_error(stmt); + + // COLNUM_ERROR translates to 'invalid descriptor index' + SC_set_error(stmt, STMT_COLNUM_ERROR, + "Elasticsearch does not support parameters.", "SQLNumParams"); + return SQL_ERROR; +} + +RETCODE SQL_API SQLExtendedFetch(HSTMT hstmt, SQLUSMALLINT fFetchType, + SQLLEN irow, +#if defined(WITH_UNIXODBC) && (SIZEOF_LONG_INT != 8) + SQLROWSETSIZE *pcrow, +#else + SQLULEN *pcrow, +#endif /* WITH_UNIXODBC */ + SQLUSMALLINT *rgfRowStatus) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); +#ifdef WITH_UNIXODBC + { + SQLULEN retrieved; + + ret = ESAPI_ExtendedFetch(hstmt, fFetchType, irow, &retrieved, + rgfRowStatus, 0, + SC_get_ARDF(stmt)->size_of_rowset_odbc2); + if (pcrow) + *pcrow = retrieved; + } +#else + ret = ESAPI_ExtendedFetch(hstmt, fFetchType, irow, pcrow, rgfRowStatus, 0, + SC_get_ARDF(stmt)->size_of_rowset_odbc2); +#endif /* WITH_UNIXODBC */ + stmt->transition_status = STMT_TRANSITION_EXTENDED_FETCH; + LEAVE_STMT_CS(stmt); + return ret; +} + +#ifndef 
UNICODE_SUPPORTXX +RETCODE SQL_API SQLForeignKeys( + HSTMT hstmt, SQLCHAR *szPkCatalogName, SQLSMALLINT cbPkCatalogName, + SQLCHAR *szPkSchemaName, SQLSMALLINT cbPkSchemaName, SQLCHAR *szPkTableName, + SQLSMALLINT cbPkTableName, SQLCHAR *szFkCatalogName, + SQLSMALLINT cbFkCatalogName, SQLCHAR *szFkSchemaName, + SQLSMALLINT cbFkSchemaName, SQLCHAR *szFkTableName, + SQLSMALLINT cbFkTableName) { + CSTR func = "SQLForeignKeys"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + SQLCHAR *pkctName = szPkCatalogName, *pkscName = szPkSchemaName, + *pktbName = szPkTableName, *fkctName = szFkCatalogName, + *fkscName = szFkSchemaName, *fktbName = szFkTableName; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_ForeignKeys(hstmt, pkctName, cbPkCatalogName, pkscName, + cbPkSchemaName, pktbName, cbPkTableName, + fkctName, cbFkCatalogName, fkscName, + cbFkSchemaName, fktbName, cbFkTableName); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newPkct = NULL, *newPksc = NULL, *newPktb = NULL, + *newFkct = NULL, *newFksc = NULL, *newFktb = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newPkct = make_lstring_ifneeded(conn, szPkCatalogName, + cbPkCatalogName, ifallupper), + NULL != newPkct) { + pkctName = newPkct; + reexec = TRUE; + } + if (newPksc = make_lstring_ifneeded(conn, szPkSchemaName, + cbPkSchemaName, ifallupper), + NULL != newPksc) { + pkscName = newPksc; + reexec = TRUE; + } + if (newPktb = make_lstring_ifneeded(conn, szPkTableName, cbPkTableName, + ifallupper), + NULL != newPktb) { + pktbName = newPktb; + reexec = TRUE; + } + if (newFkct = make_lstring_ifneeded(conn, szFkCatalogName, + cbFkCatalogName, ifallupper), + NULL != newFkct) { + fkctName = newFkct; + reexec = TRUE; + } + if (newFksc = 
make_lstring_ifneeded(conn, szFkSchemaName, + cbFkSchemaName, ifallupper), + NULL != newFksc) { + fkscName = newFksc; + reexec = TRUE; + } + if (newFktb = make_lstring_ifneeded(conn, szFkTableName, cbFkTableName, + ifallupper), + NULL != newFktb) { + fktbName = newFktb; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_ForeignKeys(hstmt, pkctName, cbPkCatalogName, pkscName, + cbPkSchemaName, pktbName, cbPkTableName, + fkctName, cbFkCatalogName, fkscName, + cbFkSchemaName, fktbName, cbFkTableName); + if (newPkct) + free(newPkct); + if (newPksc) + free(newPksc); + if (newPktb) + free(newPktb); + if (newFkct) + free(newFkct); + if (newFksc) + free(newFksc); + if (newFktb) + free(newFktb); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLMoreResults(HSTMT hstmt) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_MoreResults(hstmt); + LEAVE_STMT_CS(stmt); + return ret; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLNativeSql(HDBC hdbc, SQLCHAR *szSqlStrIn, + SQLINTEGER cbSqlStrIn, SQLCHAR *szSqlStr, + SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + ret = ESAPI_NativeSql(hdbc, szSqlStrIn, cbSqlStrIn, szSqlStr, cbSqlStrMax, + pcbSqlStr); + LEAVE_CONN_CS(conn); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLNumParams(HSTMT hstmt, SQLSMALLINT *pcpar) { + if (pcpar != NULL) + *pcpar = 0; + + StatementClass *stmt = (StatementClass *)hstmt; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Elasticsearch does not support parameters.", "SQLNumParams"); + return SQL_SUCCESS_WITH_INFO; +} + +#ifndef 
UNICODE_SUPPORTXX +RETCODE SQL_API SQLPrimaryKeys(HSTMT hstmt, SQLCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, SQLCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, SQLCHAR *szTableName, + SQLSMALLINT cbTableName) { + CSTR func = "SQLPrimaryKeys"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, + *tbName = szTableName; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_PrimaryKeys(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, tbName, cbTableName, 0); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newTb = make_lstring_ifneeded(conn, szTableName, cbTableName, + ifallupper), + NULL != newTb) { + tbName = newTb; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_PrimaryKeys(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, tbName, cbTableName, 0); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newTb) + free(newTb); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLProcedureColumns( + HSTMT hstmt, SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, + SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, SQLCHAR *szProcName, + SQLSMALLINT cbProcName, SQLCHAR *szColumnName, SQLSMALLINT cbColumnName) { + CSTR func = "SQLProcedureColumns"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + SQLCHAR *ctName = szCatalogName, *scName = 
szSchemaName, + *prName = szProcName, *clName = szColumnName; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_ProcedureColumns(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, prName, cbProcName, clName, + cbColumnName, flag); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newPr = NULL, *newCl = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newPr = + make_lstring_ifneeded(conn, szProcName, cbProcName, ifallupper), + NULL != newPr) { + prName = newPr; + reexec = TRUE; + } + if (newCl = make_lstring_ifneeded(conn, szColumnName, cbColumnName, + ifallupper), + NULL != newCl) { + clName = newCl; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_ProcedureColumns(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, prName, cbProcName, + clName, cbColumnName, flag); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newPr) + free(newPr); + if (newCl) + free(newCl); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLProcedures(HSTMT hstmt, SQLCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, SQLCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, SQLCHAR *szProcName, + SQLSMALLINT cbProcName) { + CSTR func = "SQLProcedures"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, + *prName = szProcName; + UWORD flag = 0; + + 
MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Procedures(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, prName, cbProcName, flag); + if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { + BOOL ifallupper = TRUE, reexec = FALSE; + SQLCHAR *newCt = NULL, *newSc = NULL, *newPr = NULL; + ConnectionClass *conn = SC_get_conn(stmt); + + if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, + ifallupper), + NULL != newCt) { + ctName = newCt; + reexec = TRUE; + } + if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, + ifallupper), + NULL != newSc) { + scName = newSc; + reexec = TRUE; + } + if (newPr = + make_lstring_ifneeded(conn, szProcName, cbProcName, ifallupper), + NULL != newPr) { + prName = newPr; + reexec = TRUE; + } + if (reexec) { + ret = ESAPI_Procedures(hstmt, ctName, cbCatalogName, scName, + cbSchemaName, prName, cbProcName, flag); + if (newCt) + free(newCt); + if (newSc) + free(newSc); + if (newPr) + free(newPr); + } + } + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +RETCODE SQL_API SQLSetPos(HSTMT hstmt, SQLSETPOSIROW irow, SQLUSMALLINT fOption, + SQLUSMALLINT fLock) { + UNUSED(irow, fOption, fLock); + StatementClass *stmt = (StatementClass *)hstmt; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "SQLSetPos is not supported.", "SQLSetPos"); + return SQL_ERROR; +} + +#ifndef UNICODE_SUPPORTXX +RETCODE SQL_API SQLTablePrivileges(HSTMT hstmt, SQLCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + SQLCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, + SQLCHAR *szTableName, + SQLSMALLINT cbTableName) { + CSTR func = "SQLTablePrivileges"; + RETCODE ret; + StatementClass *stmt = (StatementClass 
*)hstmt;
+    SQLCHAR *ctName = szCatalogName, *scName = szSchemaName,
+            *tbName = szTableName;
+    UWORD flag = 0;
+
+    MYLOG(ES_TRACE, "entering\n");
+    if (SC_connection_lost_check(stmt, __FUNCTION__))
+        return SQL_ERROR;
+
+    ENTER_STMT_CS(stmt);
+    SC_clear_error(stmt);
+    if (stmt->options.metadata_id)
+        flag |= PODBC_NOT_SEARCH_PATTERN;
+    if (SC_opencheck(stmt, func))
+        ret = SQL_ERROR;
+    else
+        ret = ESAPI_TablePrivileges(hstmt, ctName, cbCatalogName, scName,
+                                    cbSchemaName, tbName, cbTableName, flag);
+    if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) {
+        BOOL ifallupper = TRUE, reexec = FALSE;
+        SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL;
+        ConnectionClass *conn = SC_get_conn(stmt);
+
+        if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName,
+                                          ifallupper),
+            NULL != newCt) {
+            ctName = newCt;
+            reexec = TRUE;
+        }
+        if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName,
+                                          ifallupper),
+            NULL != newSc) {
+            scName = newSc;
+            reexec = TRUE;
+        }
+        if (newTb = make_lstring_ifneeded(conn, szTableName, cbTableName,
+                                          ifallupper),
+            NULL != newTb) {
+            tbName = newTb;
+            reexec = TRUE;
+        }
+        if (reexec) {
+            /* FIX: pass the same search-pattern flag as the initial call;
+             * this retry previously hard-coded 0, silently dropping
+             * PODBC_NOT_SEARCH_PATTERN when SQL_ATTR_METADATA_ID was set.
+             * Every sibling catalog function (SQLTables, SQLColumnPrivileges,
+             * SQLProcedures, ...) passes `flag` in both calls. */
+            ret = ESAPI_TablePrivileges(hstmt, ctName, cbCatalogName, scName,
+                                        cbSchemaName, tbName, cbTableName,
+                                        flag);
+            if (newCt)
+                free(newCt);
+            if (newSc)
+                free(newSc);
+            if (newTb)
+                free(newTb);
+        }
+    }
+    LEAVE_STMT_CS(stmt);
+    return ret;
+}
+#endif /* UNICODE_SUPPORTXX */
+
+RETCODE SQL_API SQLBindParameter(HSTMT hstmt, SQLUSMALLINT ipar,
+                                 SQLSMALLINT fParamType, SQLSMALLINT fCType,
+                                 SQLSMALLINT fSqlType, SQLULEN cbColDef,
+                                 SQLSMALLINT ibScale, PTR rgbValue,
+                                 SQLLEN cbValueMax, SQLLEN *pcbValue) {
+    UNUSED(ipar, fParamType, fCType, fSqlType, cbColDef, ibScale, rgbValue,
+           cbValueMax, pcbValue);
+    StatementClass *stmt = (StatementClass *)hstmt;
+    if (stmt == NULL)
+        return SQL_ERROR;
+    SC_clear_error(stmt);
+    SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR,
+                 "Elasticsearch does not support parameters.",
+                 "SQLBindParameter");
+    return
SQL_ERROR;
+}
diff --git a/sql-odbc/src/odfesqlodbc/odbcapi30.c b/sql-odbc/src/odfesqlodbc/odbcapi30.c
new file mode 100644
index 0000000000..0cde75ea31
--- /dev/null
+++ b/sql-odbc/src/odfesqlodbc/odbcapi30.c
@@ -0,0 +1,605 @@
+/*
+ * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ */
+
+/* NOTE(review): the two includes below read bare "#include" in this copy —
+ * the angle-bracketed header names appear to have been stripped as tag-like
+ * text in transit. Restored to the standard C headers this translation unit
+ * uses (memset via <string.h>, printf-family via <stdio.h>); confirm against
+ * the upstream file. */
+#include <stdio.h>
+#include <string.h>
+
+#include "environ.h"
+#include "es_apifunc.h"
+#include "es_connection.h"
+#include "es_odbc.h"
+#include "misc.h"
+#include "statement.h"
+
+/* SQLAllocConnect/SQLAllocEnv/SQLAllocStmt -> SQLAllocHandle */
+RETCODE SQL_API SQLAllocHandle(SQLSMALLINT HandleType, SQLHANDLE InputHandle,
+                               SQLHANDLE *OutputHandle) {
+    RETCODE ret;
+    ConnectionClass *conn;
+
+    MYLOG(ES_TRACE, "entering\n");
+    switch (HandleType) {
+        case SQL_HANDLE_ENV:
+            ret = ESAPI_AllocEnv(OutputHandle);
+            break;
+        case SQL_HANDLE_DBC:
+            ENTER_ENV_CS((EnvironmentClass *)InputHandle);
+            ret = ESAPI_AllocConnect(InputHandle, OutputHandle);
+            LEAVE_ENV_CS((EnvironmentClass *)InputHandle);
+            break;
+        case SQL_HANDLE_STMT:
+            conn = (ConnectionClass *)InputHandle;
+            ENTER_CONN_CS(conn);
+            ret = ESAPI_AllocStmt(
+                InputHandle, OutputHandle,
+                PODBC_EXTERNAL_STATEMENT | PODBC_INHERIT_CONNECT_OPTIONS);
+            if (*OutputHandle)
+                ((StatementClass *)(*OutputHandle))->external = 1;
+            LEAVE_CONN_CS(conn);
+            break;
+        case SQL_HANDLE_DESC:
+            conn = (ConnectionClass *)InputHandle;
+            ENTER_CONN_CS(conn);
+            ret = ESAPI_AllocDesc(InputHandle, OutputHandle);
+
LEAVE_CONN_CS(conn); + MYLOG(ES_DEBUG, "OutputHandle=%p\n", *OutputHandle); + break; + default: + ret = SQL_ERROR; + break; + } + return ret; +} + +/* SQLBindParameter/SQLSetParam -> SQLBindParam */ +RETCODE SQL_API SQLBindParam(HSTMT StatementHandle, + SQLUSMALLINT ParameterNumber, + SQLSMALLINT ValueType, SQLSMALLINT ParameterType, + SQLULEN LengthPrecision, + SQLSMALLINT ParameterScale, PTR ParameterValue, + SQLLEN *StrLen_or_Ind) { + UNUSED(ParameterNumber, ValueType, ParameterType, LengthPrecision, + ParameterScale, ParameterValue, StrLen_or_Ind); + StatementClass *stmt = (StatementClass *)StatementHandle; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Elasticsearch does not support parameters.", "SQLBindParam"); + return SQL_ERROR; +} + +/* New function */ +RETCODE SQL_API SQLCloseCursor(HSTMT StatementHandle) { + StatementClass *stmt = (StatementClass *)StatementHandle; + if(stmt == NULL) + return SQL_ERROR; + + RETCODE ret; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_FreeStmt(StatementHandle, SQL_CLOSE); + LEAVE_STMT_CS(stmt); + return ret; +} + +#ifndef UNICODE_SUPPORTXX +/* SQLColAttributes -> SQLColAttribute */ +SQLRETURN SQL_API SQLColAttribute(SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, + SQLUSMALLINT FieldIdentifier, + SQLPOINTER CharacterAttribute, + SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength, +#if defined(_WIN64) || defined(SQLCOLATTRIBUTE_SQLLEN) + SQLLEN *NumericAttribute +#else + SQLPOINTER NumericAttribute +#endif +) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_ColAttributes(StatementHandle, ColumnNumber, FieldIdentifier, + 
CharacterAttribute, BufferLength, StringLength, + NumericAttribute); + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +/* new function */ +RETCODE SQL_API SQLCopyDesc(SQLHDESC SourceDescHandle, + SQLHDESC TargetDescHandle) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering\n"); + ret = ESAPI_CopyDesc(SourceDescHandle, TargetDescHandle); + return ret; +} + +/* SQLTransact -> SQLEndTran */ +RETCODE SQL_API SQLEndTran(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT CompletionType) { + UNUSED(CompletionType); + if (HandleType == SQL_HANDLE_STMT) { + StatementClass *stmt = (StatementClass *)Handle; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Transactions are not supported.", "SQLEndTran"); + } else if (HandleType == SQL_HANDLE_DBC) { + ConnectionClass *conn = (ConnectionClass *)Handle; + if (conn == NULL) + return SQL_ERROR; + CC_clear_error(conn); + CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, + "Transactions are not supported.", "SQLEndTran"); + } + return SQL_ERROR; +} + +/* SQLExtendedFetch -> SQLFetchScroll */ +RETCODE SQL_API SQLFetchScroll(HSTMT StatementHandle, + SQLSMALLINT FetchOrientation, + SQLLEN FetchOffset) { + CSTR func = "SQLFetchScroll"; + StatementClass *stmt = (StatementClass *)StatementHandle; + RETCODE ret = SQL_SUCCESS; + IRDFields *irdopts = SC_get_IRDF(stmt); + SQLUSMALLINT *rowStatusArray = irdopts->rowStatusArray; + SQLULEN *pcRow = irdopts->rowsFetched; + SQLLEN bkmarkoff = 0; + + MYLOG(ES_TRACE, "entering %d," FORMAT_LEN "\n", FetchOrientation, + FetchOffset); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (FetchOrientation == SQL_FETCH_BOOKMARK) { + if (stmt->options.bookmark_ptr) { + bkmarkoff = FetchOffset; + FetchOffset = *((Int4 *)stmt->options.bookmark_ptr); + MYLOG(ES_DEBUG, + "bookmark=" FORMAT_LEN " FetchOffset = " FORMAT_LEN "\n", + FetchOffset, 
bkmarkoff); + } else { + SC_set_error(stmt, STMT_SEQUENCE_ERROR, + "Bookmark isn't specifed yet", func); + ret = SQL_ERROR; + } + } + if (SQL_SUCCESS == ret) { + ARDFields *opts = SC_get_ARDF(stmt); + + ret = ESAPI_ExtendedFetch(StatementHandle, FetchOrientation, + FetchOffset, pcRow, rowStatusArray, bkmarkoff, + opts->size_of_rowset); + stmt->transition_status = STMT_TRANSITION_FETCH_SCROLL; + } + LEAVE_STMT_CS(stmt); + if (ret != SQL_SUCCESS) + MYLOG(ES_TRACE, "leaving return = %d\n", ret); + return ret; +} + +/* SQLFree(Connect/Env/Stmt) -> SQLFreeHandle */ +RETCODE SQL_API SQLFreeHandle(SQLSMALLINT HandleType, SQLHANDLE Handle) { + RETCODE ret; + StatementClass *stmt; + ConnectionClass *conn = NULL; + + MYLOG(ES_TRACE, "entering\n"); + + switch (HandleType) { + case SQL_HANDLE_ENV: + ret = ESAPI_FreeEnv(Handle); + break; + case SQL_HANDLE_DBC: + ret = ESAPI_FreeConnect(Handle); + break; + case SQL_HANDLE_STMT: + stmt = (StatementClass *)Handle; + + if (stmt) { + conn = stmt->hdbc; + if (conn) + ENTER_CONN_CS(conn); + } + + ret = ESAPI_FreeStmt(Handle, SQL_DROP); + + if (conn) + LEAVE_CONN_CS(conn); + + break; + case SQL_HANDLE_DESC: + ret = ESAPI_FreeDesc(Handle); + break; + default: + ret = SQL_ERROR; + break; + } + return ret; +} + +#ifndef UNICODE_SUPPORTXX +/* new function */ +RETCODE SQL_API SQLGetDescField(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering\n"); + ret = ESAPI_GetDescField(DescriptorHandle, RecNumber, FieldIdentifier, + Value, BufferLength, StringLength); + return ret; +} + +/* new function */ +RETCODE SQL_API SQLGetDescRec(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber, + SQLCHAR *Name, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength, SQLSMALLINT *Type, + SQLSMALLINT *SubType, SQLLEN *Length, + SQLSMALLINT *Precision, SQLSMALLINT *Scale, + SQLSMALLINT *Nullable) { + 
UNUSED(DescriptorHandle, RecNumber, Name, BufferLength, StringLength, Type, + SubType, Length, Precision, Scale, Nullable); + MYLOG(ES_TRACE, "entering\n"); + MYLOG(ES_DEBUG, "Error not implemented\n"); + return SQL_ERROR; +} + +/* new function */ +RETCODE SQL_API SQLGetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, + SQLSMALLINT DiagIdentifier, PTR DiagInfo, + SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering Handle=(%u,%p) Rec=%d Id=%d info=(%p,%d)\n", + HandleType, Handle, RecNumber, DiagIdentifier, DiagInfo, + BufferLength); + ret = ESAPI_GetDiagField(HandleType, Handle, RecNumber, DiagIdentifier, + DiagInfo, BufferLength, StringLength); + return ret; +} + +/* SQLError -> SQLDiagRec */ +RETCODE SQL_API SQLGetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, + SQLINTEGER *NativeError, SQLCHAR *MessageText, + SQLSMALLINT BufferLength, + SQLSMALLINT *TextLength) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering\n"); + ret = ESAPI_GetDiagRec(HandleType, Handle, RecNumber, Sqlstate, NativeError, + MessageText, BufferLength, TextLength); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +/* new function */ +RETCODE SQL_API SQLGetEnvAttr(HENV EnvironmentHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + UNUSED(BufferLength, StringLength); + RETCODE ret; + EnvironmentClass *env = (EnvironmentClass *)EnvironmentHandle; + + MYLOG(ES_TRACE, "entering " FORMAT_INTEGER "\n", Attribute); + ENTER_ENV_CS(env); + ret = SQL_SUCCESS; + switch (Attribute) { + case SQL_ATTR_CONNECTION_POOLING: + *((unsigned int *)Value) = + EN_is_pooling(env) ? SQL_CP_ONE_PER_DRIVER : SQL_CP_OFF; + break; + case SQL_ATTR_CP_MATCH: + *((unsigned int *)Value) = SQL_CP_RELAXED_MATCH; + break; + case SQL_ATTR_ODBC_VERSION: + *((unsigned int *)Value) = + EN_is_odbc2(env) ? 
SQL_OV_ODBC2 : SQL_OV_ODBC3; + break; + case SQL_ATTR_OUTPUT_NTS: + *((unsigned int *)Value) = SQL_TRUE; + break; + default: + env->errornumber = CONN_INVALID_ARGUMENT_NO; + ret = SQL_ERROR; + } + LEAVE_ENV_CS(env); + return ret; +} + +#ifndef UNICODE_SUPPORTXX +/* SQLGetConnectOption -> SQLGetconnectAttr */ +RETCODE SQL_API SQLGetConnectAttr(HDBC ConnectionHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering " FORMAT_UINTEGER "\n", Attribute); + ENTER_CONN_CS((ConnectionClass *)ConnectionHandle); + CC_clear_error((ConnectionClass *)ConnectionHandle); + ret = ESAPI_GetConnectAttr(ConnectionHandle, Attribute, Value, BufferLength, + StringLength); + LEAVE_CONN_CS((ConnectionClass *)ConnectionHandle); + return ret; +} + +/* SQLGetStmtOption -> SQLGetStmtAttr */ +RETCODE SQL_API SQLGetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER BufferLength, + SQLINTEGER *StringLength) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering Handle=%p " FORMAT_INTEGER "\n", StatementHandle, + Attribute); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_GetStmtAttr(StatementHandle, Attribute, Value, BufferLength, + StringLength); + LEAVE_STMT_CS(stmt); + return ret; +} + +/* SQLSetConnectOption -> SQLSetConnectAttr */ +RETCODE SQL_API SQLSetConnectAttr(HDBC ConnectionHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER StringLength) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + + MYLOG(ES_TRACE, "entering " FORMAT_INTEGER "\n", Attribute); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + ret = + ESAPI_SetConnectAttr(ConnectionHandle, Attribute, Value, StringLength); + LEAVE_CONN_CS(conn); + return ret; +} + +/* new function */ +RETCODE SQL_API SQLSetDescField(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER 
BufferLength) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering h=%p rec=%d field=%d val=%p\n", DescriptorHandle, + RecNumber, FieldIdentifier, Value); + ret = ESAPI_SetDescField(DescriptorHandle, RecNumber, FieldIdentifier, + Value, BufferLength); + return ret; +} + +/* new fucntion */ +RETCODE SQL_API SQLSetDescRec(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber, + SQLSMALLINT Type, SQLSMALLINT SubType, + SQLLEN Length, SQLSMALLINT Precision, + SQLSMALLINT Scale, PTR Data, SQLLEN *StringLength, + SQLLEN *Indicator) { + UNUSED(DescriptorHandle, RecNumber, Type, SubType, Length, Precision, Scale, + Data, StringLength, Indicator); + MYLOG(ES_TRACE, "entering\n"); + MYLOG(ES_DEBUG, "Error not implemented\n"); + return SQL_ERROR; +} +#endif /* UNICODE_SUPPORTXX */ + +/* new function */ +RETCODE SQL_API SQLSetEnvAttr(HENV EnvironmentHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER StringLength) { + UNUSED(StringLength); + RETCODE ret; + EnvironmentClass *env = (EnvironmentClass *)EnvironmentHandle; + + MYLOG(ES_TRACE, "entering att=" FORMAT_INTEGER "," FORMAT_ULEN "\n", + Attribute, (SQLULEN)Value); + ENTER_ENV_CS(env); + switch (Attribute) { + case SQL_ATTR_CONNECTION_POOLING: + switch ((ULONG_PTR)Value) { + case SQL_CP_OFF: + EN_unset_pooling(env); + ret = SQL_SUCCESS; + break; + case SQL_CP_ONE_PER_DRIVER: + EN_set_pooling(env); + ret = SQL_SUCCESS; + break; + default: + ret = SQL_SUCCESS_WITH_INFO; + } + break; + case SQL_ATTR_CP_MATCH: + /* *((unsigned int *) Value) = SQL_CP_RELAXED_MATCH; */ + ret = SQL_SUCCESS; + break; + case SQL_ATTR_ODBC_VERSION: + if (SQL_OV_ODBC2 == CAST_UPTR(SQLUINTEGER, Value)) + EN_set_odbc2(env); + else + EN_set_odbc3(env); + ret = SQL_SUCCESS; + break; + case SQL_ATTR_OUTPUT_NTS: + if (SQL_TRUE == CAST_UPTR(SQLUINTEGER, Value)) + ret = SQL_SUCCESS; + else + ret = SQL_SUCCESS_WITH_INFO; + break; + default: + env->errornumber = CONN_INVALID_ARGUMENT_NO; + ret = SQL_ERROR; + } + if (SQL_SUCCESS_WITH_INFO == ret) { + env->errornumber = 
CONN_OPTION_VALUE_CHANGED; + env->errormsg = "SetEnv changed to "; + } + LEAVE_ENV_CS(env); + return ret; +} + +#ifndef UNICODE_SUPPORTXX +/* SQLSet(Param/Scroll/Stmt)Option -> SQLSetStmtAttr */ +RETCODE SQL_API SQLSetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, + PTR Value, SQLINTEGER StringLength) { + StatementClass *stmt = (StatementClass *)StatementHandle; + RETCODE ret; + + MYLOG(ES_TRACE, "entering Handle=%p " FORMAT_INTEGER "," FORMAT_ULEN "\n", + StatementHandle, Attribute, (SQLULEN)Value); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_SetStmtAttr(StatementHandle, Attribute, Value, StringLength); + LEAVE_STMT_CS(stmt); + return ret; +} +#endif /* UNICODE_SUPPORTXX */ + +#define SQL_FUNC_ESET(pfExists, uwAPI) \ + (*(((UWORD *)(pfExists)) + ((uwAPI) >> 4)) |= (1 << ((uwAPI)&0x000F))) +RETCODE SQL_API ESAPI_GetFunctions30(HDBC hdbc, SQLUSMALLINT fFunction, + SQLUSMALLINT FAR *pfExists) { + ConnectionClass *conn = (ConnectionClass *)hdbc; + CC_clear_error(conn); + if (fFunction != SQL_API_ODBC3_ALL_FUNCTIONS) + return SQL_ERROR; + memset(pfExists, 0, sizeof(UWORD) * SQL_API_ODBC3_ALL_FUNCTIONS_SIZE); + + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCCONNECT); 1 deprecated */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCENV); 2 deprecated */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCSTMT); 3 deprecated */ + + /* + * for (i = SQL_API_SQLBINDCOL; i <= 23; i++) SQL_FUNC_ESET(pfExists, + * i); + */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLBINDCOL); /* 4 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLCANCEL); /* 5 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLCOLATTRIBUTE); /* 6 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLCONNECT); /* 7 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLDESCRIBECOL); /* 8 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLDISCONNECT); /* 9 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLERROR); 10 deprecated */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLEXECDIRECT); /* 11 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLEXECUTE); /* 12 */ + SQL_FUNC_ESET(pfExists, 
SQL_API_SQLFETCH); /* 13 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLFREECONNECT); 14 deprecated */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLFREEENV); 15 deprecated */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLFREESTMT); /* 16 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETCURSORNAME); /* 17 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLNUMRESULTCOLS); /* 18 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLPREPARE); /* 19 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLROWCOUNT); /* 20 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSETCURSORNAME); /* 21 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETPARAM); 22 deprecated */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLTRANSACT); 23 deprecated */ + + /* + * for (i = 40; i < SQL_API_SQLEXTENDEDFETCH; i++) + * SQL_FUNC_ESET(pfExists, i); + */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLCOLUMNS); /* 40 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLDRIVERCONNECT); /* 41 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLGETCONNECTOPTION); 42 deprecated */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETDATA); /* 43 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETFUNCTIONS); /* 44 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETINFO); /* 45 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLGETSTMTOPTION); 46 deprecated */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETTYPEINFO); /* 47 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLPARAMDATA); /* 48 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLPUTDATA); /* 49 */ + + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETCONNECTIONOPTION); 50 deprecated */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETSTMTOPTION); 51 deprecated */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSPECIALCOLUMNS); /* 52 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSTATISTICS); /* 53 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLTABLES); /* 54 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLDATASOURCES); /* 57 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLDESCRIBEPARAM); /* 58 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLEXTENDEDFETCH); /* 59 deprecated ? 
*/ + + /* + * for (++i; i < SQL_API_SQLBINDPARAMETER; i++) + * SQL_FUNC_ESET(pfExists, i); + */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLFOREIGNKEYS); /* 60 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLMORERESULTS); /* 61 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLNATIVESQL); /* 62 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLNUMPARAMS); /* 63 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLPARAMOPTIONS); 64 deprecated */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLPRIMARYKEYS); /* 65 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLPROCEDURECOLUMNS); /* 66 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLPROCEDURES); /* 67 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSETPOS); /* 68 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETSCROLLOPTIONS); 69 deprecated */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLTABLEPRIVILEGES); /* 70 */ + /* SQL_FUNC_ESET(pfExists, SQL_API_SQLDRIVERS); */ /* 71 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLBINDPARAMETER); /* 72 */ + + SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCHANDLE); /* 1001 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLBINDPARAM); /* 1002 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLCLOSECURSOR); /* 1003 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLCOPYDESC); /* 1004 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLENDTRAN); /* 1005 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLFREEHANDLE); /* 1006 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETCONNECTATTR); /* 1007 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETDESCFIELD); /* 1008 */ + SQL_FUNC_ESET(pfExists, + SQL_API_SQLGETDIAGFIELD); /* 1010 minimal implementation */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETDIAGREC); /* 1011 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETENVATTR); /* 1012 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLGETSTMTATTR); /* 1014 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSETCONNECTATTR); /* 1016 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSETDESCFIELD); /* 1017 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSETENVATTR); /* 1019 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLSETSTMTATTR); /* 1020 */ + SQL_FUNC_ESET(pfExists, SQL_API_SQLFETCHSCROLL); /* 1021 */ + return 
SQL_SUCCESS; +} + +RETCODE SQL_API SQLBulkOperations(HSTMT hstmt, SQLSMALLINT operation) { + UNUSED(operation); + StatementClass *stmt = (StatementClass *)hstmt; + if (stmt == NULL) + return SQL_ERROR; + SC_clear_error(stmt); + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Bulk operations are not supported.", "SQLBulkOperations"); + return SQL_ERROR; +} diff --git a/sql-odbc/src/odfesqlodbc/odbcapi30w.c b/sql-odbc/src/odfesqlodbc/odbcapi30w.c new file mode 100644 index 0000000000..b0731eb696 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/odbcapi30w.c @@ -0,0 +1,403 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include +#include +#include "es_odbc.h" +#include "unicode_support.h" + +#include "es_apifunc.h" +#include "es_connection.h" +#include "misc.h" +#include "statement.h" + +RETCODE SQL_API SQLGetStmtAttrW(SQLHSTMT hstmt, SQLINTEGER fAttribute, + PTR rgbValue, SQLINTEGER cbValueMax, + SQLINTEGER *pcbValue) { + UNUSED(hstmt, fAttribute, rgbValue, cbValueMax, pcbValue); + RETCODE ret; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_STMT_CS((StatementClass *)hstmt); + SC_clear_error((StatementClass *)hstmt); + ret = ESAPI_GetStmtAttr(hstmt, fAttribute, rgbValue, cbValueMax, pcbValue); + LEAVE_STMT_CS((StatementClass *)hstmt); + return ret; +} + +RETCODE SQL_API SQLSetStmtAttrW(SQLHSTMT hstmt, SQLINTEGER fAttribute, + PTR rgbValue, SQLINTEGER cbValueMax) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_SetStmtAttr(hstmt, fAttribute, rgbValue, cbValueMax); + LEAVE_STMT_CS(stmt); + return ret; +} + +RETCODE SQL_API SQLGetConnectAttrW(HDBC hdbc, SQLINTEGER fAttribute, + PTR rgbValue, SQLINTEGER cbValueMax, + SQLINTEGER *pcbValue) { + RETCODE ret; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS((ConnectionClass *)hdbc); + CC_clear_error((ConnectionClass *)hdbc); + ret = + ESAPI_GetConnectAttr(hdbc, fAttribute, rgbValue, cbValueMax, pcbValue); + LEAVE_CONN_CS((ConnectionClass *)hdbc); + return ret; +} + +RETCODE SQL_API SQLSetConnectAttrW(HDBC hdbc, SQLINTEGER fAttribute, + PTR rgbValue, SQLINTEGER cbValue) { + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + CC_set_in_unicode_driver(conn); + ret = ESAPI_SetConnectAttr(hdbc, fAttribute, rgbValue, cbValue); + LEAVE_CONN_CS(conn); + return ret; +} + +/* new function */ +RETCODE SQL_API SQLSetDescFieldW(SQLHDESC DescriptorHandle, + SQLSMALLINT RecNumber, + SQLSMALLINT FieldIdentifier, PTR Value, + SQLINTEGER 
BufferLength) { + RETCODE ret; + SQLLEN vallen; + char *uval = NULL; + BOOL val_alloced = FALSE; + + MYLOG(ES_TRACE, "entering\n"); + if (BufferLength > 0 || SQL_NTS == BufferLength) { + switch (FieldIdentifier) { + case SQL_DESC_BASE_COLUMN_NAME: + case SQL_DESC_BASE_TABLE_NAME: + case SQL_DESC_CATALOG_NAME: + case SQL_DESC_LABEL: + case SQL_DESC_LITERAL_PREFIX: + case SQL_DESC_LITERAL_SUFFIX: + case SQL_DESC_LOCAL_TYPE_NAME: + case SQL_DESC_NAME: + case SQL_DESC_SCHEMA_NAME: + case SQL_DESC_TABLE_NAME: + case SQL_DESC_TYPE_NAME: + uval = ucs2_to_utf8( + Value, + BufferLength > 0 ? BufferLength / WCLEN : BufferLength, + &vallen, FALSE); + val_alloced = TRUE; + break; + default: + vallen = BufferLength; + uval = Value; + break; + } + } else { + vallen = BufferLength; + uval = Value; + } + ret = ESAPI_SetDescField(DescriptorHandle, RecNumber, FieldIdentifier, uval, + (SQLINTEGER)vallen); + if (val_alloced) + free(uval); + return ret; +} + +RETCODE SQL_API SQLGetDescFieldW(SQLHDESC hdesc, SQLSMALLINT iRecord, + SQLSMALLINT iField, PTR rgbValue, + SQLINTEGER cbValueMax, SQLINTEGER *pcbValue) { + RETCODE ret; + SQLINTEGER blen = 0, bMax, *pcbV; + char *rgbV = NULL, *rgbVt; + + MYLOG(ES_TRACE, "entering\n"); + switch (iField) { + case SQL_DESC_BASE_COLUMN_NAME: + case SQL_DESC_BASE_TABLE_NAME: + case SQL_DESC_CATALOG_NAME: + case SQL_DESC_LABEL: + case SQL_DESC_LITERAL_PREFIX: + case SQL_DESC_LITERAL_SUFFIX: + case SQL_DESC_LOCAL_TYPE_NAME: + case SQL_DESC_NAME: + case SQL_DESC_SCHEMA_NAME: + case SQL_DESC_TABLE_NAME: + case SQL_DESC_TYPE_NAME: + bMax = cbValueMax * 3 / WCLEN; + rgbV = malloc(bMax + 1); + pcbV = &blen; + for (rgbVt = rgbV;; bMax = blen + 1, rgbVt = realloc(rgbV, bMax)) { + if (!rgbVt) { + ret = SQL_ERROR; + break; + } + rgbV = rgbVt; + ret = ESAPI_GetDescField(hdesc, iRecord, iField, rgbV, bMax, + pcbV); + if (SQL_SUCCESS_WITH_INFO != ret || blen < bMax) + break; + } + if (SQL_SUCCEEDED(ret)) { + blen = (SQLINTEGER)utf8_to_ucs2( + rgbV, blen, (SQLWCHAR 
*)rgbValue, cbValueMax / WCLEN); + if (SQL_SUCCESS == ret + && blen * WCLEN >= (unsigned long)cbValueMax) { + ret = SQL_SUCCESS_WITH_INFO; + DC_set_error(hdesc, STMT_TRUNCATED, + "The buffer was too small for the rgbDesc."); + } + if (pcbValue) + *pcbValue = blen * WCLEN; + } + if (rgbV) + free(rgbV); + break; + default: + rgbV = rgbValue; + bMax = cbValueMax; + pcbV = pcbValue; + ret = ESAPI_GetDescField(hdesc, iRecord, iField, rgbV, bMax, pcbV); + break; + } + + return ret; +} + +RETCODE SQL_API SQLGetDiagRecW(SQLSMALLINT fHandleType, SQLHANDLE handle, + SQLSMALLINT iRecord, SQLWCHAR *szSqlState, + SQLINTEGER *pfNativeError, SQLWCHAR *szErrorMsg, + SQLSMALLINT cbErrorMsgMax, + SQLSMALLINT *pcbErrorMsg) { + RETCODE ret; + SQLSMALLINT buflen, tlen; + char qstr_ansi[8], *mtxt = NULL; + + MYLOG(ES_TRACE, "entering\n"); + buflen = 0; + if (szErrorMsg && cbErrorMsgMax > 0) { + buflen = cbErrorMsgMax; + mtxt = malloc(buflen); + } + ret = ESAPI_GetDiagRec(fHandleType, handle, iRecord, (SQLCHAR *)qstr_ansi, + pfNativeError, (SQLCHAR *)mtxt, buflen, &tlen); + if (SQL_SUCCEEDED(ret)) { + if (szSqlState) + utf8_to_ucs2(qstr_ansi, -1, szSqlState, 6); + if (mtxt && tlen <= cbErrorMsgMax) { + SQLULEN ulen = utf8_to_ucs2_lf(mtxt, tlen, FALSE, szErrorMsg, + cbErrorMsgMax, TRUE); + if (ulen == (SQLULEN)-1) + tlen = (SQLSMALLINT)locale_to_sqlwchar( + (SQLWCHAR *)szErrorMsg, mtxt, cbErrorMsgMax, FALSE); + else + tlen = (SQLSMALLINT)ulen; + if (tlen >= cbErrorMsgMax) + ret = SQL_SUCCESS_WITH_INFO; + else if (tlen < 0) { + char errc[32]; + + SPRINTF_FIXED(errc, "Error: SqlState=%s", qstr_ansi); + tlen = (SQLSMALLINT)utf8_to_ucs2(errc, -1, szErrorMsg, + cbErrorMsgMax); + } + } + if (pcbErrorMsg) + *pcbErrorMsg = tlen; + } + if (mtxt) + free(mtxt); + return ret; +} + +SQLRETURN SQL_API SQLColAttributeW(SQLHSTMT hstmt, SQLUSMALLINT iCol, + SQLUSMALLINT iField, SQLPOINTER pCharAttr, + SQLSMALLINT cbCharAttrMax, + SQLSMALLINT *pcbCharAttr, +#if defined(_WIN64) || 
defined(SQLCOLATTRIBUTE_SQLLEN) + SQLLEN *pNumAttr +#else + SQLPOINTER pNumAttr +#endif +) { + CSTR func = "SQLColAttributeW"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)hstmt; + SQLSMALLINT *rgbL, blen = 0, bMax; + char *rgbD = NULL, *rgbDt; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + switch (iField) { + case SQL_DESC_BASE_COLUMN_NAME: + case SQL_DESC_BASE_TABLE_NAME: + case SQL_DESC_CATALOG_NAME: + case SQL_DESC_LABEL: + case SQL_DESC_LITERAL_PREFIX: + case SQL_DESC_LITERAL_SUFFIX: + case SQL_DESC_LOCAL_TYPE_NAME: + case SQL_DESC_NAME: + case SQL_DESC_SCHEMA_NAME: + case SQL_DESC_TABLE_NAME: + case SQL_DESC_TYPE_NAME: + case SQL_COLUMN_NAME: + bMax = cbCharAttrMax * 3 / WCLEN; + rgbD = malloc(bMax); + rgbL = &blen; + for (rgbDt = rgbD;; bMax = blen + 1, rgbDt = realloc(rgbD, bMax)) { + if (!rgbDt) { + ret = SQL_ERROR; + break; + } + rgbD = rgbDt; + ret = ESAPI_ColAttributes(hstmt, iCol, iField, rgbD, bMax, rgbL, + pNumAttr); + if (SQL_SUCCESS_WITH_INFO != ret || blen < bMax) + break; + } + if (SQL_SUCCEEDED(ret)) { + blen = (SQLSMALLINT)utf8_to_ucs2( + rgbD, blen, (SQLWCHAR *)pCharAttr, cbCharAttrMax / WCLEN); + if (SQL_SUCCESS == ret + && blen * WCLEN >= (unsigned long)cbCharAttrMax) { + ret = SQL_SUCCESS_WITH_INFO; + SC_set_error(stmt, STMT_TRUNCATED, + "The buffer was too small for the pCharAttr.", + func); + } + if (pcbCharAttr) + *pcbCharAttr = blen * WCLEN; + } + if (rgbD) + free(rgbD); + break; + default: + rgbD = pCharAttr; + bMax = cbCharAttrMax; + rgbL = pcbCharAttr; + ret = ESAPI_ColAttributes(hstmt, iCol, iField, rgbD, bMax, rgbL, + pNumAttr); + break; + } + LEAVE_STMT_CS(stmt); + + return ret; +} + +RETCODE SQL_API SQLGetDiagFieldW(SQLSMALLINT fHandleType, SQLHANDLE handle, + SQLSMALLINT iRecord, SQLSMALLINT fDiagField, + SQLPOINTER rgbDiagInfo, + SQLSMALLINT cbDiagInfoMax, + SQLSMALLINT *pcbDiagInfo) { + RETCODE ret; + 
SQLSMALLINT *rgbL, blen = 0, bMax; + char *rgbD = NULL, *rgbDt; + + MYLOG(ES_TRACE, "entering Handle=(%u,%p) Rec=%d Id=%d info=(%p,%d)\n", fHandleType, + handle, iRecord, fDiagField, rgbDiagInfo, cbDiagInfoMax); + switch (fDiagField) { + case SQL_DIAG_DYNAMIC_FUNCTION: + case SQL_DIAG_CLASS_ORIGIN: + case SQL_DIAG_CONNECTION_NAME: + case SQL_DIAG_MESSAGE_TEXT: + case SQL_DIAG_SERVER_NAME: + case SQL_DIAG_SQLSTATE: + case SQL_DIAG_SUBCLASS_ORIGIN: + bMax = cbDiagInfoMax * 3 / WCLEN + 1; + if (rgbD = malloc(bMax), !rgbD) + return SQL_ERROR; + rgbL = &blen; + for (rgbDt = rgbD;; bMax = blen + 1, rgbDt = realloc(rgbD, bMax)) { + if (!rgbDt) { + free(rgbD); + return SQL_ERROR; + } + rgbD = rgbDt; + ret = ESAPI_GetDiagField(fHandleType, handle, iRecord, + fDiagField, rgbD, bMax, rgbL); + if (SQL_SUCCESS_WITH_INFO != ret || blen < bMax) + break; + } + if (SQL_SUCCEEDED(ret)) { + SQLULEN ulen = (SQLSMALLINT)utf8_to_ucs2_lf( + rgbD, blen, FALSE, (SQLWCHAR *)rgbDiagInfo, + cbDiagInfoMax / WCLEN, TRUE); + if (ulen == (SQLULEN)-1) + blen = (SQLSMALLINT)locale_to_sqlwchar( + (SQLWCHAR *)rgbDiagInfo, rgbD, cbDiagInfoMax / WCLEN, + FALSE); + else + blen = (SQLSMALLINT)ulen; + if (SQL_SUCCESS == ret + && blen * WCLEN >= (unsigned long)cbDiagInfoMax) + ret = SQL_SUCCESS_WITH_INFO; + if (pcbDiagInfo) { + *pcbDiagInfo = blen * WCLEN; + } + } + if (rgbD) + free(rgbD); + break; + default: + rgbD = rgbDiagInfo; + bMax = cbDiagInfoMax; + rgbL = pcbDiagInfo; + ret = ESAPI_GetDiagField(fHandleType, handle, iRecord, fDiagField, + rgbD, bMax, rgbL); + break; + } + + return ret; +} + +/* new function */ +RETCODE SQL_API SQLGetDescRecW(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber, + SQLWCHAR *Name, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength, SQLSMALLINT *Type, + SQLSMALLINT *SubType, SQLLEN *Length, + SQLSMALLINT *Precision, SQLSMALLINT *Scale, + SQLSMALLINT *Nullable) { + UNUSED(DescriptorHandle, RecNumber, Name, BufferLength, StringLength, Type, + SubType, Length, Precision, 
Scale, Nullable); + MYLOG(ES_TRACE, "entering\n"); + MYLOG(ES_DEBUG, "Error not implemented\n"); + return SQL_ERROR; +} + +/* new fucntion */ +RETCODE SQL_API SQLSetDescRecW(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber, + SQLSMALLINT Type, SQLSMALLINT SubType, + SQLLEN Length, SQLSMALLINT Precision, + SQLSMALLINT Scale, PTR Data, + SQLLEN *StringLength, SQLLEN *Indicator) { + UNUSED(DescriptorHandle, RecNumber, Type, SubType, Length, Precision, Scale, + Data, StringLength, Indicator); + MYLOG(ES_TRACE, "entering\n"); + MYLOG(ES_DEBUG, "Error not implemented\n"); + return SQL_ERROR; +} diff --git a/sql-odbc/src/odfesqlodbc/odbcapiw.c b/sql-odbc/src/odfesqlodbc/odbcapiw.c new file mode 100644 index 0000000000..5a7a8ab92d --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/odbcapiw.c @@ -0,0 +1,899 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include +#include + +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_driver_connect.h" +#include "es_info.h" +#include "es_odbc.h" +#include "statement.h" +#include "unicode_support.h" + +RETCODE SQL_API SQLColumnsW(HSTMT StatementHandle, SQLWCHAR *CatalogName, + SQLSMALLINT NameLength1, SQLWCHAR *SchemaName, + SQLSMALLINT NameLength2, SQLWCHAR *TableName, + SQLSMALLINT NameLength3, SQLWCHAR *ColumnName, + SQLSMALLINT NameLength4) { + CSTR func = "SQLColumnsW"; + RETCODE ret; + char *ctName, *scName, *tbName, *clName; + SQLLEN nmlen1, nmlen2, nmlen3, nmlen4; + StatementClass *stmt = (StatementClass *)StatementHandle; + ConnectionClass *conn; + BOOL lower_id; + UWORD flag = PODBC_SEARCH_PUBLIC_SCHEMA; + ConnInfo *ci; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + ci = &(conn->connInfo); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); + scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); + tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); + clName = ucs2_to_utf8(ColumnName, NameLength4, &nmlen4, lower_id); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Columns(StatementHandle, (SQLCHAR *)ctName, + (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, + (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, + (SQLSMALLINT)nmlen3, (SQLCHAR *)clName, + (SQLSMALLINT)nmlen4, flag, 0, 0); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (tbName) + free(tbName); + if (clName) + free(clName); + return ret; +} + +RETCODE SQL_API SQLConnectW(HDBC ConnectionHandle, SQLWCHAR *ServerName, + SQLSMALLINT NameLength1, SQLWCHAR *UserName, + SQLSMALLINT NameLength2, SQLWCHAR *Authentication, + SQLSMALLINT NameLength3) { + 
char *svName, *usName, *auth; + SQLLEN nmlen1, nmlen2, nmlen3; + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + CC_set_in_unicode_driver(conn); + svName = ucs2_to_utf8(ServerName, NameLength1, &nmlen1, FALSE); + usName = ucs2_to_utf8(UserName, NameLength2, &nmlen2, FALSE); + auth = ucs2_to_utf8(Authentication, NameLength3, &nmlen3, FALSE); + ret = + ESAPI_Connect(ConnectionHandle, (SQLCHAR *)svName, (SQLSMALLINT)nmlen1, + (SQLCHAR *)usName, (SQLSMALLINT)nmlen2, (SQLCHAR *)auth, + (SQLSMALLINT)nmlen3); + LEAVE_CONN_CS(conn); + if (svName) + free(svName); + if (usName) + free(usName); + if (auth) + free(auth); + return ret; +} + +RETCODE SQL_API SQLDriverConnectW(HDBC hdbc, HWND hwnd, SQLWCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, + SQLWCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut, + SQLUSMALLINT fDriverCompletion) { + CSTR func = "SQLDriverConnectW"; + char *szIn, *szOut = NULL; + SQLSMALLINT maxlen, obuflen = 0; + SQLLEN inlen; + SQLSMALLINT olen, *pCSO; + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + CC_set_in_unicode_driver(conn); + szIn = ucs2_to_utf8(szConnStrIn, cbConnStrIn, &inlen, FALSE); + maxlen = cbConnStrOutMax; + pCSO = NULL; + olen = 0; + if (maxlen > 0) { + obuflen = maxlen + 1; + szOut = malloc(obuflen); + if (!szOut) { + CC_set_error(conn, CONN_NO_MEMORY_ERROR, + "Could not allocate memory for output buffer", func); + ret = SQL_ERROR; + goto cleanup; + } + pCSO = &olen; + } else if (pcbConnStrOut) + pCSO = &olen; + ret = + ESAPI_DriverConnect(hdbc, hwnd, (SQLCHAR *)szIn, (SQLSMALLINT)inlen, + (SQLCHAR *)szOut, maxlen, pCSO, fDriverCompletion); + if (ret != SQL_ERROR && NULL != pCSO) { + SQLLEN outlen = olen; + + if (olen < obuflen) + outlen = utf8_to_ucs2(szOut, olen, szConnStrOut, cbConnStrOutMax); + 
else + utf8_to_ucs2(szOut, maxlen, szConnStrOut, cbConnStrOutMax); + if (outlen >= cbConnStrOutMax && NULL != szConnStrOut + && NULL != pcbConnStrOut) { + MYLOG(ES_ALL, "cbConnstrOutMax=%d pcb=%p\n", + cbConnStrOutMax, pcbConnStrOut); + if (SQL_SUCCESS == ret) { + CC_set_error(conn, CONN_TRUNCATED, + "the ConnStrOut is too small", func); + ret = SQL_SUCCESS_WITH_INFO; + } + } + if (pcbConnStrOut) + *pcbConnStrOut = (SQLSMALLINT)outlen; + } +cleanup: + LEAVE_CONN_CS(conn); + if (szOut) + free(szOut); + if (szIn) + free(szIn); + return ret; +} +RETCODE SQL_API SQLBrowseConnectW(HDBC hdbc, SQLWCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, + SQLWCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut) { + CSTR func = "SQLBrowseConnectW"; + char *szIn, *szOut; + SQLLEN inlen; + SQLUSMALLINT obuflen; + SQLSMALLINT olen = 0; + RETCODE ret; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + CC_set_in_unicode_driver(conn); + szIn = ucs2_to_utf8(szConnStrIn, cbConnStrIn, &inlen, FALSE); + obuflen = cbConnStrOutMax + 1; + szOut = malloc(obuflen); + if (szOut) + ret = ESAPI_BrowseConnect(hdbc, (SQLCHAR *)szIn, (SQLSMALLINT)inlen, + (SQLCHAR *)szOut, cbConnStrOutMax, &olen); + else { + CC_set_error(conn, CONN_NO_MEMORY_ERROR, + "Could not allocate memory for output buffer", func); + ret = SQL_ERROR; + } + LEAVE_CONN_CS(conn); + if (ret != SQL_ERROR) { + SQLLEN outlen = + utf8_to_ucs2(szOut, olen, szConnStrOut, cbConnStrOutMax); + if (pcbConnStrOut) + *pcbConnStrOut = (SQLSMALLINT)outlen; + } + free(szOut); + if (szIn) + free(szIn); + return ret; +} + +RETCODE SQL_API SQLDataSourcesW(HENV EnvironmentHandle, SQLUSMALLINT Direction, + SQLWCHAR *ServerName, SQLSMALLINT BufferLength1, + SQLSMALLINT *NameLength1, SQLWCHAR *Description, + SQLSMALLINT BufferLength2, + SQLSMALLINT *NameLength2) { + UNUSED(EnvironmentHandle, Direction, ServerName, BufferLength1, NameLength1, + 
Description, BufferLength2, NameLength2); + MYLOG(ES_TRACE, "entering\n"); + return SQL_ERROR; +} + +RETCODE SQL_API SQLDescribeColW(HSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLWCHAR *ColumnName, + SQLSMALLINT BufferLength, + SQLSMALLINT *NameLength, SQLSMALLINT *DataType, + SQLULEN *ColumnSize, SQLSMALLINT *DecimalDigits, + SQLSMALLINT *Nullable) { + CSTR func = "SQLDescribeColW"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + SQLSMALLINT buflen, nmlen = 0; + char *clName = NULL, *clNamet = NULL; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + buflen = 0; + if (BufferLength > 0) + buflen = BufferLength * 3; + else if (NameLength) + buflen = 32; + if (buflen > 0) + clNamet = malloc(buflen); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + for (;; buflen = nmlen + 1, clNamet = realloc(clName, buflen)) { + if (!clNamet) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Could not allocate memory for column name", func); + ret = SQL_ERROR; + break; + } + clName = clNamet; + ret = ESAPI_DescribeCol(StatementHandle, ColumnNumber, + (SQLCHAR *)clName, buflen, &nmlen, DataType, + ColumnSize, DecimalDigits, Nullable); + if (SQL_SUCCESS_WITH_INFO != ret || nmlen < buflen) + break; + } + if (SQL_SUCCEEDED(ret)) { + SQLLEN nmcount = nmlen; + + if (nmlen < buflen) + nmcount = utf8_to_ucs2(clName, nmlen, ColumnName, BufferLength); + if (SQL_SUCCESS == ret && BufferLength > 0 && nmcount > BufferLength) { + ret = SQL_SUCCESS_WITH_INFO; + SC_set_error(stmt, STMT_TRUNCATED, "Column name too large", func); + } + if (NameLength) + *NameLength = (SQLSMALLINT)nmcount; + } + LEAVE_STMT_CS(stmt); + if (clName) + free(clName); + return ret; +} + +RETCODE SQL_API SQLExecDirectW(HSTMT StatementHandle, SQLWCHAR *StatementText, + SQLINTEGER TextLength) { + if(StatementHandle == NULL) + return SQL_ERROR; + + StatementClass *stmt = (StatementClass *)StatementHandle; + if 
(SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + // Get query string + SQLLEN slen = 0; + char *stxt = ucs2_to_utf8(StatementText, TextLength, &slen, FALSE); + + // Enter critical + ENTER_STMT_CS(stmt); + + // Clear error and rollback + SC_clear_error(stmt); + + // Execute statement if statement is ready + RETCODE ret = SQL_ERROR; + if (!SC_opencheck(stmt, "SQLExecDirectW")) + ret = ESAPI_ExecDirect(StatementHandle, (const SQLCHAR *)stxt, (SQLINTEGER)slen, 1); + + // Exit critical + LEAVE_STMT_CS(stmt); + + if (stxt) + free(stxt); + return ret; +} + +RETCODE SQL_API SQLGetCursorNameW(HSTMT StatementHandle, SQLWCHAR *CursorName, + SQLSMALLINT BufferLength, + SQLSMALLINT *NameLength) { + CSTR func = "SQLGetCursorNameW"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + char *crName = NULL, *crNamet; + SQLSMALLINT clen = 0, buflen; + + MYLOG(ES_TRACE, "entering\n"); + if (BufferLength > 0) + buflen = BufferLength * 3; + else + buflen = 32; + crNamet = malloc(buflen); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + for (;; buflen = clen + 1, crNamet = realloc(crName, buflen)) { + if (!crNamet) { + SC_set_error(stmt, STMT_NO_MEMORY_ERROR, + "Could not allocate memory for cursor name", func); + ret = SQL_ERROR; + break; + } + crName = crNamet; + ret = ESAPI_GetCursorName(StatementHandle, (SQLCHAR *)crName, buflen, + &clen); + if (SQL_SUCCESS_WITH_INFO != ret || clen < buflen) + break; + } + if (SQL_SUCCEEDED(ret)) { + SQLLEN nmcount = clen; + + if (clen < buflen) + nmcount = utf8_to_ucs2(crName, clen, CursorName, BufferLength); + if (SQL_SUCCESS == ret && nmcount > BufferLength) { + ret = SQL_SUCCESS_WITH_INFO; + SC_set_error(stmt, STMT_TRUNCATED, "Cursor name too large", func); + } + if (NameLength) + *NameLength = (SQLSMALLINT)nmcount; + } + LEAVE_STMT_CS(stmt); + free(crName); + return ret; +} + +RETCODE SQL_API SQLGetInfoW(HDBC ConnectionHandle, SQLUSMALLINT InfoType, + PTR InfoValue, SQLSMALLINT BufferLength, + 
SQLSMALLINT *StringLength) { + ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; + RETCODE ret; + + ENTER_CONN_CS(conn); + CC_set_in_unicode_driver(conn); + CC_clear_error(conn); + MYLOG(ES_TRACE, "entering\n"); + if ((ret = ESAPI_GetInfo(ConnectionHandle, InfoType, InfoValue, + BufferLength, StringLength)) + == SQL_ERROR) + CC_log_error("SQLGetInfoW", "", conn); + LEAVE_CONN_CS(conn); + return ret; +} + +RETCODE SQL_API SQLPrepareW(HSTMT StatementHandle, SQLWCHAR *StatementText, + SQLINTEGER TextLength) { + if(StatementHandle == NULL) + return SQL_ERROR; + + CSTR func = "SQLPrepareW"; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + SQLLEN slen; + char *stxt = ucs2_to_utf8(StatementText, TextLength, &slen, FALSE); + + // Enter critical + ENTER_STMT_CS(stmt); + + // Clear error and rollback + SC_clear_error(stmt); + + // Prepare statement if statement is ready + RETCODE ret = SQL_ERROR; + if (!SC_opencheck(stmt, func)) + ret = ESAPI_Prepare(StatementHandle, (const SQLCHAR *)stxt, (SQLINTEGER)slen); + + // Exit critical + LEAVE_STMT_CS(stmt); + + // Release memory + if (stxt) + free(stxt); + return ret; +} + +RETCODE SQL_API SQLSetCursorNameW(HSTMT StatementHandle, SQLWCHAR *CursorName, + SQLSMALLINT NameLength) { + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + char *crName; + SQLLEN nlen; + + MYLOG(ES_TRACE, "entering\n"); + crName = ucs2_to_utf8(CursorName, NameLength, &nlen, FALSE); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + ret = ESAPI_SetCursorName(StatementHandle, (SQLCHAR *)crName, + (SQLSMALLINT)nlen); + LEAVE_STMT_CS(stmt); + if (crName) + free(crName); + return ret; +} + +RETCODE SQL_API SQLSpecialColumnsW( + HSTMT StatementHandle, SQLUSMALLINT IdentifierType, SQLWCHAR *CatalogName, + SQLSMALLINT NameLength1, SQLWCHAR *SchemaName, SQLSMALLINT NameLength2, + SQLWCHAR *TableName, SQLSMALLINT 
NameLength3, SQLUSMALLINT Scope, + SQLUSMALLINT Nullable) { + CSTR func = "SQLSpecialColumnsW"; + RETCODE ret; + char *ctName, *scName, *tbName; + SQLLEN nmlen1, nmlen2, nmlen3; + StatementClass *stmt = (StatementClass *)StatementHandle; + ConnectionClass *conn; + BOOL lower_id; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); + scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); + tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_SpecialColumns( + StatementHandle, IdentifierType, (SQLCHAR *)ctName, + (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, + (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, Scope, Nullable); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (tbName) + free(tbName); + return ret; +} + +RETCODE SQL_API SQLStatisticsW(HSTMT StatementHandle, SQLWCHAR *CatalogName, + SQLSMALLINT NameLength1, SQLWCHAR *SchemaName, + SQLSMALLINT NameLength2, SQLWCHAR *TableName, + SQLSMALLINT NameLength3, SQLUSMALLINT Unique, + SQLUSMALLINT Reserved) { + CSTR func = "SQLStatisticsW"; + RETCODE ret; + char *ctName, *scName, *tbName; + SQLLEN nmlen1, nmlen2, nmlen3; + StatementClass *stmt = (StatementClass *)StatementHandle; + ConnectionClass *conn; + BOOL lower_id; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); + scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); + tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); + ENTER_STMT_CS(stmt); + 
SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Statistics(StatementHandle, (SQLCHAR *)ctName, + (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, + (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, + (SQLSMALLINT)nmlen3, Unique, Reserved); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (tbName) + free(tbName); + return ret; +} + +RETCODE SQL_API SQLTablesW(HSTMT StatementHandle, SQLWCHAR *CatalogName, + SQLSMALLINT NameLength1, SQLWCHAR *SchemaName, + SQLSMALLINT NameLength2, SQLWCHAR *TableName, + SQLSMALLINT NameLength3, SQLWCHAR *TableType, + SQLSMALLINT NameLength4) { + CSTR func = "SQLTablesW"; + RETCODE ret; + char *ctName, *scName, *tbName, *tbType; + SQLLEN nmlen1, nmlen2, nmlen3, nmlen4; + StatementClass *stmt = (StatementClass *)StatementHandle; + ConnectionClass *conn; + BOOL lower_id; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); + scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); + tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); + tbType = ucs2_to_utf8(TableType, NameLength4, &nmlen4, FALSE); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Tables( + StatementHandle, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, + (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, + (SQLSMALLINT)nmlen3, (SQLCHAR *)tbType, (SQLSMALLINT)nmlen4, flag); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (tbName) + free(tbName); + if (tbType) + free(tbType); + return ret; +} + +RETCODE SQL_API SQLColumnPrivilegesW( + HSTMT hstmt, SQLWCHAR *szCatalogName, SQLSMALLINT cbCatalogName, 
+ SQLWCHAR *szSchemaName, SQLSMALLINT cbSchemaName, SQLWCHAR *szTableName, + SQLSMALLINT cbTableName, SQLWCHAR *szColumnName, SQLSMALLINT cbColumnName) { + CSTR func = "SQLColumnPrivilegesW"; + RETCODE ret; + char *ctName, *scName, *tbName, *clName; + SQLLEN nmlen1, nmlen2, nmlen3, nmlen4; + StatementClass *stmt = (StatementClass *)hstmt; + ConnectionClass *conn; + BOOL lower_id; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); + scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); + tbName = ucs2_to_utf8(szTableName, cbTableName, &nmlen3, lower_id); + clName = ucs2_to_utf8(szColumnName, cbColumnName, &nmlen4, lower_id); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_ColumnPrivileges( + hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, + (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, + (SQLCHAR *)clName, (SQLSMALLINT)nmlen4, flag); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (tbName) + free(tbName); + if (clName) + free(clName); + return ret; +} + +RETCODE SQL_API SQLForeignKeysW( + HSTMT hstmt, SQLWCHAR *szPkCatalogName, SQLSMALLINT cbPkCatalogName, + SQLWCHAR *szPkSchemaName, SQLSMALLINT cbPkSchemaName, + SQLWCHAR *szPkTableName, SQLSMALLINT cbPkTableName, + SQLWCHAR *szFkCatalogName, SQLSMALLINT cbFkCatalogName, + SQLWCHAR *szFkSchemaName, SQLSMALLINT cbFkSchemaName, + SQLWCHAR *szFkTableName, SQLSMALLINT cbFkTableName) { + CSTR func = "SQLForeignKeysW"; + RETCODE ret; + char *ctName, *scName, *tbName, *fkctName, *fkscName, *fktbName; + SQLLEN nmlen1, nmlen2, nmlen3, nmlen4, nmlen5, nmlen6; + StatementClass *stmt = 
(StatementClass *)hstmt; + ConnectionClass *conn; + BOOL lower_id; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(szPkCatalogName, cbPkCatalogName, &nmlen1, lower_id); + scName = ucs2_to_utf8(szPkSchemaName, cbPkSchemaName, &nmlen2, lower_id); + tbName = ucs2_to_utf8(szPkTableName, cbPkTableName, &nmlen3, lower_id); + fkctName = + ucs2_to_utf8(szFkCatalogName, cbFkCatalogName, &nmlen4, lower_id); + fkscName = ucs2_to_utf8(szFkSchemaName, cbFkSchemaName, &nmlen5, lower_id); + fktbName = ucs2_to_utf8(szFkTableName, cbFkTableName, &nmlen6, lower_id); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_ForeignKeys( + hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, + (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, + (SQLCHAR *)fkctName, (SQLSMALLINT)nmlen4, (SQLCHAR *)fkscName, + (SQLSMALLINT)nmlen5, (SQLCHAR *)fktbName, (SQLSMALLINT)nmlen6); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (tbName) + free(tbName); + if (fkctName) + free(fkctName); + if (fkscName) + free(fkscName); + if (fktbName) + free(fktbName); + return ret; +} + +RETCODE SQL_API SQLNativeSqlW(HDBC hdbc, SQLWCHAR *szSqlStrIn, + SQLINTEGER cbSqlStrIn, SQLWCHAR *szSqlStr, + SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr) { + CSTR func = "SQLNativeSqlW"; + RETCODE ret; + char *szIn, *szOut = NULL, *szOutt = NULL; + SQLLEN slen; + SQLINTEGER buflen, olen = 0; + ConnectionClass *conn = (ConnectionClass *)hdbc; + + MYLOG(ES_TRACE, "entering\n"); + ENTER_CONN_CS(conn); + CC_clear_error(conn); + CC_set_in_unicode_driver(conn); + szIn = ucs2_to_utf8(szSqlStrIn, cbSqlStrIn, &slen, FALSE); + buflen = 3 * cbSqlStrMax; + if (buflen > 0) + szOutt = malloc(buflen); + for (;; buflen = olen + 1, szOutt = realloc(szOut, buflen)) { 
+ if (!szOutt) { + CC_set_error(conn, CONN_NO_MEMORY_ERROR, + "Could not allocate memory for output buffer", func); + ret = SQL_ERROR; + break; + } + szOut = szOutt; + ret = ESAPI_NativeSql(hdbc, (SQLCHAR *)szIn, (SQLINTEGER)slen, + (SQLCHAR *)szOut, buflen, &olen); + if (SQL_SUCCESS_WITH_INFO != ret || olen < buflen) + break; + } + if (szIn) + free(szIn); + if (SQL_SUCCEEDED(ret)) { + SQLLEN szcount = olen; + + if (olen < buflen) + szcount = utf8_to_ucs2(szOut, olen, szSqlStr, cbSqlStrMax); + if (SQL_SUCCESS == ret && szcount > cbSqlStrMax) { + ret = SQL_SUCCESS_WITH_INFO; + CC_set_error(conn, CONN_TRUNCATED, "Sql string too large", func); + } + if (pcbSqlStr) + *pcbSqlStr = (SQLINTEGER)szcount; + } + LEAVE_CONN_CS(conn); + free(szOut); + return ret; +} + +RETCODE SQL_API SQLPrimaryKeysW(HSTMT hstmt, SQLWCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + SQLWCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, SQLWCHAR *szTableName, + SQLSMALLINT cbTableName) { + CSTR func = "SQLPrimaryKeysW"; + RETCODE ret; + char *ctName, *scName, *tbName; + SQLLEN nmlen1, nmlen2, nmlen3; + StatementClass *stmt = (StatementClass *)hstmt; + ConnectionClass *conn; + BOOL lower_id; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); + scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); + tbName = ucs2_to_utf8(szTableName, cbTableName, &nmlen3, lower_id); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_PrimaryKeys(hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, + (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, + (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, 0); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (tbName) + free(tbName); + return ret; +} + +RETCODE SQL_API 
SQLProcedureColumnsW( + HSTMT hstmt, SQLWCHAR *szCatalogName, SQLSMALLINT cbCatalogName, + SQLWCHAR *szSchemaName, SQLSMALLINT cbSchemaName, SQLWCHAR *szProcName, + SQLSMALLINT cbProcName, SQLWCHAR *szColumnName, SQLSMALLINT cbColumnName) { + CSTR func = "SQLProcedureColumnsW"; + RETCODE ret; + char *ctName, *scName, *prName, *clName; + SQLLEN nmlen1, nmlen2, nmlen3, nmlen4; + StatementClass *stmt = (StatementClass *)hstmt; + ConnectionClass *conn; + BOOL lower_id; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); + scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); + prName = ucs2_to_utf8(szProcName, cbProcName, &nmlen3, lower_id); + clName = ucs2_to_utf8(szColumnName, cbColumnName, &nmlen4, lower_id); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_ProcedureColumns( + hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, + (SQLSMALLINT)nmlen2, (SQLCHAR *)prName, (SQLSMALLINT)nmlen3, + (SQLCHAR *)clName, (SQLSMALLINT)nmlen4, flag); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (prName) + free(prName); + if (clName) + free(clName); + return ret; +} + +RETCODE SQL_API SQLProceduresW(HSTMT hstmt, SQLWCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + SQLWCHAR *szSchemaName, SQLSMALLINT cbSchemaName, + SQLWCHAR *szProcName, SQLSMALLINT cbProcName) { + CSTR func = "SQLProceduresW"; + RETCODE ret; + char *ctName, *scName, *prName; + SQLLEN nmlen1, nmlen2, nmlen3; + StatementClass *stmt = (StatementClass *)hstmt; + ConnectionClass *conn; + BOOL lower_id; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id 
= DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); + scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); + prName = ucs2_to_utf8(szProcName, cbProcName, &nmlen3, lower_id); + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_Procedures(hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, + (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, + (SQLCHAR *)prName, (SQLSMALLINT)nmlen3, flag); + LEAVE_STMT_CS(stmt); + if (ctName) + free(ctName); + if (scName) + free(scName); + if (prName) + free(prName); + return ret; +} + +RETCODE SQL_API SQLTablePrivilegesW(HSTMT hstmt, SQLWCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + SQLWCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, + SQLWCHAR *szTableName, + SQLSMALLINT cbTableName) { + CSTR func = "SQLTablePrivilegesW"; + RETCODE ret; + char *ctName, *scName, *tbName; + SQLLEN nmlen1, nmlen2, nmlen3; + StatementClass *stmt = (StatementClass *)hstmt; + ConnectionClass *conn; + BOOL lower_id; + UWORD flag = 0; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + conn = SC_get_conn(stmt); + lower_id = DEFAULT_LOWERCASEIDENTIFIER; + ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); + scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); + tbName = ucs2_to_utf8(szTableName, cbTableName, &nmlen3, lower_id); + ENTER_STMT_CS((StatementClass *)hstmt); + SC_clear_error(stmt); + if (stmt->options.metadata_id) + flag |= PODBC_NOT_SEARCH_PATTERN; + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_TablePrivileges( + hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, + (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, flag); + LEAVE_STMT_CS((StatementClass *)hstmt); + if (ctName) + free(ctName); + if (scName) + 
free(scName); + if (tbName) + free(tbName); + return ret; +} + +RETCODE SQL_API SQLGetTypeInfoW(SQLHSTMT StatementHandle, + SQLSMALLINT DataType) { + CSTR func = "SQLGetTypeInfoW"; + RETCODE ret; + StatementClass *stmt = (StatementClass *)StatementHandle; + + MYLOG(ES_TRACE, "entering\n"); + if (SC_connection_lost_check(stmt, __FUNCTION__)) + return SQL_ERROR; + + ENTER_STMT_CS(stmt); + SC_clear_error(stmt); + if (SC_opencheck(stmt, func)) + ret = SQL_ERROR; + else + ret = ESAPI_GetTypeInfo(StatementHandle, DataType); + LEAVE_STMT_CS(stmt); + return ret; +} diff --git a/sql-odbc/src/odfesqlodbc/options.c b/sql-odbc/src/odfesqlodbc/options.c new file mode 100644 index 0000000000..2210fdf784 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/options.c @@ -0,0 +1,726 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include + +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_odbc.h" +#include "misc.h" +#include "qresult.h" +#include "statement.h" +#include "unicode_support.h" + +static RETCODE set_statement_option(ConnectionClass *conn, StatementClass *stmt, + SQLUSMALLINT fOption, SQLULEN vParam) { + CSTR func = "set_statement_option"; + char changed = FALSE; + ConnInfo *ci = NULL; + SQLULEN setval; + + if (conn) + ci = &(conn->connInfo); + else + ci = &(SC_get_conn(stmt)->connInfo); + switch (fOption) { + case SQL_ASYNC_ENABLE: /* ignored */ + break; + + case SQL_BIND_TYPE: + /* now support multi-column and multi-row binding */ + if (conn) + conn->ardOptions.bind_size = (SQLUINTEGER)vParam; + if (stmt) + SC_get_ARDF(stmt)->bind_size = (SQLUINTEGER)vParam; + break; + + case SQL_CONCURRENCY: + /* + * positioned update isn't supported so cursor concurrency is + * read-only + */ + MYLOG(ES_DEBUG, "SQL_CONCURRENCY = " FORMAT_LEN " ", vParam); + setval = SQL_CONCUR_READ_ONLY; + if (conn) + conn->stmtOptions.scroll_concurrency = (SQLUINTEGER)setval; + else if (stmt) { + if (SC_get_Result(stmt)) { + SC_set_error( + stmt, STMT_INVALID_CURSOR_STATE_ERROR, + "The attr can't be changed because the cursor is open.", + func); + return SQL_ERROR; + } + stmt->options.scroll_concurrency = + stmt->options_orig.scroll_concurrency = (SQLUINTEGER)setval; + } + if (setval != vParam) + changed = TRUE; + MYPRINTF(0, "-> " FORMAT_LEN "\n", setval); + break; + + case SQL_CURSOR_TYPE: + /* + * if declare/fetch, then type can only be forward. otherwise, + * it can only be forward or static. 
+ */ + MYLOG(ES_DEBUG, "SQL_CURSOR_TYPE = " FORMAT_LEN " ", vParam); + setval = SQL_CURSOR_FORWARD_ONLY; + if (SQL_CURSOR_STATIC == vParam) + setval = vParam; + else if (SQL_CURSOR_KEYSET_DRIVEN == vParam) { + setval = SQL_CURSOR_STATIC; /* at least scrollable */ + } else if (SQL_CURSOR_DYNAMIC == vParam) { + setval = SQL_CURSOR_STATIC; /* at least scrollable */ + } + if (conn) + conn->stmtOptions.cursor_type = (SQLUINTEGER)setval; + else if (stmt) { + if (SC_get_Result(stmt)) { + SC_set_error( + stmt, STMT_INVALID_CURSOR_STATE_ERROR, + "The attr can't be changed because the cursor is open.", + func); + return SQL_ERROR; + } + stmt->options_orig.cursor_type = stmt->options.cursor_type = + (SQLUINTEGER)setval; + } + if (setval != vParam) + changed = TRUE; + MYPRINTF(0, "-> " FORMAT_LEN "\n", setval); + break; + + case SQL_KEYSET_SIZE: /* ignored, but saved and returned */ + MYLOG(ES_DEBUG, "SQL_KEYSET_SIZE, vParam = " FORMAT_LEN "\n", + vParam); + + if (conn) + conn->stmtOptions.keyset_size = vParam; + if (stmt) { + stmt->options_orig.keyset_size = vParam; + if (!SC_get_Result(stmt)) + stmt->options.keyset_size = vParam; + if (stmt->options.keyset_size != (SQLLEN)vParam) + changed = TRUE; + } + + break; + + case SQL_MAX_LENGTH: /* ignored, but saved */ + MYLOG(ES_DEBUG, "SQL_MAX_LENGTH, vParam = " FORMAT_LEN "\n", + vParam); + if (conn) + conn->stmtOptions.maxLength = vParam; + if (stmt) { + stmt->options_orig.maxLength = vParam; + if (!SC_get_Result(stmt)) + stmt->options.maxLength = vParam; + if (stmt->options.maxLength != (SQLLEN)vParam) + changed = TRUE; + } + break; + + case SQL_MAX_ROWS: /* ignored, but saved */ + MYLOG(ES_DEBUG, "SQL_MAX_ROWS, vParam = " FORMAT_LEN "\n", vParam); + if (conn) + conn->stmtOptions.maxRows = vParam; + if (stmt) { + stmt->options_orig.maxRows = vParam; + if (!SC_get_Result(stmt)) + stmt->options.maxRows = vParam; + if (stmt->options.maxRows != (SQLLEN)vParam) + changed = TRUE; + } + break; + + case SQL_NOSCAN: /* ignored */ + 
MYLOG(ES_DEBUG, "SQL_NOSCAN, vParam = " FORMAT_LEN "\n", vParam); + break; + + case SQL_QUERY_TIMEOUT: /* ignored */ + MYLOG(ES_DEBUG, "SQL_QUERY_TIMEOUT, vParam = " FORMAT_LEN "\n", + vParam); + if (conn) + conn->stmtOptions.stmt_timeout = (SQLULEN)vParam; + if (stmt) + stmt->options.stmt_timeout = (SQLULEN)vParam; + break; + + case SQL_RETRIEVE_DATA: + MYLOG(ES_DEBUG, "SQL_RETRIEVE_DATA, vParam = " FORMAT_LEN "\n", + vParam); + if (conn) + conn->stmtOptions.retrieve_data = (SQLUINTEGER)vParam; + if (stmt) + stmt->options.retrieve_data = (SQLUINTEGER)vParam; + break; + + case SQL_ROWSET_SIZE: + MYLOG(ES_DEBUG, "SQL_ROWSET_SIZE, vParam = " FORMAT_LEN "\n", + vParam); + + if (vParam < 1) { + vParam = 1; + changed = TRUE; + } + + if (conn) + conn->ardOptions.size_of_rowset_odbc2 = vParam; + if (stmt) + SC_get_ARDF(stmt)->size_of_rowset_odbc2 = vParam; + break; + + case SQL_SIMULATE_CURSOR: /* NOT SUPPORTED */ + if (stmt) { + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Simulated positioned update/delete not " + "supported. Use the cursor library.", + func); + } + if (conn) { + CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, + "Simulated positioned update/delete not " + "supported. Use the cursor library.", + func); + } + return SQL_ERROR; + + case SQL_USE_BOOKMARKS: + if (stmt) { + MYLOG( + ES_DEBUG, "USE_BOOKMARKS %s\n", + (vParam == SQL_UB_OFF) + ? "off" + : ((vParam == SQL_UB_VARIABLE) ? "variable" : "fixed")); + setval = vParam; + stmt->options.use_bookmarks = (SQLUINTEGER)setval; + } + if (conn) + conn->stmtOptions.use_bookmarks = (SQLUINTEGER)vParam; + break; + + case 1204: /* SQL_COPT_SS_PRESERVE_CURSORS ? */ + if (stmt) { + SC_set_error(stmt, STMT_OPTION_NOT_FOR_THE_DRIVER, + "The option may be for MS SQL Server(Set)", func); + } else if (conn) { + CC_set_error(conn, CONN_OPTION_NOT_FOR_THE_DRIVER, + "The option may be for MS SQL Server(Set)", func); + } + return SQL_ERROR; + case 1227: /* SQL_SOPT_SS_HIDDEN_COLUMNS ? 
*/ + case 1228: /* SQL_SOPT_SS_NOBROWSETABLE ? */ + if (stmt) { +#ifndef NOT_USED + if (0 != vParam) + changed = TRUE; + break; +#else + SC_set_error(stmt, STMT_OPTION_NOT_FOR_THE_DRIVER, + "The option may be for MS SQL Server(Set)", func); +#endif /* NOT_USED */ + } else if (conn) { + CC_set_error(conn, CONN_OPTION_NOT_FOR_THE_DRIVER, + "The option may be for MS SQL Server(Set)", func); + } + return SQL_ERROR; + default: { + char option[64]; + + if (stmt) { + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Unknown statement option (Set)", func); + SPRINTF_FIXED(option, "fOption=%d, vParam=" FORMAT_ULEN, + fOption, vParam); + SC_log_error(func, option, stmt); + } + if (conn) { + CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, + "Unknown statement option (Set)", func); + SPRINTF_FIXED(option, "fOption=%d, vParam=" FORMAT_ULEN, + fOption, vParam); + CC_log_error(func, option, conn); + } + + return SQL_ERROR; + } + } + + if (changed) { + if (stmt) { + SC_set_error(stmt, STMT_OPTION_VALUE_CHANGED, + "Requested value changed.", func); + } + if (conn) { + CC_set_error(conn, CONN_OPTION_VALUE_CHANGED, + "Requested value changed.", func); + } + return SQL_SUCCESS_WITH_INFO; + } else + return SQL_SUCCESS; +} + +/* Implements only SQL_AUTOCOMMIT */ +RETCODE SQL_API ESAPI_SetConnectOption(HDBC hdbc, SQLUSMALLINT fOption, + SQLULEN vParam) { + CSTR func = "ESAPI_SetConnectOption"; + ConnectionClass *conn = (ConnectionClass *)hdbc; + char changed = FALSE; + RETCODE retval; + BOOL autocomm_on; + + MYLOG(ES_TRACE, "entering fOption = %d vParam = " FORMAT_LEN "\n", fOption, + vParam); + if (!conn) { + CC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + switch (fOption) { + /* + * Statement Options (apply to all stmts on the connection and + * become defaults for new stmts) + */ + case SQL_ASYNC_ENABLE: + case SQL_BIND_TYPE: + case SQL_CONCURRENCY: + case SQL_CURSOR_TYPE: + case SQL_KEYSET_SIZE: + case SQL_MAX_LENGTH: + case SQL_MAX_ROWS: + case SQL_NOSCAN: + case 
SQL_QUERY_TIMEOUT: + case SQL_RETRIEVE_DATA: + case SQL_ROWSET_SIZE: + case SQL_SIMULATE_CURSOR: + case SQL_USE_BOOKMARKS: + /* + * Become the default for all future statements on this + * connection + */ + retval = set_statement_option(conn, NULL, fOption, vParam); + + if (retval == SQL_SUCCESS_WITH_INFO) + changed = TRUE; + else if (retval == SQL_ERROR) + return SQL_ERROR; + + break; + + /* + * Connection Options + */ + + case SQL_ACCESS_MODE: /* ignored */ + break; + + case SQL_AUTOCOMMIT: + switch (vParam) { + case SQL_AUTOCOMMIT_ON: + autocomm_on = TRUE; + break; + default: + CC_set_error(conn, CONN_INVALID_ARGUMENT_NO, + "Illegal parameter value for SQL_AUTOCOMMIT. " + "Turning SQL_AUTOCOMMIT off requires " + "transactions, which are not supported.", + func); + return SQL_ERROR; + } + if (autocomm_on && SQL_AUTOCOMMIT_OFF != conn->autocommit_public) + break; + else if (!autocomm_on + && SQL_AUTOCOMMIT_OFF == conn->autocommit_public) + break; + conn->autocommit_public = + (autocomm_on ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF); + MYLOG(ES_DEBUG, + "AUTOCOMMIT: transact_status=%d, vparam=" FORMAT_LEN "\n", + conn->transact_status, vParam); + + CC_set_autocommit(conn, autocomm_on); + break; + + case SQL_CURRENT_QUALIFIER: /* ignored */ + break; + + case SQL_LOGIN_TIMEOUT: + conn->login_timeout = (SQLUINTEGER)vParam; + break; + + case SQL_PACKET_SIZE: /* ignored */ + break; + + case SQL_QUIET_MODE: /* ignored */ + break; + + case SQL_TXN_ISOLATION: + if (conn->isolation == vParam) + break; + /* + * If the connection is not established, just record the setting to + * reflect it upon connection. 
+ */ + if (CC_not_connected(conn)) { + conn->isolation = (UInt4)vParam; + break; + } + + conn->isolation = (UInt4)vParam; + break; + + /* These options should be handled by driver manager */ + case SQL_ODBC_CURSORS: + case SQL_OPT_TRACE: + case SQL_OPT_TRACEFILE: + case SQL_TRANSLATE_DLL: + case SQL_TRANSLATE_OPTION: + CC_log_error( + func, + "This connect option (Set) is only used by the Driver Manager", + conn); + break; + + default: { + char option[64]; + + CC_set_error(conn, CONN_UNSUPPORTED_OPTION, + "Unknown connect option (Set)", func); + SPRINTF_FIXED(option, "fOption=%d, vParam=" FORMAT_LEN, fOption, + vParam); +#ifdef WIN32 + if (fOption == 30002 && vParam) { + int cmp; +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(conn)) { + char *asPara = + ucs2_to_utf8((SQLWCHAR *)vParam, SQL_NTS, NULL, FALSE); + cmp = strcmp(asPara, "Microsoft Jet"); + free(asPara); + } else +#endif /* UNICODE_SUPPORT */ + cmp = strncmp((char *)vParam, "Microsoft Jet", 13); + if (0 == cmp) { + MYLOG(ES_DEBUG, "Microsoft Jet !!!!\n"); + CC_set_errornumber(conn, 0); + conn->ms_jet = 1; + return SQL_SUCCESS; + } + } +#endif /* WIN32 */ + CC_log_error(func, option, conn); + return SQL_ERROR; + } + } + + if (changed) { + CC_set_error(conn, CONN_OPTION_VALUE_CHANGED, + "Requested value changed.", func); + return SQL_SUCCESS_WITH_INFO; + } else + return SQL_SUCCESS; +} + +/* This function just can tell you whether you are in Autcommit mode or not */ +RETCODE SQL_API ESAPI_GetConnectOption(HDBC hdbc, SQLUSMALLINT fOption, + PTR pvParam, SQLINTEGER *StringLength, + SQLINTEGER BufferLength) { + CSTR func = "ESAPI_GetConnectOption"; + ConnectionClass *conn = (ConnectionClass *)hdbc; + const char *p = NULL; + SQLLEN len = sizeof(SQLINTEGER); + SQLRETURN result = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering...\n"); + + if (!conn) { + CC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + switch (fOption) { + case SQL_ACCESS_MODE: /* NOT SUPPORTED */ + *((SQLUINTEGER *)pvParam) = 
SQL_MODE_READ_WRITE; + break; + + case SQL_AUTOCOMMIT: + *((SQLUINTEGER *)pvParam) = conn->autocommit_public; + break; + + case SQL_CURRENT_QUALIFIER: /* don't use qualifiers */ + len = 0; + p = CurrCatString(conn); + break; + + case SQL_LOGIN_TIMEOUT: + *((SQLUINTEGER *)pvParam) = conn->login_timeout; + break; + + case SQL_PACKET_SIZE: /* NOT SUPPORTED */ + *((SQLUINTEGER *)pvParam) = 4096; + break; + + case SQL_QUERY_TIMEOUT: + *((SQLULEN *)pvParam) = conn->stmtOptions.stmt_timeout; + break; + + case SQL_QUIET_MODE: /* NOT SUPPORTED */ + *((SQLULEN *)pvParam) = 0; + break; + + case SQL_TXN_ISOLATION: + if (conn->isolation == 0) { + if (CC_not_connected(conn)) + return SQL_NO_DATA; + conn->isolation = CC_get_isolation(conn); + } + *((SQLUINTEGER *)pvParam) = conn->isolation; + break; + +#ifdef SQL_ATTR_CONNECTION_DEAD + case SQL_ATTR_CONNECTION_DEAD: +#else + case 1209: +#endif /* SQL_ATTR_CONNECTION_DEAD */ + MYLOG(ES_DEBUG, "CONNECTION_DEAD status=%d", conn->status); + *((SQLUINTEGER *)pvParam) = CC_not_connected(conn); + MYPRINTF(0, " val=" FORMAT_UINTEGER "\n", + *((SQLUINTEGER *)pvParam)); + break; + + case SQL_ATTR_ANSI_APP: + *((SQLUINTEGER *)pvParam) = CC_is_in_ansi_app(conn); + MYLOG(ES_DEBUG, "ANSI_APP val=" FORMAT_UINTEGER "\n", + *((SQLUINTEGER *)pvParam)); + break; + + /* These options should be handled by driver manager */ + case SQL_ODBC_CURSORS: + case SQL_OPT_TRACE: + case SQL_OPT_TRACEFILE: + case SQL_TRANSLATE_DLL: + case SQL_TRANSLATE_OPTION: + CC_log_error( + func, + "This connect option (Get) is only used by the Driver Manager", + conn); + break; + + default: { + char option[64]; + + CC_set_error(conn, CONN_UNSUPPORTED_OPTION, + "Unknown connect option (Get)", func); + SPRINTF_FIXED(option, "fOption=%d", fOption); + CC_log_error(func, option, conn); + return SQL_ERROR; + break; + } + } + + if (NULL != p && 0 == len) { + /* char/binary data */ + len = strlen(p); + + if (pvParam) { +#ifdef UNICODE_SUPPORT + if (CC_is_in_unicode_driver(conn)) { 
+ len = utf8_to_ucs2(p, len, (SQLWCHAR *)pvParam, + BufferLength / WCLEN); + len *= WCLEN; + } else +#endif /* UNICODE_SUPPORT */ + strncpy_null((char *)pvParam, p, (size_t)BufferLength); + + if (len >= BufferLength) { + result = SQL_SUCCESS_WITH_INFO; + CC_set_error(conn, CONN_TRUNCATED, + "The buffer was too small for the pvParam.", func); + } + } + } + if (StringLength) + *StringLength = (SQLINTEGER)len; + return result; +} + +RETCODE SQL_API ESAPI_SetStmtOption(HSTMT hstmt, SQLUSMALLINT fOption, + SQLULEN vParam) { + CSTR func = "ESAPI_SetStmtOption"; + StatementClass *stmt = (StatementClass *)hstmt; + RETCODE retval; + + MYLOG(ES_DEBUG, " entering...\n"); + + /* + * Though we could fake Access out by just returning SQL_SUCCESS all + * the time, but it tries to set a huge value for SQL_MAX_LENGTH and + * expects the driver to reduce it to the real value. + */ + if (!stmt) { + SC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + retval = set_statement_option(NULL, stmt, fOption, vParam); + return retval; +} + +RETCODE SQL_API ESAPI_GetStmtOption(HSTMT hstmt, SQLUSMALLINT fOption, + PTR pvParam, SQLINTEGER *StringLength, + SQLINTEGER BufferLength) { + UNUSED(BufferLength); + CSTR func = "ESAPI_GetStmtOption"; + StatementClass *stmt = (StatementClass *)hstmt; + QResultClass *res; + SQLLEN ridx; + SQLINTEGER len = sizeof(SQLINTEGER); + Int4 bookmark; + + MYLOG(ES_TRACE, "entering...\n"); + + /* + * thought we could fake Access out by just returning SQL_SUCCESS all + * the time, but it tries to set a huge value for SQL_MAX_LENGTH and + * expects the driver to reduce it to the real value + */ + if (!stmt) { + SC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + switch (fOption) { + case SQL_GET_BOOKMARK: + case SQL_ROW_NUMBER: + + res = SC_get_Curres(stmt); + if (!res) { + SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, + "The cursor has no result.", func); + return SQL_ERROR; + } + + ridx = GIdx2CacheIdx(stmt->currTuple, stmt, res); + 
if (!SC_is_fetchcursor(stmt)) { + /* make sure we're positioned on a valid row */ + if ((ridx < 0) + || (((SQLULEN)ridx) >= QR_get_num_cached_tuples(res))) { + SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, + "Not positioned on a valid row.", func); + return SQL_ERROR; + } + } else { + if (stmt->currTuple < 0 || !res->tupleField) { + SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, + "Not positioned on a valid row.", func); + return SQL_ERROR; + } + } + + if (fOption == SQL_GET_BOOKMARK + && stmt->options.use_bookmarks == SQL_UB_OFF) { + SC_set_error( + stmt, STMT_OPERATION_INVALID, + "Operation invalid because use bookmarks not enabled.", + func); + return SQL_ERROR; + } + + bookmark = (int)SC_make_int4_bookmark(stmt->currTuple); + memcpy(pvParam, &bookmark, sizeof(UInt4)); + + break; + + case SQL_ASYNC_ENABLE: /* NOT SUPPORTED */ + *((SQLINTEGER *)pvParam) = SQL_ASYNC_ENABLE_OFF; + break; + + case SQL_BIND_TYPE: + *((SQLINTEGER *)pvParam) = SC_get_ARDF(stmt)->bind_size; + break; + + case SQL_CONCURRENCY: /* NOT REALLY SUPPORTED */ + MYLOG(ES_DEBUG, "SQL_CONCURRENCY " FORMAT_INTEGER "\n", + stmt->options.scroll_concurrency); + *((SQLINTEGER *)pvParam) = stmt->options.scroll_concurrency; + break; + + case SQL_CURSOR_TYPE: /* PARTIAL SUPPORT */ + MYLOG(ES_DEBUG, "SQL_CURSOR_TYPE " FORMAT_INTEGER "\n", + stmt->options.cursor_type); + *((SQLINTEGER *)pvParam) = stmt->options.cursor_type; + break; + + case SQL_KEYSET_SIZE: /* NOT SUPPORTED, but saved */ + MYLOG(ES_DEBUG, "SQL_KEYSET_SIZE\n"); + *((SQLLEN *)pvParam) = stmt->options.keyset_size; + break; + + case SQL_MAX_LENGTH: /* NOT SUPPORTED, but saved */ + *((SQLLEN *)pvParam) = stmt->options.maxLength; + break; + + case SQL_MAX_ROWS: /* NOT SUPPORTED, but saved */ + *((SQLLEN *)pvParam) = stmt->options.maxRows; + MYLOG(ES_DEBUG, "MAX_ROWS, returning " FORMAT_LEN "\n", + stmt->options.maxRows); + break; + + case SQL_NOSCAN: /* NOT SUPPORTED */ + *((SQLINTEGER *)pvParam) = SQL_NOSCAN_ON; + break; + + case 
SQL_QUERY_TIMEOUT: /* NOT SUPPORTED */ + *((SQLULEN *)pvParam) = stmt->options.stmt_timeout; + break; + + case SQL_RETRIEVE_DATA: + *((SQLINTEGER *)pvParam) = stmt->options.retrieve_data; + break; + + case SQL_ROWSET_SIZE: + *((SQLLEN *)pvParam) = SC_get_ARDF(stmt)->size_of_rowset_odbc2; + break; + + case SQL_SIMULATE_CURSOR: /* NOT SUPPORTED */ + *((SQLINTEGER *)pvParam) = SQL_SC_NON_UNIQUE; + break; + + case SQL_USE_BOOKMARKS: + *((SQLINTEGER *)pvParam) = stmt->options.use_bookmarks; + break; + case 1227: /* SQL_SOPT_SS_HIDDEN_COLUMNS ? */ + case 1228: /* SQL_SOPT_SS_NOBROWSETABLE ? */ + *((SQLINTEGER *)pvParam) = 0; + break; + + default: { + char option[64]; + + SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, + "Unknown statement option (Get)", func); + SPRINTF_FIXED(option, "fOption=%d", fOption); + SC_log_error(func, option, stmt); + return SQL_ERROR; + } + } + if (StringLength) + *StringLength = len; + + return SQL_SUCCESS; +} diff --git a/sql-odbc/src/odfesqlodbc/parse.c b/sql-odbc/src/odfesqlodbc/parse.c new file mode 100644 index 0000000000..4fde3f0b4f --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/parse.c @@ -0,0 +1,84 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#include "es_odbc.h" + +#include +#include +#include + +#include "catfunc.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_types.h" +#include "qresult.h" +#include "statement.h" + +#include "es_info.h" +#include "misc.h" +#include "multibyte.h" + +Int4 FI_precision(const FIELD_INFO *fi) { + OID ftype; + + if (!fi) + return -1; + ftype = FI_type(fi); + switch (ftype) { + case ES_TYPE_NUMERIC: + return fi->column_size; + case ES_TYPE_DATETIME: + case ES_TYPE_TIMESTAMP_NO_TMZONE: + return fi->decimal_digits; + } + return 0; +} + +static void setNumFields(IRDFields *irdflds, size_t numFields) { + FIELD_INFO **fi = irdflds->fi; + size_t nfields = irdflds->nfields; + + if (numFields < nfields) { + int i; + + for (i = (int)numFields; i < (int)nfields; i++) { + if (fi[i]) + fi[i]->flag = 0; + } + } + irdflds->nfields = (UInt4)numFields; +} + +void SC_initialize_cols_info(StatementClass *stmt, BOOL DCdestroy, + BOOL parseReset) { + IRDFields *irdflds = SC_get_IRDF(stmt); + + /* Free the parsed table information */ + if (stmt->ti) { + TI_Destructor(stmt->ti, stmt->ntab); + free(stmt->ti); + stmt->ti = NULL; + } + stmt->ntab = 0; + if (DCdestroy) /* Free the parsed field information */ + DC_Destructor((DescriptorClass *)SC_get_IRD(stmt)); + else + setNumFields(irdflds, 0); + if (parseReset) { + stmt->parse_status = STMT_PARSE_NONE; + SC_reset_updatable(stmt); + } +} diff --git a/sql-odbc/src/odfesqlodbc/qresult.c b/sql-odbc/src/odfesqlodbc/qresult.c new file mode 100644 index 0000000000..0348076e26 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/qresult.c @@ -0,0 +1,485 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include "qresult.h" + +#include +#include +#include + +#include "es_statement.h" +#include "misc.h" +#include "statement.h" + +/* + * Used for building a Manual Result only + * All info functions call this function to create the manual result set. + */ +void QR_set_num_fields(QResultClass *self, int new_num_fields) { + if (!self) + return; + MYLOG(ES_TRACE, "entering\n"); + + CI_set_num_fields(QR_get_fields(self), (SQLSMALLINT)new_num_fields); + + MYLOG(ES_TRACE, "leaving\n"); +} + +void QR_set_position(QResultClass *self, SQLLEN pos) { + self->tupleField = + self->backend_tuples + + ((QR_get_rowstart_in_cache(self) + pos) * self->num_fields); +} + +void QR_set_reqsize(QResultClass *self, Int4 reqsize) { + self->rowset_size_include_ommitted = reqsize; +} + +void QR_set_cursor(QResultClass *self, const char *name) { + ConnectionClass *conn = QR_get_conn(self); + + if (self->cursor_name) { + if (name && 0 == strcmp(name, self->cursor_name)) + return; + free(self->cursor_name); + if (conn) { + CONNLOCK_ACQUIRE(conn); + conn->ncursors--; + CONNLOCK_RELEASE(conn); + } + self->cursTuple = -1; + QR_set_no_cursor(self); + } else if (NULL == name) + return; + if (name) { + self->cursor_name = strdup(name); + if (conn) { + CONNLOCK_ACQUIRE(conn); + conn->ncursors++; + CONNLOCK_RELEASE(conn); + } + } else { + QResultClass *res; + + self->cursor_name = NULL; + for (res = self->next; NULL != res; res = res->next) { + if (NULL != res->cursor_name) + free(res->cursor_name); + res->cursor_name = NULL; + } + } +} + +void QR_set_rowstart_in_cache(QResultClass *self, SQLLEN start) { + if (QR_synchronize_keys(self)) + self->key_base = start; + self->base = start; +} + +void QR_inc_rowstart_in_cache(QResultClass *self, SQLLEN base_inc) { + if 
(!QR_has_valid_base(self)) + MYLOG(ES_DEBUG, " called while the cache is not ready\n"); + self->base += base_inc; + if (QR_synchronize_keys(self)) + self->key_base = self->base; +} + +void QR_set_fields(QResultClass *self, ColumnInfoClass *fields) { + ColumnInfoClass *curfields = QR_get_fields(self); + + if (curfields == fields) + return; + + /* + * Unlink the old columninfo from this result set, freeing it if this + * was the last reference. + */ + if (NULL != curfields) { + if (curfields->refcount > 1) + curfields->refcount--; + else + CI_Destructor(curfields); + } + self->fields = fields; + if (NULL != fields) + fields->refcount++; +} + +/* + * CLASS QResult + */ +QResultClass *QR_Constructor(void) { + QResultClass *rv; + + MYLOG(ES_TRACE, "entering\n"); + rv = (QResultClass *)malloc(sizeof(QResultClass)); + + if (rv != NULL) { + ColumnInfoClass *fields; + + rv->rstatus = PORES_EMPTY_QUERY; + rv->pstatus = 0; + + /* construct the column info */ + rv->fields = NULL; + if (fields = CI_Constructor(), NULL == fields) { + free(rv); + return NULL; + } + QR_set_fields(rv, fields); + rv->backend_tuples = NULL; + rv->sqlstate[0] = '\0'; + rv->message = NULL; + rv->messageref = NULL; + rv->command = NULL; + rv->notice = NULL; + rv->conn = NULL; + rv->next = NULL; + rv->count_backend_allocated = 0; + rv->count_keyset_allocated = 0; + rv->num_total_read = 0; + rv->num_cached_rows = 0; + rv->num_cached_keys = 0; + rv->fetch_number = 0; + rv->flags = + 0; /* must be cleared before calling QR_set_rowstart_in_cache() */ + QR_set_rowstart_in_cache(rv, -1); + rv->key_base = -1; + rv->recent_processed_row_count = -1; + rv->cursTuple = -1; + rv->move_offset = 0; + rv->num_fields = 0; + rv->num_key_fields = ES_NUM_NORMAL_KEYS; /* CTID + OID */ + rv->tupleField = NULL; + rv->cursor_name = NULL; + rv->aborted = FALSE; + + rv->cache_size = 0; + rv->cmd_fetch_size = 0; + rv->rowset_size_include_ommitted = 1; + rv->move_direction = 0; + rv->keyset = NULL; + rv->reload_count = 0; + 
rv->rb_alloc = 0; + rv->rb_count = 0; + rv->dataFilled = FALSE; + rv->rollback = NULL; + rv->ad_alloc = 0; + rv->ad_count = 0; + rv->added_keyset = NULL; + rv->added_tuples = NULL; + rv->up_alloc = 0; + rv->up_count = 0; + rv->updated = NULL; + rv->updated_keyset = NULL; + rv->updated_tuples = NULL; + rv->dl_alloc = 0; + rv->dl_count = 0; + rv->deleted = NULL; + rv->deleted_keyset = NULL; + rv->es_result = NULL; + rv->server_cursor_id = NULL; + } + + MYLOG(ES_TRACE, "leaving\n"); + return rv; +} + +void QR_close_result(QResultClass *self, BOOL destroy) { + UNUSED(self); + QResultClass *next; + BOOL top = TRUE; + + if (!self) + return; + MYLOG(ES_TRACE, "entering\n"); + + while (self) { + QR_free_memory(self); /* safe to call anyway */ + + /* + * Should have been freed in the close() but just in case... + * QR_set_cursor clears the cursor name of all the chained results too, + * so we only need to do this for the first result in the chain. + */ + if (top) + QR_set_cursor(self, NULL); + + /* Free up column info */ + if (destroy) + QR_set_fields(self, NULL); + + /* Free command info (this is from strdup()) */ + if (self->command) { + free(self->command); + self->command = NULL; + } + + /* Free message info (this is from strdup()) */ + if (self->message) { + free(self->message); + self->message = NULL; + } + + /* Free notice info (this is from strdup()) */ + if (self->notice) { + free(self->notice); + self->notice = NULL; + } + + /* Free server_cursor_id (this is from strdup()) */ + if (self->server_cursor_id) { + free(self->server_cursor_id); + self->server_cursor_id = NULL; + } + + /* Destruct the result object in the chain */ + next = self->next; + self->next = NULL; + if (destroy) + free(self); + + /* Repeat for the next result in the chain */ + self = next; + destroy = TRUE; /* always destroy chained results */ + top = FALSE; + } + + MYLOG(ES_TRACE, "leaving\n"); +} + +void QR_reset_for_re_execute(QResultClass *self) { + MYLOG(ES_TRACE, "entering for %p\n", self); 
+ if (!self) + return; + QR_close_result(self, FALSE); + /* reset flags etc */ + self->flags = 0; + QR_set_rowstart_in_cache(self, -1); + self->recent_processed_row_count = -1; + /* clear error info etc */ + self->rstatus = PORES_EMPTY_QUERY; + self->aborted = FALSE; + self->sqlstate[0] = '\0'; + self->messageref = NULL; + + MYLOG(ES_TRACE, "leaving\n"); +} + +void QR_Destructor(QResultClass *self) { + MYLOG(ES_TRACE, "entering\n"); + if (!self) + return; + QR_close_result(self, TRUE); + + MYLOG(ES_TRACE, "leaving\n"); +} + +void QR_set_command(QResultClass *self, const char *msg) { + if (self->command) + free(self->command); + + self->command = msg ? strdup(msg) : NULL; +} + +void QR_set_message(QResultClass *self, const char *msg) { + if (self->message) + free(self->message); + self->messageref = NULL; + + self->message = msg ? strdup(msg) : NULL; +} + +void QR_set_server_cursor_id(QResultClass *self, const char *server_cursor_id) { + if (self->server_cursor_id) { + free(self->server_cursor_id); + } + + self->server_cursor_id = server_cursor_id ? strdup(server_cursor_id) : NULL; +} + +void QR_add_message(QResultClass *self, const char *msg) { + char *message = self->message; + size_t alsize, pos, addlen; + + if (!msg || !msg[0]) + return; + addlen = strlen(msg); + if (message) { + pos = strlen(message) + 1; + alsize = pos + addlen + 1; + } else { + pos = 0; + alsize = addlen + 1; + } + char *message_tmp = realloc(message, alsize); + if (message_tmp) { + message = message_tmp; + if (pos > 0) + message[pos - 1] = ';'; + strncpy_null(message + pos, msg, addlen + 1); + self->message = message; + } +} + +void QR_set_notice(QResultClass *self, const char *msg) { + if (self->notice) + free(self->notice); + + self->notice = msg ? 
strdup(msg) : NULL; +} + +void QR_add_notice(QResultClass *self, const char *msg) { + char *message = self->notice; + size_t alsize, pos, addlen; + + if (!msg || !msg[0]) + return; + addlen = strlen(msg); + if (message) { + pos = strlen(message) + 1; + alsize = pos + addlen + 1; + } else { + pos = 0; + alsize = addlen + 1; + } + char *message_tmp = realloc(message, alsize); + if (message_tmp) { + message = message_tmp; + if (pos > 0) + message[pos - 1] = ';'; + strncpy_null(message + pos, msg, addlen + 1); + self->notice = message; + } +} + +TupleField *QR_AddNew(QResultClass *self) { + size_t alloc; + UInt4 num_fields; + + if (!self) + return NULL; + MYLOG(ES_ALL, FORMAT_ULEN "th row(%d fields) alloc=" FORMAT_LEN "\n", + self->num_cached_rows, QR_NumResultCols(self), + self->count_backend_allocated); + if (num_fields = QR_NumResultCols(self), !num_fields) + return NULL; + if (self->num_fields <= 0) { + self->num_fields = (unsigned short)num_fields; + QR_set_reached_eof(self); + } + alloc = self->count_backend_allocated; + if (!self->backend_tuples) { + self->num_cached_rows = 0; + alloc = TUPLE_MALLOC_INC; + QR_MALLOC_return_with_error(self->backend_tuples, TupleField, + alloc * sizeof(TupleField) * num_fields, + self, "Out of memory in QR_AddNew.", NULL); + } else if (self->num_cached_rows >= self->count_backend_allocated) { + alloc = self->count_backend_allocated * 2; + QR_REALLOC_return_with_error(self->backend_tuples, TupleField, + alloc * sizeof(TupleField) * num_fields, + self, "Out of memory in QR_AddNew.", NULL); + } + self->count_backend_allocated = alloc; + + if (self->backend_tuples) { + memset(self->backend_tuples + num_fields * self->num_cached_rows, 0, + num_fields * sizeof(TupleField)); + self->num_cached_rows++; + self->ad_count++; + } + return self->backend_tuples + num_fields * (self->num_cached_rows - 1); +} + +void QR_free_memory(QResultClass *self) { + SQLLEN num_backend_rows = self->num_cached_rows; + int num_fields = self->num_fields; + + 
MYLOG(ES_TRACE, "entering fcount=" FORMAT_LEN "\n", num_backend_rows); + + if (self->backend_tuples) { + ClearCachedRows(self->backend_tuples, num_fields, num_backend_rows); + free(self->backend_tuples); + self->count_backend_allocated = 0; + self->backend_tuples = NULL; + self->dataFilled = FALSE; + self->tupleField = NULL; + } + if (self->keyset) { + free(self->keyset); + self->keyset = NULL; + self->count_keyset_allocated = 0; + self->reload_count = 0; + } + if (self->rollback) { + free(self->rollback); + self->rb_alloc = 0; + self->rb_count = 0; + self->rollback = NULL; + } + if (self->deleted) { + free(self->deleted); + self->deleted = NULL; + } + if (self->deleted_keyset) { + free(self->deleted_keyset); + self->deleted_keyset = NULL; + } + self->dl_alloc = 0; + self->dl_count = 0; + /* clear added info */ + if (self->added_keyset) { + free(self->added_keyset); + self->added_keyset = NULL; + } + if (self->added_tuples) { + ClearCachedRows(self->added_tuples, num_fields, self->ad_count); + free(self->added_tuples); + self->added_tuples = NULL; + } + self->ad_alloc = 0; + self->ad_count = 0; + /* clear updated info */ + if (self->updated) { + free(self->updated); + self->updated = NULL; + } + if (self->updated_keyset) { + free(self->updated_keyset); + self->updated_keyset = NULL; + } + if (self->updated_tuples) { + ClearCachedRows(self->updated_tuples, num_fields, self->up_count); + free(self->updated_tuples); + self->updated_tuples = NULL; + } + if (self->es_result) { + ClearESResult(self->es_result); + self->es_result = NULL; + } + + self->up_alloc = 0; + self->up_count = 0; + + self->num_total_read = 0; + self->num_cached_rows = 0; + self->num_cached_keys = 0; + self->cursTuple = -1; + self->pstatus = 0; + + MYLOG(ES_TRACE, "leaving\n"); +} diff --git a/sql-odbc/src/odfesqlodbc/qresult.h b/sql-odbc/src/odfesqlodbc/qresult.h new file mode 100644 index 0000000000..1b30e5709a --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/qresult.h @@ -0,0 +1,308 @@ +/* + * 
Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __QRESULT_H__ +#define __QRESULT_H__ + +#include "columninfo.h" +#include "es_connection.h" +#include "es_odbc.h" +#include "tuple.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum QueryResultCode_ { + PORES_EMPTY_QUERY = 0, + PORES_COMMAND_OK, /* a query command that doesn't return + * anything was executed properly by the backend */ + PORES_TUPLES_OK, /* a query command that returns tuples + * was executed properly by the backend, ESresult + * contains the resulttuples */ + PORES_COPY_OUT, + PORES_COPY_IN, + PORES_BAD_RESPONSE, /* an unexpected response was recv'd from + * the backend */ + PORES_NONFATAL_ERROR, + PORES_FATAL_ERROR, + PORES_NO_MEMORY_ERROR, + PORES_FIELDS_OK = 100, /* field information from a query was + * successful */ + /* PORES_END_TUPLES, */ + PORES_INTERNAL_ERROR +} QueryResultCode; + +enum { + FQR_REACHED_EOF = (1L << 1) /* reached eof */ + , + FQR_HAS_VALID_BASE = (1L << 2), + FQR_NEEDS_SURVIVAL_CHECK = (1L << 3) /* check if the cursor is open */ +}; + +struct QResultClass_ { + ColumnInfoClass *fields; /* the Column information */ + ConnectionClass *conn; /* the connection this result is using + * (backend) */ + QResultClass *next; /* the following result class */ + + /* Stuff for declare/fetch tuples */ + SQLULEN num_total_read; /* the highest absolute position ever read in + 1 */ + SQLULEN count_backend_allocated; /* 
m(re)alloced count */ + SQLULEN num_cached_rows; /* count of tuples kept in backend_tuples member */ + SQLLEN fetch_number; /* 0-based index to the tuple to read next */ + SQLLEN cursTuple; /* absolute current position in the servr's cursor used to + retrieve tuples from the DB */ + SQLULEN move_offset; + SQLLEN base; /* relative position of rowset start in the current data + cache(backend_tuples) */ + + UInt2 num_fields; /* number of fields in the result */ + UInt2 num_key_fields; /* number of key fields in the result */ + UInt4 rowset_size_include_ommitted; /* ES restriction */ + SQLLEN recent_processed_row_count; + SQLULEN cache_size; + SQLULEN cmd_fetch_size; + + QueryResultCode rstatus; /* result status */ + + char sqlstate[8]; + char *message; + const char *messageref; + char *cursor_name; /* The name of the cursor for select statements */ + char *command; + char *notice; + + TupleField *backend_tuples; /* data from the backend (the tuple cache) */ + TupleField *tupleField; /* current backend tuple being retrieved */ + + char pstatus; /* processing status */ + char aborted; /* was aborted ? */ + char flags; /* this result contains keyset etc ? */ + po_ind_t move_direction; /* must move before fetching this + result set */ + SQLULEN count_keyset_allocated; /* m(re)alloced count */ + SQLULEN num_cached_keys; /* count of keys kept in backend_keys member */ + KeySet *keyset; + SQLLEN key_base; /* relative position of rowset start in the current keyset + cache */ + UInt2 reload_count; + UInt2 rb_alloc; /* count of allocated rollback info */ + UInt2 rb_count; /* count of rollback info */ + char dataFilled; /* Cache is filled with data ? 
*/ + Rollback *rollback; + UInt4 ad_alloc; /* count of allocated added info */ + UInt4 ad_count; /* count of newly added rows */ + KeySet *added_keyset; /* added keyset info */ + TupleField *added_tuples; /* added data by myself */ + UInt2 dl_alloc; /* count of allocated deleted info */ + UInt2 dl_count; /* count of deleted info */ + SQLLEN *deleted; /* deleted index info */ + KeySet *deleted_keyset; /* deleted keyset info */ + UInt2 up_alloc; /* count of allocated updated info */ + UInt2 up_count; /* count of updated info */ + SQLLEN *updated; /* updated index info */ + KeySet *updated_keyset; /* uddated keyset info */ + TupleField *updated_tuples; /* uddated data by myself */ + void *es_result; + char *server_cursor_id; +}; + +enum { + FQR_HASKEYSET = 1L, + FQR_WITHHOLD = (1L << 1), + FQR_HOLDPERMANENT = (1L << 2) /* the cursor is alive across transactions */ + , + FQR_SYNCHRONIZEKEYS = + (1L + << 3) /* synchronize the keyset range with that of cthe tuples cache */ +}; + +#define QR_haskeyset(self) (0 != (self->flags & FQR_HASKEYSET)) +#define QR_is_withhold(self) (0 != (self->flags & FQR_WITHHOLD)) +#define QR_is_permanent(self) (0 != (self->flags & FQR_HOLDPERMANENT)) +#define QR_synchronize_keys(self) (0 != (self->flags & FQR_SYNCHRONIZEKEYS)) +#define QR_get_fields(self) (self->fields) + +/* These functions are for retrieving data from the qresult */ +#define QR_get_value_backend(self, fieldno) (self->tupleField[fieldno].value) +#define QR_get_value_backend_row(self, tupleno, fieldno) \ + ((self->backend_tuples + (tupleno * self->num_fields))[fieldno].value) +#define QR_get_value_backend_text(self, tupleno, fieldno) \ + QR_get_value_backend_row(self, tupleno, fieldno) +#define QR_get_value_backend_int(self, tupleno, fieldno, isNull) \ + atoi(QR_get_value_backend_row(self, tupleno, fieldno)) + +/* These functions are used by both manual and backend results */ +#define QR_NumResultCols(self) (CI_get_num_fields(self->fields)) +#define 
QR_NumPublicResultCols(self) \ + (QR_haskeyset(self) \ + ? (CI_get_num_fields(self->fields) - self->num_key_fields) \ + : CI_get_num_fields(self->fields)) +#define QR_get_fieldname(self, fieldno_) \ + (CI_get_fieldname(self->fields, fieldno_)) +#define QR_get_fieldsize(self, fieldno_) \ + (CI_get_fieldsize(self->fields, fieldno_)) +#define QR_get_display_size(self, fieldno_) \ + (CI_get_display_size(self->fields, fieldno_)) +#define QR_get_atttypmod(self, fieldno_) \ + (CI_get_atttypmod(self->fields, fieldno_)) +#define QR_get_field_type(self, fieldno_) (CI_get_oid(self->fields, fieldno_)) +#define QR_get_relid(self, fieldno_) (CI_get_relid(self->fields, fieldno_)) +#define QR_get_attid(self, fieldno_) (CI_get_attid(self->fields, fieldno_)) + +/* These functions are used only for manual result sets */ +#define QR_get_num_total_tuples(self) \ + (QR_once_reached_eof(self) ? (self->num_total_read + self->ad_count) \ + : self->num_total_read) +#define QR_get_num_total_read(self) (self->num_total_read) +#define QR_get_num_cached_tuples(self) (self->num_cached_rows) +#define QR_set_field_info(self, field_num, name, adtid, adtsize, relid, attid) \ + (CI_set_field_info(self->fields, field_num, name, adtid, adtsize, -1, \ + relid, attid)) +#define QR_set_field_info_v(self, field_num, name, adtid, adtsize) \ + (CI_set_field_info(self->fields, field_num, name, adtid, adtsize, -1, 0, 0)) + +/* status macros */ +#define QR_command_successful(self) \ + (self \ + && !(self->rstatus == PORES_BAD_RESPONSE \ + || self->rstatus == PORES_NONFATAL_ERROR \ + || self->rstatus == PORES_FATAL_ERROR \ + || self->rstatus == PORES_NO_MEMORY_ERROR)) +#define QR_command_maybe_successful(self) \ + (self \ + && !(self->rstatus == PORES_BAD_RESPONSE \ + || self->rstatus == PORES_FATAL_ERROR \ + || self->rstatus == PORES_NO_MEMORY_ERROR)) +#define QR_command_nonfatal(self) (self->rstatus == PORES_NONFATAL_ERROR) +#define QR_set_conn(self, conn_) (self->conn = conn_) +#define QR_set_rstatus(self, 
condition) (self->rstatus = condition) +#define QR_set_sqlstatus(self, status) strcpy(self->sqlstatus, status) +#define QR_set_messageref(self, m) ((self)->messageref = m) +#define QR_set_aborted(self, aborted_) (self->aborted = aborted_) +#define QR_set_haskeyset(self) (self->flags |= FQR_HASKEYSET) +#define QR_set_synchronize_keys(self) (self->flags |= FQR_SYNCHRONIZEKEYS) +#define QR_set_no_cursor(self) \ + ((self)->flags &= ~(FQR_WITHHOLD | FQR_HOLDPERMANENT), \ + (self)->pstatus &= ~FQR_NEEDS_SURVIVAL_CHECK) +#define QR_set_withhold(self) (self->flags |= FQR_WITHHOLD) +#define QR_set_permanent(self) (self->flags |= FQR_HOLDPERMANENT) +#define QR_set_reached_eof(self) (self->pstatus |= FQR_REACHED_EOF) +#define QR_set_has_valid_base(self) (self->pstatus |= FQR_HAS_VALID_BASE) +#define QR_set_no_valid_base(self) (self->pstatus &= ~FQR_HAS_VALID_BASE) +#define QR_set_survival_check(self) (self->pstatus |= FQR_NEEDS_SURVIVAL_CHECK) +#define QR_set_no_survival_check(self) \ + (self->pstatus &= ~FQR_NEEDS_SURVIVAL_CHECK) +#define QR_inc_num_cache(self) \ + do { \ + self->num_cached_rows++; \ + if (QR_haskeyset(self)) \ + self->num_cached_keys++; \ + } while (0) +#define QR_set_next_in_cache(self, number) \ + do { \ + MYLOG(ES_ALL, "set the number to " FORMAT_LEN " to read next\n", \ + number); \ + self->fetch_number = number; \ + } while (0) +#define QR_inc_next_in_cache(self) \ + do { \ + MYLOG(ES_ALL, "increased the number " FORMAT_LEN, self->fetch_number); \ + self->fetch_number++; \ + MYLOG(ES_ALL, "to " FORMAT_LEN " to next read\n", self->fetch_number); \ + } while (0) + +#define QR_get_message(self) \ + ((self)->message ? 
(self)->message : (self)->messageref) +#define QR_get_command(self) (self->command) +#define QR_get_notice(self) (self->notice) +#define QR_get_rstatus(self) (self->rstatus) +#define QR_get_aborted(self) (self->aborted) +#define QR_get_conn(self) (self->conn) +#define QR_get_cursor(self) (self->cursor_name) +#define QR_get_rowstart_in_cache(self) (self->base) +#define QR_once_reached_eof(self) ((self->pstatus & FQR_REACHED_EOF) != 0) +#define QR_has_valid_base(self) (0 != (self->pstatus & FQR_HAS_VALID_BASE)) +#define QR_needs_survival_check(self) \ + (0 != (self->pstatus & FQR_NEEDS_SURVIVAL_CHECK)) + +#define QR_aborted(self) (!self || self->aborted) +#define QR_get_reqsize(self) (self->rowset_size_include_ommitted) + +#define QR_stop_movement(self) (self->move_direction = 0) +#define QR_is_moving(self) (0 != self->move_direction) +#define QR_is_not_moving(self) (0 == self->move_direction) +#define QR_set_move_forward(self) (self->move_direction = 1) +#define QR_is_moving_forward(self) (1 == self->move_direction) +#define QR_set_move_backward(self) (self->move_direction = -1) +#define QR_is_moving_backward(self) (-1 == self->move_direction) +#define QR_set_move_from_the_last(self) (self->move_direction = 2) +#define QR_is_moving_from_the_last(self) (2 == self->move_direction) +#define QR_is_moving_not_backward(self) (0 < self->move_direction) + +/* Core Functions */ +QResultClass *QR_Constructor(void); +void QR_Destructor(QResultClass *self); +TupleField *QR_AddNew(QResultClass *self); +void QR_close_result(QResultClass *self, BOOL destroy); +void QR_reset_for_re_execute(QResultClass *self); +void QR_free_memory(QResultClass *self); +void QR_set_command(QResultClass *self, const char *msg); +void QR_set_message(QResultClass *self, const char *msg); +void QR_add_message(QResultClass *self, const char *msg); +void QR_set_notice(QResultClass *self, const char *msg); +void QR_add_notice(QResultClass *self, const char *msg); + +void QR_set_num_fields(QResultClass 
*self, + int new_num_fields); /* catalog functions' result only */ +void QR_set_fields(QResultClass *self, ColumnInfoClass *); + +void QR_set_rowstart_in_cache(QResultClass *, SQLLEN); +void QR_inc_rowstart_in_cache(QResultClass *self, SQLLEN base_inc); +void QR_set_reqsize(QResultClass *self, Int4 reqsize); +void QR_set_position(QResultClass *self, SQLLEN pos); +void QR_set_cursor(QResultClass *self, const char *name); +SQLLEN getNthValid(const QResultClass *self, SQLLEN sta, UWORD orientation, + SQLULEN nth, SQLLEN *nearest); +void QR_set_server_cursor_id(QResultClass *self, const char *server_cursor_id); +#define QR_MALLOC_return_with_error(t, tp, s, a, m, r) \ + do { \ + if (t = (tp *)malloc(s), NULL == t) { \ + QR_set_rstatus(a, PORES_NO_MEMORY_ERROR); \ + qlog("QR_MALLOC_error\n"); \ + QR_free_memory(a); \ + QR_set_messageref(a, m); \ + return r; \ + } \ + } while (0) +#define QR_REALLOC_return_with_error(t, tp, s, a, m, r) \ + do { \ + tp *tmp; \ + if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ + QR_set_rstatus(a, PORES_NO_MEMORY_ERROR); \ + qlog("QR_REALLOC_error\n"); \ + QR_free_memory(a); \ + QR_set_messageref(a, m); \ + return r; \ + } \ + t = tmp; \ + } while (0) + +#ifdef __cplusplus +} +#endif +#endif /* __QRESULT_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/resource.h b/sql-odbc/src/odfesqlodbc/resource.h new file mode 100644 index 0000000000..fe17397e22 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/resource.h @@ -0,0 +1,67 @@ +//{{NO_DEPENDENCIES}} +// Microsoft Visual C++ generated include file. 
+// Used by es_odbc.rc +// +#define IDS_BADDSN 1 +#define IDS_MSGTITLE 2 +#define IDOK2 3 +#define IDC_TEST 4 +#define IDC_PASSWORD_STATIC 4 +#define IDC_SSL_STATIC 4 +#define IDC_HOST_VER_STATIC 5 +#define IDC_DSNAME 400 +#define IDC_DSNAMETEXT 401 +#define IDC_DESC 404 +#define IDC_FETCH_SIZE_STATIC 404 +#define IDC_SERVER 407 +#define IDC_NOTICE_USER 414 +#define IDS_AUTHTYPE_NONE 417 +#define IDS_AUTHTYPE_BASIC 418 +#define IDS_AUTHTYPE_IAM 419 +#define IDS_LOGTYPE_OFF 420 +#define IDS_LOGTYPE_FATAL 421 +#define IDS_LOGTYPE_ERROR 422 +#define IDS_LOGTYPE_WARNING 423 +#define IDS_LOGTYPE_INFO 424 +#define IDS_LOGTYPE_DEBUG 425 +#define IDS_LOGTYPE_TRACE 426 +#define IDS_LOGTYPE_ALL 427 +#define DLG_CONFIG 1001 +#define IDC_PORT 1002 +#define IDC_USER 1006 +#define IDC_PASSWORD 1009 +#define IDC_MANAGEDSN 1077 +#define IDC_EDIT1 1112 +#define IDC_CONNTIMEOUT_STATIC 1112 +#define IDC_CHECK1 1113 +#define IDC_CHECK2 1114 +#define IDC_USESSL 1114 +#define IDC_COMBO1 1115 +#define IDC_AUTHTYPE 1115 +#define IDC_HOST_VER 1115 +#define IDC_USERNAME_STATIC 1116 +#define IDC_REGION 1121 +#define IDC_REGION_STATIC 1122 +#define IDC_AUTH_STATIC 1123 +#define ID_ADVANCED_OPTIONS 1124 +#define ID_LOG_OPTIONS 1125 +#define IDC_DRIVER_VERSION 1126 +#define IDC_AUTH_SETTINGS 1127 +#define IDC_CONN_SETTINGS 1128 +#define DLG_ADVANCED_OPTIONS 1129 +#define IDC_CONNTIMEOUT 1130 +#define DLG_LOG_OPTIONS 1131 +#define IDC_FETCH_SIZE 1131 +#define IDC_LOG_LEVEL 1132 +#define IDC_LOG_PATH 1133 + +// Next default values for new objects +// +#ifdef APSTUDIO_INVOKED +#ifndef APSTUDIO_READONLY_SYMBOLS +#define _APS_NEXT_RESOURCE_VALUE 113 +#define _APS_NEXT_COMMAND_VALUE 40001 +#define _APS_NEXT_CONTROL_VALUE 1135 +#define _APS_NEXT_SYMED_VALUE 101 +#endif +#endif diff --git a/sql-odbc/src/odfesqlodbc/results.c b/sql-odbc/src/odfesqlodbc/results.c new file mode 100644 index 0000000000..bc37adae14 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/results.c @@ -0,0 +1,1671 @@ +/* + * Copyright 
<2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#include +#include +#include + +#include "bind.h" +#include "convert.h" +#include "dlg_specific.h" +#include "environ.h" +#include "es_apifunc.h" +#include "es_connection.h" +#include "es_odbc.h" +#include "es_types.h" +#include "misc.h" +#include "qresult.h" +#include "statement.h" +#include "es_statement.h" + +/* Helper macro */ +#define getEffectiveOid(conn, fi) \ + es_true_type((conn), (fi)->columntype, FI_type(fi)) +#define NULL_IF_NULL(a) ((a) ? ((const char *)(a)) : "(null)") + +RETCODE SQL_API ESAPI_RowCount(HSTMT hstmt, SQLLEN *pcrow) { + CSTR func = "ESAPI_RowCount"; + StatementClass *stmt = (StatementClass *)hstmt; + QResultClass *res; + + MYLOG(ES_TRACE, "entering...\n"); + if (!stmt) { + SC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + + res = SC_get_Curres(stmt); + if (res) { + if (stmt->status != STMT_FINISHED) { + SC_set_error( + stmt, STMT_SEQUENCE_ERROR, + "Can't get row count while statement is still executing.", + func); + return SQL_ERROR; + } + } + + // Row count is not supported by this driver, so we will always report -1, + // as defined by the ODBC API for SQLRowCount. + *pcrow = -1; + + return SQL_SUCCESS; +} + +/* + * This returns the number of columns associated with the database + * attached to "hstmt". 
+ */ +RETCODE SQL_API ESAPI_NumResultCols(HSTMT hstmt, SQLSMALLINT *pccol) { + CSTR func = "ESAPI_NumResultCols"; + StatementClass *stmt = (StatementClass *)hstmt; + QResultClass *result; + RETCODE ret = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering...\n"); + if (!stmt) { + SC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + + SC_clear_error(stmt); +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wkeyword-macro" +#endif // __APPLE__ +#define return DONT_CALL_RETURN_FROM_HERE ? ? ? +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + if (stmt->proc_return > 0) { + *pccol = 0; + goto cleanup; + } + + result = SC_get_Curres(stmt); + *pccol = QR_NumPublicResultCols(result); + +cleanup: +#undef return + return ret; +} + +#define USE_FI(fi, unknown) (fi && UNKNOWNS_AS_LONGEST != unknown) + +/* + * Return information about the database column the user wants + * information about. + */ +RETCODE SQL_API ESAPI_DescribeCol(HSTMT hstmt, SQLUSMALLINT icol, + SQLCHAR *szColName, SQLSMALLINT cbColNameMax, + SQLSMALLINT *pcbColName, + SQLSMALLINT *pfSqlType, SQLULEN *pcbColDef, + SQLSMALLINT *pibScale, + SQLSMALLINT *pfNullable) { + CSTR func = "ESAPI_DescribeCol"; + + /* gets all the information about a specific column */ + StatementClass *stmt = (StatementClass *)hstmt; + ConnectionClass *conn; + IRDFields *irdflds; + QResultClass *res = NULL; + char *col_name = NULL; + OID fieldtype = 0; + SQLLEN column_size = 0; + int unknown_sizes; + SQLINTEGER decimal_digits = 0; + ConnInfo *ci; + FIELD_INFO *fi; + char buf[255]; + int len = 0; + RETCODE result = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering.%d..\n", icol); + + if (!stmt) { + SC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + + conn = SC_get_conn(stmt); + ci = &(conn->connInfo); + unknown_sizes = DEFAULT_UNKNOWNSIZES; + + SC_clear_error(stmt); + +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored 
"-Wkeyword-macro" +#endif // __APPLE__ +#define return DONT_CALL_RETURN_FROM_HERE ? ? ? +#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + irdflds = SC_get_IRDF(stmt); + if (0 == icol) /* bookmark column */ + { + SQLSMALLINT fType = stmt->options.use_bookmarks == SQL_UB_VARIABLE + ? SQL_BINARY + : SQL_INTEGER; + + MYLOG(ES_ALL, "answering bookmark info\n"); + if (szColName && cbColNameMax > 0) + *szColName = '\0'; + if (pcbColName) + *pcbColName = 0; + if (pfSqlType) + *pfSqlType = fType; + if (pcbColDef) + *pcbColDef = 10; + if (pibScale) + *pibScale = 0; + if (pfNullable) + *pfNullable = SQL_NO_NULLS; + result = SQL_SUCCESS; + goto cleanup; + } + + /* + * Dont check for bookmark column. This is the responsibility of the + * driver manager. + */ + + icol--; /* use zero based column numbers */ + + fi = NULL; + if (icol < irdflds->nfields && irdflds->fi) + fi = irdflds->fi[icol]; + + if (!FI_is_applicable(fi)) { + fi = NULL; + + res = SC_get_Curres(stmt); + if (icol >= QR_NumPublicResultCols(res)) { + SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR, + "Invalid column number in DescribeCol.", func); + SPRINTF_FIXED(buf, "Col#=%d, #Cols=%d,%d keys=%d", icol, + QR_NumResultCols(res), QR_NumPublicResultCols(res), + res->num_key_fields); + SC_log_error(func, buf, stmt); + result = SQL_ERROR; + goto cleanup; + } + if (icol < irdflds->nfields && irdflds->fi) + fi = irdflds->fi[icol]; + } + res = SC_get_Curres(stmt); +#ifdef SUPPRESS_LONGEST_ON_CURSORS + if (UNKNOWNS_AS_LONGEST == unknown_sizes) { + if (QR_once_reached_eof(res)) + unknown_sizes = UNKNOWNS_AS_LONGEST; + else + unknown_sizes = UNKNOWNS_AS_MAX; + } +#endif /* SUPPRESS_LONGEST_ON_CURSORS */ + /* handle constants */ + if (res && -2 == QR_get_fieldsize(res, icol)) + unknown_sizes = UNKNOWNS_AS_LONGEST; + + if (FI_is_applicable(fi)) { + fieldtype = getEffectiveOid(conn, fi); + if (NAME_IS_VALID(fi->column_alias)) + col_name = GET_NAME(fi->column_alias); + else + col_name = 
GET_NAME(fi->column_name); + if (USE_FI(fi, unknown_sizes)) { + column_size = fi->column_size; + decimal_digits = fi->decimal_digits; + } else { + column_size = + estype_column_size(stmt, fieldtype, icol, unknown_sizes); + decimal_digits = estype_decimal_digits(stmt, fieldtype, icol); + } + + MYLOG(ES_DEBUG, + "PARSE: fieldtype=%u, col_name='%s', column_size=" FORMAT_LEN + "\n", + fieldtype, NULL_IF_NULL(col_name), column_size); + } else { + col_name = QR_get_fieldname(res, icol); + fieldtype = QR_get_field_type(res, icol); + + column_size = estype_column_size(stmt, fieldtype, icol, unknown_sizes); + decimal_digits = estype_decimal_digits(stmt, fieldtype, icol); + } + + MYLOG(ES_DEBUG, "col %d fieldname = '%s'\n", icol, NULL_IF_NULL(col_name)); + MYLOG(ES_DEBUG, "col %d fieldtype = %d\n", icol, fieldtype); + MYLOG(ES_DEBUG, "col %d column_size = " FORMAT_LEN "\n", icol, column_size); + + result = SQL_SUCCESS; + + /* + * COLUMN NAME + */ + len = col_name ? (int)strlen(col_name) : 0; + + if (pcbColName) + *pcbColName = (SQLSMALLINT)len; + + if (szColName && cbColNameMax > 0) { + if (NULL != col_name) + strncpy_null((char *)szColName, col_name, cbColNameMax); + else + szColName[0] = '\0'; + + if (len >= cbColNameMax) { + result = SQL_SUCCESS_WITH_INFO; + SC_set_error(stmt, STMT_TRUNCATED, + "The buffer was too small for the colName.", func); + } + } + + /* + * CONCISE(SQL) TYPE + */ + if (pfSqlType) { + *pfSqlType = + estype_to_concise_type(stmt, fieldtype, icol, unknown_sizes); + + MYLOG(ES_DEBUG, "col %d *pfSqlType = %d\n", icol, *pfSqlType); + } + + /* + * COLUMN SIZE(PRECISION in 2.x) + */ + if (pcbColDef) { + if (column_size < 0) + column_size = 0; /* "I dont know" */ + + *pcbColDef = column_size; + + MYLOG(ES_DEBUG, "Col: col %d *pcbColDef = " FORMAT_ULEN "\n", icol, + *pcbColDef); + } + + /* + * DECIMAL DIGITS(SCALE in 2.x) + */ + if (pibScale) { + if (decimal_digits < 0) + decimal_digits = 0; + + *pibScale = (SQLSMALLINT)decimal_digits; + MYLOG(ES_DEBUG, "col 
%d *pibScale = %d\n", icol, *pibScale); + } + + /* + * NULLABILITY + */ + if (pfNullable) { + if (SC_has_outer_join(stmt)) + *pfNullable = TRUE; + else + *pfNullable = fi ? fi->nullable : estype_nullable(conn, fieldtype); + + MYLOG(ES_DEBUG, "col %d *pfNullable = %d\n", icol, *pfNullable); + } + +cleanup: +#undef return + return result; +} + +/* Returns result column descriptor information for a result set. */ +RETCODE SQL_API ESAPI_ColAttributes(HSTMT hstmt, SQLUSMALLINT icol, + SQLUSMALLINT fDescType, PTR rgbDesc, + SQLSMALLINT cbDescMax, SQLSMALLINT *pcbDesc, + SQLLEN *pfDesc) { + CSTR func = "ESAPI_ColAttributes"; + StatementClass *stmt = (StatementClass *)hstmt; + IRDFields *irdflds; + OID field_type = 0; + Int2 col_idx; + ConnectionClass *conn; + ConnInfo *ci; + int column_size, unknown_sizes; + int cols = 0; + RETCODE result; + const char *p = NULL; + SQLLEN value = 0; + const FIELD_INFO *fi = NULL; + const TABLE_INFO *ti = NULL; + QResultClass *res; + BOOL stmt_updatable; + + MYLOG(ES_TRACE, "entering..col=%d %d len=%d.\n", icol, fDescType, + cbDescMax); + + if (!stmt) { + SC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + stmt_updatable = SC_is_updatable(stmt) + /* The following doesn't seem appropriate for client side cursors + && stmt->options.scroll_concurrency != SQL_CONCUR_READ_ONLY + */ + ; + + if (pcbDesc) + *pcbDesc = 0; + irdflds = SC_get_IRDF(stmt); + conn = SC_get_conn(stmt); + ci = &(conn->connInfo); + + /* + * Dont check for bookmark column. This is the responsibility of the + * driver manager. For certain types of arguments, the column number + * is ignored anyway, so it may be 0. + */ + + res = SC_get_Curres(stmt); + if (0 == icol && SQL_DESC_COUNT != fDescType) /* bookmark column */ + { + MYLOG(ES_ALL, "answering bookmark info\n"); + switch (fDescType) { + case SQL_DESC_OCTET_LENGTH: + if (pfDesc) + *pfDesc = 4; + break; + case SQL_DESC_TYPE: + if (pfDesc) + *pfDesc = stmt->options.use_bookmarks == SQL_UB_VARIABLE + ? 
SQL_BINARY + : SQL_INTEGER; + break; + } + return SQL_SUCCESS; + } + + col_idx = icol - 1; + + unknown_sizes = DEFAULT_UNKNOWNSIZES; + + /* not appropriate for SQLColAttributes() */ + if (stmt->catalog_result) + unknown_sizes = UNKNOWNS_AS_LONGEST; + else if (unknown_sizes == UNKNOWNS_AS_DONTKNOW) + unknown_sizes = UNKNOWNS_AS_MAX; + + if (!stmt->catalog_result && SC_is_parse_forced(stmt) + && SC_can_parse_statement(stmt)) { + cols = irdflds->nfields; + + /* + * Column Count is a special case. The Column number is ignored + * in this case. + */ + if (fDescType == SQL_DESC_COUNT) { + if (pfDesc) + *pfDesc = cols; + + return SQL_SUCCESS; + } + + if (SC_parsed_status(stmt) != STMT_PARSE_FATAL && irdflds->fi) { + if (col_idx >= cols) { + SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR, + "Invalid column number in ColAttributes.", func); + return SQL_ERROR; + } + } + } + + if ((unsigned int)col_idx < irdflds->nfields && irdflds->fi) + fi = irdflds->fi[col_idx]; + if (FI_is_applicable(fi)) + field_type = getEffectiveOid(conn, fi); + else { + BOOL build_fi = FALSE; + + fi = NULL; + switch (fDescType) { + case SQL_COLUMN_OWNER_NAME: + case SQL_COLUMN_TABLE_NAME: + case SQL_COLUMN_TYPE: + case SQL_COLUMN_TYPE_NAME: + case SQL_COLUMN_AUTO_INCREMENT: + case SQL_DESC_NULLABLE: + case SQL_DESC_BASE_TABLE_NAME: + case SQL_DESC_BASE_COLUMN_NAME: + case SQL_COLUMN_UPDATABLE: + case 1212: /* SQL_CA_SS_COLUMN_KEY ? */ + build_fi = TRUE; + break; + } + + res = SC_get_Curres(stmt); + cols = QR_NumPublicResultCols(res); + + /* + * Column Count is a special case. The Column number is ignored + * in this case. 
+ */ + if (fDescType == SQL_DESC_COUNT) { + if (pfDesc) + *pfDesc = cols; + + return SQL_SUCCESS; + } + + if (col_idx >= cols) { + SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR, + "Invalid column number in ColAttributes.", func); + return SQL_ERROR; + } + + field_type = QR_get_field_type(res, col_idx); + if ((unsigned int)col_idx < irdflds->nfields && irdflds->fi) + fi = irdflds->fi[col_idx]; + } + if (FI_is_applicable(fi)) { + ti = fi->ti; + field_type = getEffectiveOid(conn, fi); + } + + MYLOG(ES_DEBUG, "col %d field_type=%d fi,ti=%p,%p\n", col_idx, field_type, + fi, ti); + +#ifdef SUPPRESS_LONGEST_ON_CURSORS + if (UNKNOWNS_AS_LONGEST == unknown_sizes) { + if (QR_once_reached_eof(res)) + unknown_sizes = UNKNOWNS_AS_LONGEST; + else + unknown_sizes = UNKNOWNS_AS_MAX; + } +#endif /* SUPPRESS_LONGEST_ON_CURSORS */ + /* handle constants */ + if (res && -2 == QR_get_fieldsize(res, col_idx)) + unknown_sizes = UNKNOWNS_AS_LONGEST; + + column_size = + (USE_FI(fi, unknown_sizes) && fi->column_size > 0) + ? fi->column_size + : estype_column_size(stmt, field_type, col_idx, unknown_sizes); + switch (fDescType) { + case SQL_COLUMN_AUTO_INCREMENT: /* == SQL_DESC_AUTO_UNIQUE_VALUE */ + if (fi && fi->auto_increment) + value = TRUE; + else + value = estype_auto_increment(conn, field_type); + if (value == -1) /* non-numeric becomes FALSE (ODBC Doc) */ + value = FALSE; + MYLOG(ES_DEBUG, "AUTO_INCREMENT=" FORMAT_LEN "\n", value); + + break; + + case SQL_COLUMN_CASE_SENSITIVE: /* == SQL_DESC_CASE_SENSITIVE */ + value = estype_case_sensitive(conn, field_type); + break; + + /* + * This special case is handled above. + * + * case SQL_COLUMN_COUNT: + */ + case SQL_COLUMN_DISPLAY_SIZE: /* == SQL_DESC_DISPLAY_SIZE */ + value = (USE_FI(fi, unknown_sizes) && 0 != fi->display_size) + ? 
fi->display_size + : estype_display_size(stmt, field_type, col_idx, + unknown_sizes); + + MYLOG(ES_DEBUG, "col %d, display_size= " FORMAT_LEN "\n", col_idx, + value); + + break; + + case SQL_COLUMN_LABEL: /* == SQL_DESC_LABEL */ + if (fi && (NAME_IS_VALID(fi->column_alias))) { + p = GET_NAME(fi->column_alias); + + MYLOG(ES_DEBUG, "COLUMN_LABEL = '%s'\n", p); + break; + } + /* otherwise same as column name -- FALL THROUGH!!! */ + + case SQL_DESC_NAME: + MYLOG(ES_ALL, "fi=%p (alias, name)=", fi); + if (fi) + MYPRINTF(ES_DEBUG, "(%s,%s)\n", PRINT_NAME(fi->column_alias), + PRINT_NAME(fi->column_name)); + else + MYPRINTF(ES_DEBUG, "NULL\n"); + p = fi ? (NAME_IS_NULL(fi->column_alias) + ? SAFE_NAME(fi->column_name) + : GET_NAME(fi->column_alias)) + : QR_get_fieldname(res, col_idx); + + MYLOG(ES_DEBUG, "COLUMN_NAME = '%s'\n", p); + break; + + case SQL_COLUMN_LENGTH: + value = (USE_FI(fi, unknown_sizes) && fi->length > 0) + ? fi->length + : estype_buffer_length(stmt, field_type, col_idx, + unknown_sizes); + if (0 > value) + /* if (-1 == value) I'm not sure which is right */ + value = 0; + + MYLOG(ES_DEBUG, "col %d, column_length = " FORMAT_LEN "\n", col_idx, + value); + break; + + case SQL_COLUMN_MONEY: /* == SQL_DESC_FIXED_PREC_SCALE */ + value = estype_money(conn, field_type); + MYLOG(ES_ALL, "COLUMN_MONEY=" FORMAT_LEN "\n", value); + break; + + case SQL_DESC_NULLABLE: + if (SC_has_outer_join(stmt)) + value = TRUE; + else + value = fi ? fi->nullable : estype_nullable(conn, field_type); + MYLOG(ES_ALL, "COLUMN_NULLABLE=" FORMAT_LEN "\n", value); + break; + + case SQL_COLUMN_OWNER_NAME: /* == SQL_DESC_SCHEMA_NAME */ + p = ti ? 
SAFE_NAME(ti->schema_name) : NULL_STRING; + MYLOG(ES_DEBUG, "SCHEMA_NAME = '%s'\n", p); + break; + + case SQL_COLUMN_PRECISION: /* in 2.x */ + value = column_size; + if (value < 0) + value = 0; + + MYLOG(ES_DEBUG, "col %d, column_size = " FORMAT_LEN "\n", col_idx, + value); + break; + + case SQL_COLUMN_QUALIFIER_NAME: /* == SQL_DESC_CATALOG_NAME */ + p = ti ? CurrCatString(conn) + : NULL_STRING; /* empty string means *not supported* */ + break; + + case SQL_COLUMN_SCALE: /* in 2.x */ + value = estype_decimal_digits(stmt, field_type, col_idx); + MYLOG(ES_ALL, "COLUMN_SCALE=" FORMAT_LEN "\n", value); + if (value < 0) + value = 0; + break; + + case SQL_COLUMN_SEARCHABLE: /* == SQL_DESC_SEARCHABLE */ + value = estype_searchable(conn, field_type); + break; + + case SQL_COLUMN_TABLE_NAME: /* == SQL_DESC_TABLE_NAME */ + p = ti ? SAFE_NAME(ti->table_name) : NULL_STRING; + + MYLOG(ES_DEBUG, "TABLE_NAME = '%s'\n", p); + break; + + case SQL_COLUMN_TYPE: /* == SQL_DESC_CONCISE_TYPE */ + value = estype_to_concise_type(stmt, field_type, col_idx, + unknown_sizes); + MYLOG(ES_DEBUG, "COLUMN_TYPE=" FORMAT_LEN "\n", value); + break; + + case SQL_COLUMN_TYPE_NAME: /* == SQL_DESC_TYPE_NAME */ + p = estype_to_name(stmt, field_type, col_idx, + fi && fi->auto_increment); + break; + + case SQL_COLUMN_UNSIGNED: /* == SQL_DESC_UNSINGED */ + value = estype_unsigned(conn, field_type); + if (value == -1) /* non-numeric becomes TRUE (ODBC Doc) */ + value = SQL_TRUE; + + break; + + case SQL_COLUMN_UPDATABLE: /* == SQL_DESC_UPDATABLE */ + + /* + * Neither Access or Borland care about this. + * + * if (field_type == ES_TYPE_OID) pfDesc = SQL_ATTR_READONLY; + * else + */ + if (!stmt_updatable) + value = SQL_ATTR_READONLY; + else + value = + fi ? (fi->updatable ? SQL_ATTR_WRITE : SQL_ATTR_READONLY) + : (QR_get_attid(res, col_idx) > 0 ? SQL_ATTR_WRITE + : SQL_ATTR_READONLY); + if (SQL_ATTR_READONLY != value) { + const char *name = fi ? 
SAFE_NAME(fi->column_name) + : QR_get_fieldname(res, col_idx); + if (stricmp(name, OID_NAME) == 0 || stricmp(name, "ctid") == 0 + || stricmp(name, XMIN_NAME) == 0) + value = SQL_ATTR_READONLY; + else if (conn->ms_jet && fi && fi->auto_increment) + value = SQL_ATTR_READONLY; + } + + MYLOG(ES_DEBUG, "%s: UPDATEABLE = " FORMAT_LEN "\n", func, value); + break; + case SQL_DESC_BASE_COLUMN_NAME: + + p = fi ? SAFE_NAME(fi->column_name) + : QR_get_fieldname(res, col_idx); + + MYLOG(ES_DEBUG, "BASE_COLUMN_NAME = '%s'\n", p); + break; + case SQL_DESC_BASE_TABLE_NAME: /* the same as TABLE_NAME ok ? */ + p = ti ? SAFE_NAME(ti->table_name) : NULL_STRING; + + MYLOG(ES_DEBUG, "BASE_TABLE_NAME = '%s'\n", p); + break; + case SQL_DESC_LENGTH: /* different from SQL_COLUMN_LENGTH */ + value = (fi && column_size > 0) + ? column_size + : estype_desclength(stmt, field_type, col_idx, + unknown_sizes); + if (-1 == value) + value = 0; + + MYLOG(ES_DEBUG, "col %d, desc_length = " FORMAT_LEN "\n", col_idx, + value); + break; + case SQL_DESC_OCTET_LENGTH: + value = (USE_FI(fi, unknown_sizes) && fi->length > 0) + ? 
fi->length + : estype_attr_transfer_octet_length( + conn, field_type, column_size, unknown_sizes); + if (-1 == value) + value = 0; + MYLOG(ES_DEBUG, "col %d, octet_length = " FORMAT_LEN "\n", col_idx, + value); + break; + case SQL_DESC_PRECISION: /* different from SQL_COLUMN_PRECISION */ + if (value = FI_precision(fi), value <= 0) + value = + estype_precision(stmt, field_type, col_idx, unknown_sizes); + if (value < 0) + value = 0; + + MYLOG(ES_DEBUG, "col %d, desc_precision = " FORMAT_LEN "\n", + col_idx, value); + break; + case SQL_DESC_SCALE: /* different from SQL_COLUMN_SCALE */ + value = estype_scale(stmt, field_type, col_idx); + if (value < 0) + value = 0; + break; + case SQL_DESC_LOCAL_TYPE_NAME: + p = estype_to_name(stmt, field_type, col_idx, + fi && fi->auto_increment); + break; + case SQL_DESC_TYPE: + value = + estype_to_sqldesctype(stmt, field_type, col_idx, unknown_sizes); + break; + case SQL_DESC_NUM_PREC_RADIX: + value = estype_radix(conn, field_type); + break; + case SQL_DESC_LITERAL_PREFIX: + p = estype_literal_prefix(conn, field_type); + break; + case SQL_DESC_LITERAL_SUFFIX: + p = estype_literal_suffix(conn, field_type); + break; + case SQL_DESC_UNNAMED: + value = (fi && NAME_IS_NULL(fi->column_name) + && NAME_IS_NULL(fi->column_alias)) + ? SQL_UNNAMED + : SQL_NAMED; + break; + case 1211: /* SQL_CA_SS_COLUMN_HIDDEN ? */ + value = 0; + break; + case 1212: /* SQL_CA_SS_COLUMN_KEY ? 
*/
            SC_set_error(stmt, STMT_OPTION_NOT_FOR_THE_DRIVER,
                         "this request may be for MS SQL Server", func);
            return SQL_ERROR;
        default:
            /* Any descriptor field not handled above is unsupported. */
            SC_set_error(stmt, STMT_INVALID_OPTION_IDENTIFIER,
                         "ColAttribute for this type not implemented yet",
                         func);
            return SQL_ERROR;
    }

    result = SQL_SUCCESS;

    /*
     * The switch above left either a string answer in 'p' or a numeric
     * answer in 'value'; copy whichever applies into the caller's buffers.
     */
    if (p) { /* char/binary data */
        size_t len = strlen(p);

        if (rgbDesc) {
            strncpy_null((char *)rgbDesc, p, (size_t)cbDescMax);

            /* Truncation is reported as SUCCESS_WITH_INFO, per ODBC. */
            if (len >= (size_t)cbDescMax) {
                result = SQL_SUCCESS_WITH_INFO;
                SC_set_error(stmt, STMT_TRUNCATED,
                             "The buffer was too small for the rgbDesc.", func);
            }
        }

        /* Full (untruncated) length is always reported. */
        if (pcbDesc)
            *pcbDesc = (SQLSMALLINT)len;
    } else {
        /* numeric data */
        if (pfDesc)
            *pfDesc = value;
    }

    return result;
}

/*
 * Returns result data for a single column in the current row.
 * Implements SQLGetData: 'icol' is the 1-based column number (0 selects the
 * bookmark column), 'fCType' is the requested C type (SQL_ARD_TYPE means
 * "use the type bound in the ARD"), and the converted value is written to
 * rgbValue/cbValueMax with the length/indicator in pcbValue.
 */
RETCODE SQL_API ESAPI_GetData(HSTMT hstmt, SQLUSMALLINT icol,
                              SQLSMALLINT fCType, PTR rgbValue,
                              SQLLEN cbValueMax, SQLLEN *pcbValue) {
    CSTR func = "ESAPI_GetData";
    QResultClass *res;
    StatementClass *stmt = (StatementClass *)hstmt;
    UInt2 num_cols;
    SQLLEN num_rows;
    OID field_type;
    int atttypmod;
    void *value = NULL;
    RETCODE result = SQL_SUCCESS;
    char get_bookmark = FALSE;
    SQLSMALLINT target_type;
    int precision = -1;
#ifdef WITH_UNIXODBC
    SQLCHAR dum_rgb[2] = "\0\0";
#endif /* WITH_UNIXODBC */

    MYLOG(ES_TRACE, "entering stmt=%p icol=%d\n", stmt, icol);

    if (!stmt) {
        SC_log_error(func, NULL_STRING, NULL);
        return SQL_INVALID_HANDLE;
    }
    res = SC_get_Curres(stmt);

    /* GetData is only legal on a successfully finished statement. */
    if (STMT_EXECUTING == stmt->status) {
        SC_set_error(stmt, STMT_SEQUENCE_ERROR,
                     "Can't get data while statement is still executing.",
                     func);
        return SQL_ERROR;
    }

    if (stmt->status != STMT_FINISHED) {
        SC_set_error(stmt, STMT_STATUS_ERROR,
                     "GetData can only be called after the successful "
                     "execution on a SQL statement",
                     func);
        return SQL_ERROR;
    }

#ifdef WITH_UNIXODBC
    /* unixODBC may pass a NULL rgbValue; substitute a dummy buffer. */
    if (NULL == rgbValue)
    {
        cbValueMax = 0;
        rgbValue = dum_rgb; /* to avoid a crash */
    }
#endif /* WITH_UNIXODBC */
    if (SQL_ARD_TYPE == fCType) {
        /* Resolve the effective C type from the application row descriptor. */
        ARDFields *opts;
        BindInfoClass *binfo = NULL;

        opts = SC_get_ARDF(stmt);
        if (0 == icol)
            binfo = opts->bookmark;
        else if (icol <= opts->allocated && opts->bindings)
            binfo = &opts->bindings[icol - 1];
        if (binfo) {
            target_type = binfo->returntype;
            MYLOG(ES_DEBUG, "SQL_ARD_TYPE=%d\n", target_type);
            precision = binfo->precision;
        } else {
            SC_set_error(stmt, STMT_STATUS_ERROR,
                         "GetData can't determine the type via ARD", func);
            return SQL_ERROR;
        }
    } else
        target_type = fCType;
    if (icol == 0) {
        /* Column 0 is the bookmark; only valid when bookmarks are enabled. */
        if (stmt->options.use_bookmarks == SQL_UB_OFF) {
            SC_set_error(
                stmt, STMT_COLNUM_ERROR,
                "Attempt to retrieve bookmark with bookmark usage disabled",
                func);
            return SQL_ERROR;
        }

        /* Make sure it is the bookmark data type */
        switch (target_type) {
            case SQL_C_BOOKMARK:
            case SQL_C_VARBOOKMARK:
                break;
            default:
                MYLOG(
                    ES_ALL,
                    "GetData Column 0 is type %d not of type SQL_C_BOOKMARK\n",
                    target_type);
                SC_set_error(stmt, STMT_PROGRAM_TYPE_OUT_OF_RANGE,
                             "Column 0 is not of type SQL_C_BOOKMARK", func);
                return SQL_ERROR;
        }

        get_bookmark = TRUE;
    } else {
        /* use zero-based column numbers */
        icol--;

        /* make sure the column number is valid */
        num_cols = QR_NumPublicResultCols(res);
        if (icol >= num_cols) {
            SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR,
                         "Invalid column number.", func);
            return SQL_ERROR;
        }
    }

#ifdef __APPLE__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wkeyword-macro"
#endif // __APPLE__
/*
 * From this point on a plain 'return' would skip the cleanup label; the
 * macro turns any accidental 'return' into a compile error. Exit via
 * 'goto cleanup' only.
 */
#define return DONT_CALL_RETURN_FROM_HERE ? ? ?
#ifdef __APPLE__
#pragma clang diagnostic pop
#endif // __APPLE__
    if (!SC_is_fetchcursor(stmt)) {
        /* make sure we're positioned on a valid row */
        num_rows = QR_get_num_total_tuples(res);
        if ((stmt->currTuple < 0) || (stmt->currTuple >= num_rows)) {
            SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR,
                         "Not positioned on a valid row for GetData.", func);
            result = SQL_ERROR;
            goto cleanup;
        }
        MYLOG(ES_DEBUG, " num_rows = " FORMAT_LEN "\n", num_rows);

        if (!get_bookmark) {
            /* Map the global tuple index into the local row cache. */
            SQLLEN curt = GIdx2CacheIdx(stmt->currTuple, stmt, res);
            value = QR_get_value_backend_row(res, curt, icol);
            MYLOG(ES_DEBUG,
                  "currT=" FORMAT_LEN " base=" FORMAT_LEN " rowset=" FORMAT_LEN
                  "\n",
                  stmt->currTuple, QR_get_rowstart_in_cache(res),
                  SC_get_rowset_start(stmt));
            MYLOG(ES_DEBUG, " value = '%s'\n", NULL_IF_NULL(value));
        }
    } else {
        /* it's a SOCKET result (backend data) */
        if (stmt->currTuple == -1 || !res || !res->tupleField) {
            SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR,
                         "Not positioned on a valid row for GetData.", func);
            result = SQL_ERROR;
            goto cleanup;
        }

        if (!get_bookmark) {
            /** value = QR_get_value_backend(res, icol); maybe this doesn't work
             */
            SQLLEN curt = GIdx2CacheIdx(stmt->currTuple, stmt, res);
            value = QR_get_value_backend_row(res, curt, icol);
        }
        MYLOG(ES_DEBUG, " socket: value = '%s'\n", NULL_IF_NULL(value));
    }

    if (get_bookmark) {
        /* Bookmark request: return an Int4 bookmark of the current row. */
        BOOL contents_get = FALSE;

        if (rgbValue) {
            /* Fixed-size C_BOOKMARK always fits; VARBOOKMARK needs room. */
            if (SQL_C_BOOKMARK == target_type
                || (SQLLEN)sizeof(UInt4) <= cbValueMax) {
                Int4 bookmark = (int)SC_make_int4_bookmark(stmt->currTuple);
                contents_get = TRUE;
                memcpy(rgbValue, &bookmark, sizeof(bookmark));
            }
        }
        if (pcbValue)
            *pcbValue = sizeof(Int4);

        if (contents_get)
            result = SQL_SUCCESS;
        else {
            SC_set_error(stmt, STMT_TRUNCATED,
                         "The buffer was too small for the GetData.", func);
            result = SQL_SUCCESS_WITH_INFO;
        }
        goto cleanup;
    }

    field_type = QR_get_field_type(res, icol);
    atttypmod =
        QR_get_atttypmod(res, icol);

    MYLOG(ES_DEBUG,
          "**** icol = %d, target_type = %d, field_type = %d, value = '%s'\n",
          icol, target_type, field_type, NULL_IF_NULL(value));

    SC_set_current_col(stmt, icol);

    /*
     * NOTE(review): pcbValue is passed for the last two arguments — confirm
     * against copy_and_convert_field's signature that both out-parameters
     * are intended to alias the same buffer.
     */
    result = (RETCODE)copy_and_convert_field(stmt, field_type, atttypmod, value,
                                             target_type, precision, rgbValue,
                                             cbValueMax, pcbValue, pcbValue);

    /* Map the converter's COPY_* status onto ODBC return codes. */
    switch (result) {
        case COPY_OK:
            result = SQL_SUCCESS;
            break;

        case COPY_UNSUPPORTED_TYPE:
            SC_set_error(stmt, STMT_RESTRICTED_DATA_TYPE_ERROR,
                         "Received an unsupported type from Elasticsearch.",
                         func);
            result = SQL_ERROR;
            break;

        case COPY_UNSUPPORTED_CONVERSION:
            SC_set_error(stmt, STMT_RESTRICTED_DATA_TYPE_ERROR,
                         "Couldn't handle the necessary data type conversion.",
                         func);
            result = SQL_ERROR;
            break;

        case COPY_RESULT_TRUNCATED:
            SC_set_error(stmt, STMT_TRUNCATED,
                         "The buffer was too small for the GetData.", func);
            result = SQL_SUCCESS_WITH_INFO;
            break;

        case COPY_INVALID_STRING_CONVERSION: /* invalid string */
            SC_set_error(stmt, STMT_STRING_CONVERSION_ERROR,
                         "invalid string conversion occured.", func);
            result = SQL_ERROR;
            break;

        case COPY_GENERAL_ERROR: /* error msg already filled in */
            result = SQL_ERROR;
            break;

        case COPY_NO_DATA_FOUND:
            /* SC_log_error(func, "no data found", stmt); */
            result = SQL_NO_DATA_FOUND;
            break;

        default:
            SC_set_error(
                stmt, STMT_INTERNAL_ERROR,
                "Unrecognized return value from copy_and_convert_field.", func);
            result = SQL_ERROR;
            break;
    }

cleanup:
#undef return
    MYLOG(ES_TRACE, "leaving %d\n", result);
    return result;
}

/*
 * Returns data for bound columns in the current row ("hstmt->iCursor"),
 * advances the cursor.
 */
RETCODE SQL_API ESAPI_Fetch(HSTMT hstmt) {
    CSTR func = "ESAPI_Fetch";
    StatementClass *stmt = (StatementClass *)hstmt;
    ARDFields *opts;
    QResultClass *res;
    BindInfoClass *bookmark;
    RETCODE retval = SQL_SUCCESS;

    MYLOG(ES_TRACE, "entering stmt = %p, stmt->result= %p\n", stmt,
          stmt ? SC_get_Curres(stmt) : NULL);

    if (!stmt) {
        SC_log_error(func, NULL_STRING, NULL);
        return SQL_INVALID_HANDLE;
    }

    SC_clear_error(stmt);

    if (!(res = SC_get_Curres(stmt), res)) {
        SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR,
                     "Null statement result in ESAPI_Fetch.", func);
        return SQL_ERROR;
    }

    /* Not allowed to bind a bookmark column when using SQLFetch. */
    opts = SC_get_ARDF(stmt);
    if ((bookmark = opts->bookmark, bookmark) && bookmark->buffer) {
        SC_set_error(
            stmt, STMT_COLNUM_ERROR,
            "Not allowed to bind a bookmark column when using ESAPI_Fetch",
            func);
        return SQL_ERROR;
    }

    /* Fetch is only legal on a successfully finished statement. */
    if (stmt->status == STMT_EXECUTING) {
        SC_set_error(stmt, STMT_SEQUENCE_ERROR,
                     "Can't fetch while statement is still executing.", func);
        return SQL_ERROR;
    }

    if (stmt->status != STMT_FINISHED) {
        SC_set_error(stmt, STMT_SEQUENCE_ERROR,
                     "Fetch can only be called after the successful execution "
                     "on a SQL statement",
                     func);
        return SQL_ERROR;
    }

    if (opts->bindings == NULL) {
        if (!SC_may_fetch_rows(stmt))
            return SQL_NO_DATA_FOUND;
        /* just to avoid a crash if the user insists on calling this */
        /* function even if SQL_ExecDirect has reported an Error */
        SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR,
                     "Bindings were not allocated properly.", func);
        return SQL_ERROR;
    }

#ifdef __APPLE__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wkeyword-macro"
#endif // __APPLE__
/* Forbid direct 'return' below so the #undef always runs before exit. */
#define return DONT_CALL_RETURN_FROM_HERE ? ? ?
#ifdef __APPLE__
#pragma clang diagnostic pop
#endif // __APPLE__
    if (stmt->rowset_start < 0)
        SC_set_rowset_start(stmt, 0, TRUE);
    /* Single-row fetch: request exactly one row from the result. */
    QR_set_reqsize(res, 1);
    /* QR_inc_rowstart_in_cache(res, stmt->last_fetch_count_include_ommitted);
     */
    SC_inc_rowset_start(stmt, stmt->last_fetch_count_include_ommitted);

    retval = SC_fetch(stmt);
#undef return
    return retval;
}

/*
 * Find the nth valid (not-deleted) row starting from position 'sta',
 * scanning backward when orientation is SQL_FETCH_PRIOR and forward
 * otherwise. 'nth' is 1-based. On success returns nth (positive) and
 * stores the found row index in *nearest; when fewer than nth valid rows
 * exist, returns the negated count of valid rows found and stores the
 * boundary (-1 or num_tuples) in *nearest.
 */
SQLLEN
getNthValid(const QResultClass *res, SQLLEN sta, UWORD orientation, SQLULEN nth,
            SQLLEN *nearest) {
    SQLLEN i, num_tuples = QR_get_num_total_tuples(res), nearp;
    SQLULEN count;
    KeySet *keyset;

    /* Until EOF has been seen the total row count is unknown. */
    if (!QR_once_reached_eof(res))
        num_tuples = INT_MAX;
    /* Note that the parameter nth is 1-based */
    MYLOG(ES_DEBUG,
          "get " FORMAT_ULEN "th Valid data from " FORMAT_LEN " to %s [dlt=%d]",
          nth, sta, orientation == SQL_FETCH_PRIOR ? "backward" : "forward",
          res->dl_count);
    if (0 == res->dl_count) {
        /* No deleted rows: the answer is pure arithmetic. */
        MYPRINTF(ES_DEBUG, "\n");
        if (SQL_FETCH_PRIOR == orientation) {
            if (sta + 1 >= (SQLLEN)nth) {
                *nearest = sta + 1 - nth;
                return nth;
            }
            *nearest = -1;
            return -(SQLLEN)(sta + 1);
        } else {
            nearp = sta - 1 + nth;
            if (nearp < num_tuples) {
                *nearest = nearp;
                return nth;
            }
            *nearest = num_tuples;
            return -(SQLLEN)(num_tuples - sta);
        }
    }
    count = 0;
    if (QR_get_cursor(res)) {
        /*
         * Cursor case: res->deleted holds the (sorted) indexes of deleted
         * rows; shift the target position past each deletion it crosses.
         */
        SQLLEN *deleted = res->deleted;
        SQLLEN delsta;

        if (SQL_FETCH_PRIOR == orientation) {
            *nearest = sta + 1 - nth;
            delsta = (-1);
            MYPRINTF(ES_DEBUG, "deleted ");
            for (i = res->dl_count - 1; i >= 0 && *nearest <= deleted[i]; i--) {
                MYPRINTF(ES_DEBUG, "[" FORMAT_LEN "]=" FORMAT_LEN " ", i,
                         deleted[i]);
                if (sta >= deleted[i]) {
                    (*nearest)--;
                    if (i > delsta)
                        delsta = i;
                }
            }
            MYPRINTF(ES_DEBUG, "nearest=" FORMAT_LEN "\n", *nearest);
            if (*nearest < 0) {
                *nearest = -1;
                count = sta - delsta;
            } else
                return nth;
        } else {
            MYPRINTF(ES_DEBUG, "\n");
            *nearest = sta - 1 + nth;
            delsta = res->dl_count;
            if (!QR_once_reached_eof(res))
                num_tuples =
                    INT_MAX;
            for (i = 0; i < res->dl_count && *nearest >= deleted[i]; i++) {
                if (sta <= deleted[i]) {
                    (*nearest)++;
                    if (i < delsta)
                        delsta = i;
                }
            }
            if (*nearest >= num_tuples) {
                *nearest = num_tuples;
                count = *nearest - sta + delsta - res->dl_count;
            } else
                return nth;
        }
    } else if (SQL_FETCH_PRIOR == orientation) {
        /* No cursor: walk the keyset backward, skipping deleted rows. */
        for (i = sta, keyset = res->keyset + sta; i >= 0; i--, keyset--) {
            if (0
                == (keyset->status
                    & (CURS_SELF_DELETING | CURS_SELF_DELETED
                       | CURS_OTHER_DELETED))) {
                *nearest = i;
                MYPRINTF(ES_DEBUG, " nearest=" FORMAT_LEN "\n", *nearest);
                if (++count == nth)
                    return count;
            }
        }
        *nearest = -1;
    } else {
        /* No cursor: walk the keyset forward, skipping deleted rows. */
        for (i = sta, keyset = res->keyset + sta; i < num_tuples;
             i++, keyset++) {
            if (0
                == (keyset->status
                    & (CURS_SELF_DELETING | CURS_SELF_DELETED
                       | CURS_OTHER_DELETED))) {
                *nearest = i;
                MYPRINTF(ES_DEBUG, " nearest=" FORMAT_LEN "\n", *nearest);
                if (++count == nth)
                    return count;
            }
        }
        *nearest = num_tuples;
    }
    MYPRINTF(ES_DEBUG, " nearest not found\n");
    return -(SQLLEN)count;
}

/*
 * return NO_DATA_FOUND macros
 * save_rowset_start or num_tuples must be defined
 * (they expand references to locals of the enclosing function)
 */
#define EXTFETCH_RETURN_BOF(stmt, res)                   \
    {                                                    \
        MYLOG(ES_ALL, "RETURN_BOF\n");                   \
        SC_set_rowset_start(stmt, -1, TRUE);             \
        stmt->currTuple = -1;                            \
        /* move_cursor_position_if_needed(stmt, res); */ \
        return SQL_NO_DATA_FOUND;                        \
    }
#define EXTFETCH_RETURN_EOF(stmt, res)                   \
    {                                                    \
        MYLOG(ES_ALL, "RETURN_EOF\n");                   \
        SC_set_rowset_start(stmt, num_tuples, TRUE);     \
        stmt->currTuple = -1;                            \
        /* move_cursor_position_if_needed(stmt, res); */ \
        return SQL_NO_DATA_FOUND;                        \
    }

/* This fetches a block of data (rowset).
*/ +RETCODE SQL_API ESAPI_ExtendedFetch(HSTMT hstmt, SQLUSMALLINT fFetchType, + SQLLEN irow, SQLULEN *pcrow, + SQLUSMALLINT *rgfRowStatus, + SQLLEN bookmark_offset, SQLLEN rowsetSize) { + UNUSED(bookmark_offset, irow); + CSTR func = "ESAPI_ExtendedFetch"; + StatementClass *stmt = (StatementClass *)hstmt; + ARDFields *opts; + QResultClass *res; + BindInfoClass *bookmark; + SQLLEN num_tuples, i, fc_io; + SQLLEN save_rowset_size, progress_size; + SQLLEN rowset_start, rowset_end = (-1); + RETCODE result = SQL_SUCCESS; + char truncated, error, should_set_rowset_start = FALSE; + SQLLEN currp; + UWORD pstatus; + BOOL currp_is_valid, reached_eof, useCursor; + SQLLEN reqsize = rowsetSize; + + MYLOG(ES_TRACE, "entering stmt=%p rowsetSize=" FORMAT_LEN "\n", stmt, + rowsetSize); + + if (!stmt) { + SC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + + /* if (SC_is_fetchcursor(stmt) && !stmt->manual_result) */ + if ((SQL_CURSOR_FORWARD_ONLY != stmt->options.cursor_type) + || (fFetchType != SQL_FETCH_NEXT)) { + SC_set_error(stmt, STMT_FETCH_OUT_OF_RANGE, + "Only SQL_CURSOR_FORWARD_ONLY with SQL_FETCH_NEXT " + "cursor's are supported.", + func); + return SQL_ERROR; + } + + SC_clear_error(stmt); + + if (!(res = SC_get_Curres(stmt), res)) { + SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, + "Null statement result in ESAPI_ExtendedFetch.", func); + return SQL_ERROR; + } + + opts = SC_get_ARDF(stmt); + /* + * If a bookmark column is bound but bookmark usage is off, then error. 
+ */ + if ((bookmark = opts->bookmark, bookmark) && bookmark->buffer + && stmt->options.use_bookmarks == SQL_UB_OFF) { + SC_set_error( + stmt, STMT_COLNUM_ERROR, + "Attempt to retrieve bookmark with bookmark usage disabled", func); + return SQL_ERROR; + } + + if (stmt->status == STMT_EXECUTING) { + SC_set_error(stmt, STMT_SEQUENCE_ERROR, + "Can't fetch while statement is still executing.", func); + return SQL_ERROR; + } + + if (stmt->status != STMT_FINISHED) { + SC_set_error(stmt, STMT_STATUS_ERROR, + "ExtendedFetch can only be called after the successful " + "execution on a SQL statement", + func); + return SQL_ERROR; + } + + if (opts->bindings == NULL) { + if (!SC_may_fetch_rows(stmt)) + return SQL_NO_DATA_FOUND; + /* just to avoid a crash if the user insists on calling this */ + /* function even if SQL_ExecDirect has reported an Error */ + SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, + "Bindings were not allocated properly.", func); + return SQL_ERROR; + } + + /* Initialize to no rows fetched */ + if (rgfRowStatus) + for (i = 0; i < rowsetSize; i++) + *(rgfRowStatus + i) = SQL_ROW_NOROW; + + if (pcrow) + *pcrow = 0; + + useCursor = (SC_is_fetchcursor(stmt) && NULL != QR_get_cursor(res)); + num_tuples = QR_get_num_total_tuples(res); + reached_eof = QR_once_reached_eof(res) && QR_get_cursor(res); + if (useCursor && !reached_eof) + num_tuples = INT_MAX; + + MYLOG(ES_ALL, "num_tuples=" FORMAT_LEN "\n", num_tuples); + /* Save and discard the saved rowset size */ + save_rowset_size = stmt->save_rowset_size; + stmt->save_rowset_size = -1; + rowset_start = SC_get_rowset_start(stmt); + + QR_stop_movement(res); + res->move_offset = 0; + switch (fFetchType) { + case SQL_FETCH_NEXT: + progress_size = + (save_rowset_size > 0 ? 
save_rowset_size : rowsetSize); + if (rowset_start < 0) + SC_set_rowset_start(stmt, 0, TRUE); + else if (res->keyset) { + if (stmt->last_fetch_count <= progress_size) { + SC_inc_rowset_start( + stmt, stmt->last_fetch_count_include_ommitted); + progress_size -= stmt->last_fetch_count; + } + if (progress_size > 0) { + if (getNthValid(res, SC_get_rowset_start(stmt), + SQL_FETCH_NEXT, progress_size + 1, + &rowset_start) + <= 0) { + EXTFETCH_RETURN_EOF(stmt, res) + } else + should_set_rowset_start = TRUE; + } + } else + SC_inc_rowset_start(stmt, progress_size); + MYLOG(ES_DEBUG, + "SQL_FETCH_NEXT: num_tuples=" FORMAT_LEN + ", currtuple=" FORMAT_LEN ", rowst=" FORMAT_LEN "\n", + num_tuples, stmt->currTuple, rowset_start); + break; + default: + SC_set_error(stmt, STMT_FETCH_OUT_OF_RANGE, + "Unsupported ESAPI_ExtendedFetch Direction", func); + return SQL_ERROR; + } + + /* + * CHECK FOR PROPER CURSOR STATE + */ + + /* + * Handle Declare Fetch style specially because the end is not really + * the end... 
+ */ + if (!should_set_rowset_start) + rowset_start = SC_get_rowset_start(stmt); + + // Get more results when cursor reaches end + { + ConnectionClass *conn = SC_get_conn(stmt); + if (conn != NULL) { + const SQLLEN end_rowset_size = rowset_start + rowsetSize; + while ((end_rowset_size >= num_tuples) + && (NULL != res->server_cursor_id)) { + GetNextResultSet(stmt); + num_tuples = QR_get_num_total_tuples(res); + } + } + } + + if (useCursor) { + if (reached_eof && rowset_start >= num_tuples) { + EXTFETCH_RETURN_EOF(stmt, res) + } + } else { + /* If *new* rowset is after the result_set, return no data found */ + if (rowset_start >= num_tuples) { + EXTFETCH_RETURN_EOF(stmt, res) + } + } + /* If *new* rowset is prior to result_set, return no data found */ + if (rowset_start < 0) { + if (rowset_start + rowsetSize <= 0) { + EXTFETCH_RETURN_BOF(stmt, res) + } else { /* overlap with beginning of result set, + * so get first rowset */ + SC_set_rowset_start(stmt, 0, TRUE); + } + should_set_rowset_start = FALSE; + } + +#ifdef __APPLE__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wkeyword-macro" +#endif // __APPLE__ +#define return DONT_CALL_RETURN_FROM_HERE ? ? ? 
+#ifdef __APPLE__ +#pragma clang diagnostic pop +#endif // __APPLE__ + /* set the rowset_start if needed */ + if (should_set_rowset_start) + SC_set_rowset_start(stmt, rowset_start, TRUE); + if (rowset_end < 0 && QR_haskeyset(res)) { + getNthValid(res, rowset_start, SQL_FETCH_NEXT, rowsetSize, &rowset_end); + reqsize = rowset_end - rowset_start + 1; + } + QR_set_reqsize(res, (Int4)reqsize); + /* currTuple is always 1 row prior to the rowset start */ + stmt->currTuple = RowIdx2GIdx(-1, stmt); + QR_set_rowstart_in_cache(res, SC_get_rowset_start(stmt)); + + /* Physical Row advancement occurs for each row fetched below */ + + MYLOG(ES_DEBUG, "new currTuple = " FORMAT_LEN "\n", stmt->currTuple); + + truncated = error = FALSE; + + currp = -1; + stmt->bind_row = 0; /* set the binding location */ + result = SC_fetch(stmt); + if (SQL_ERROR == result) + goto cleanup; + if (SQL_NO_DATA_FOUND != result && res->keyset) { + currp = GIdx2KResIdx(SC_get_rowset_start(stmt), stmt, res); + MYLOG(ES_ALL, "currp=" FORMAT_LEN "\n", currp); + if (currp < 0) { + result = SQL_ERROR; + MYLOG(ES_DEBUG, + "rowset_start=" FORMAT_LEN " but currp=" FORMAT_LEN "\n", + SC_get_rowset_start(stmt), currp); + SC_set_error(stmt, STMT_INTERNAL_ERROR, + "rowset_start not in the keyset", func); + goto cleanup; + } + } + for (i = 0, fc_io = 0; SQL_NO_DATA_FOUND != result && SQL_ERROR != result; + currp++) { + fc_io++; + currp_is_valid = FALSE; + if (res->keyset) { + if ((SQLULEN)currp < res->num_cached_keys) { + currp_is_valid = TRUE; + res->keyset[currp].status &= + ~CURS_IN_ROWSET; /* Off the flag first */ + } else { + MYLOG(ES_DEBUG, "Umm current row is out of keyset\n"); + break; + } + } + MYLOG(ES_ALL, "ExtFetch result=%d\n", result); + if (currp_is_valid && SQL_SUCCESS_WITH_INFO == result + && 0 == stmt->last_fetch_count) { + MYLOG(ES_ALL, "just skipping deleted row " FORMAT_LEN "\n", currp); + if (rowsetSize - i + fc_io > reqsize) + QR_set_reqsize(res, (Int4)(rowsetSize - i + fc_io)); + result = 
SC_fetch(stmt); + if (SQL_ERROR == result) + break; + continue; + } + + /* Determine Function status */ + if (result == SQL_SUCCESS_WITH_INFO) + truncated = TRUE; + else if (result == SQL_ERROR) + error = TRUE; + + /* Determine Row Status */ + if (rgfRowStatus) { + if (result == SQL_ERROR) + *(rgfRowStatus + i) = SQL_ROW_ERROR; + else if (currp_is_valid) { + pstatus = (res->keyset[currp].status & KEYSET_INFO_PUBLIC); + if (pstatus != 0 && pstatus != SQL_ROW_ADDED) { + rgfRowStatus[i] = pstatus; + } else + rgfRowStatus[i] = SQL_ROW_SUCCESS; + /* refresh the status */ + /* if (SQL_ROW_DELETED != pstatus) */ + res->keyset[currp].status &= (~KEYSET_INFO_PUBLIC); + } else + *(rgfRowStatus + i) = SQL_ROW_SUCCESS; + } + if (SQL_ERROR != result && currp_is_valid) + res->keyset[currp].status |= + CURS_IN_ROWSET; /* This is the unique place where the + CURS_IN_ROWSET bit is turned on */ + i++; + if (i >= rowsetSize) + break; + stmt->bind_row = (SQLSETPOSIROW)i; /* set the binding location */ + result = SC_fetch(stmt); + } + if (SQL_ERROR == result) + goto cleanup; + + /* Save the fetch count for SQLSetPos */ + stmt->last_fetch_count = i; + stmt->save_rowset_size = rowsetSize; + /* + currp = KResIdx2GIdx(currp, stmt, res); + stmt->last_fetch_count_include_ommitted = GIdx2RowIdx(currp, stmt); + */ + stmt->last_fetch_count_include_ommitted = fc_io; + + /* Reset next binding row */ + stmt->bind_row = 0; + + /* Move the cursor position to the first row in the result set. 
*/ + stmt->currTuple = RowIdx2GIdx(0, stmt); + + /* For declare/fetch, need to reset cursor to beginning of rowset */ + if (useCursor) + QR_set_position(res, 0); + + /* Set the number of rows retrieved */ + if (pcrow) + *pcrow = i; + MYLOG(ES_ALL, "pcrow=" FORMAT_LEN "\n", i); + + if (i == 0) + /* Only DeclareFetch should wind up here */ + result = SQL_NO_DATA_FOUND; + else if (error) + result = SQL_ERROR; + else if (truncated) + result = SQL_SUCCESS_WITH_INFO; + else if (SC_get_errornumber(stmt) == STMT_POS_BEFORE_RECORDSET) + result = SQL_SUCCESS_WITH_INFO; + else + result = SQL_SUCCESS; + +cleanup: +#undef return + return result; +} + +/* + * This determines whether there are more results sets available for + * the "hstmt". + */ +/* CC: return SQL_NO_DATA_FOUND since we do not support multiple result sets */ +RETCODE SQL_API ESAPI_MoreResults(HSTMT hstmt) { + StatementClass *stmt = (StatementClass *)hstmt; + QResultClass *res; + RETCODE ret = SQL_SUCCESS; + + MYLOG(ES_TRACE, "entering...\n"); + res = SC_get_Curres(stmt); + if (res) { + res = res->next; + SC_set_Curres(stmt, res); + } + if (res) { + SQLSMALLINT num_p; + + if (stmt->multi_statement < 0) + ESAPI_NumParams(stmt, &num_p); + if (stmt->multi_statement > 0) { + const char *cmdstr; + + SC_initialize_cols_info(stmt, FALSE, TRUE); + stmt->statement_type = STMT_TYPE_UNKNOWN; + if (cmdstr = QR_get_command(res), NULL != cmdstr) + stmt->statement_type = (short)statement_type(cmdstr); + stmt->join_info = 0; + SC_clear_parse_method(stmt); + } + stmt->diag_row_count = res->recent_processed_row_count; + SC_set_rowset_start(stmt, -1, FALSE); + stmt->currTuple = -1; + } else { + ESAPI_FreeStmt(hstmt, SQL_CLOSE); + ret = SQL_NO_DATA_FOUND; + } + MYLOG(ES_DEBUG, "leaving %d\n", ret); + return ret; +} + +SQLLEN ClearCachedRows(TupleField *tuple, int num_fields, SQLLEN num_rows) { + SQLLEN i; + + for (i = 0; i < num_fields * num_rows; i++, tuple++) { + if (tuple->value) { + MYLOG(ES_ALL, + "freeing tuple[" FORMAT_LEN 
"][" FORMAT_LEN "].value=%p\n", + i / num_fields, i % num_fields, tuple->value); + free(tuple->value); + tuple->value = NULL; + } + tuple->len = -1; + } + return i; +} + +/* Set the cursor name on a statement handle */ +RETCODE SQL_API ESAPI_SetCursorName(HSTMT hstmt, const SQLCHAR *szCursor, + SQLSMALLINT cbCursor) { + CSTR func = "ESAPI_SetCursorName"; + StatementClass *stmt = (StatementClass *)hstmt; + + MYLOG(ES_TRACE, "entering hstmt=%p, szCursor=%p, cbCursorMax=%d\n", hstmt, + szCursor, cbCursor); + + if (!stmt) { + SC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + + SET_NAME_DIRECTLY(stmt->cursor_name, + make_string(szCursor, cbCursor, NULL, 0)); + return SQL_SUCCESS; +} + +/* Return the cursor name for a statement handle */ +RETCODE SQL_API ESAPI_GetCursorName(HSTMT hstmt, SQLCHAR *szCursor, + SQLSMALLINT cbCursorMax, + SQLSMALLINT *pcbCursor) { + CSTR func = "ESAPI_GetCursorName"; + StatementClass *stmt = (StatementClass *)hstmt; + size_t len = 0; + RETCODE result; + + MYLOG(ES_DEBUG, + "entering hstmt=%p, szCursor=%p, cbCursorMax=%d, pcbCursor=%p\n", + hstmt, szCursor, cbCursorMax, pcbCursor); + + if (!stmt) { + SC_log_error(func, NULL_STRING, NULL); + return SQL_INVALID_HANDLE; + } + result = SQL_SUCCESS; + len = strlen(SC_cursor_name(stmt)); + + if (szCursor) { + strncpy_null((char *)szCursor, SC_cursor_name(stmt), cbCursorMax); + + if (len >= (size_t)cbCursorMax) { + result = SQL_SUCCESS_WITH_INFO; + SC_set_error(stmt, STMT_TRUNCATED, + "The buffer was too small for the GetCursorName.", + func); + } + } + + if (pcbCursor) + *pcbCursor = (SQLSMALLINT)len; + + return result; +} diff --git a/sql-odbc/src/odfesqlodbc/setup.c b/sql-odbc/src/odfesqlodbc/setup.c new file mode 100644 index 0000000000..0f9cc0bc51 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/setup.c @@ -0,0 +1,714 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifdef WIN32 +#include "elasticenlist.h" +#include "es_odbc.h" +#include "loadlib.h" +#include "misc.h" // strncpy_null + +//#include "environ.h" +#ifdef WIN32 +#include +#endif +#include +#include + +#include "dlg_specific.h" +#include "es_apifunc.h" +#include "resource.h" +#include "win_setup.h" + +#define INTFUNC __stdcall + +extern HINSTANCE s_hModule; /* Saved module handle. */ + +/* Constants */ +#define MIN(x, y) ((x) < (y) ? (x) : (y)) + +#define MAXKEYLEN (32 + 1) /* Max keyword length */ +#define MAXDESC (255 + 1) /* Max description length */ +#define MAXDSNAME (32 + 1) /* Max data source name length */ + +static void ParseAttributes(LPCSTR lpszAttributes, LPSETUPDLG lpsetupdlg); +static BOOL SetDSNAttributes(HWND hwndParent, LPSETUPDLG lpsetupdlg, + DWORD *errcode); +static BOOL SetDriverAttributes(LPCSTR lpszDriver, DWORD *pErrorCode, + LPSTR pErrorMessage, WORD cbMessage); +static void CenterDialog(HWND hdlg); + +/*-------- + * ConfigDSN + * + * Description: ODBC Setup entry point + * This entry point is called by the ODBC Installer + * (see file header for more details) + * Input : hwnd ----------- Parent window handle + * fRequest ------- Request type (i.e., add, config, or remove) + * lpszDriver ----- Driver name + * lpszAttributes - data source attribute string + * Output : TRUE success, FALSE otherwise + *-------- + */ +BOOL CALLBACK ConfigDSN(HWND hwnd, WORD fRequest, LPCSTR lpszDriver, + LPCSTR lpszAttributes) { + BOOL fSuccess; /* Success/fail flag */ + GLOBALHANDLE hglbAttr; + LPSETUPDLG 
lpsetupdlg; + + /* Allocate attribute array */ + hglbAttr = GlobalAlloc(GMEM_MOVEABLE | GMEM_ZEROINIT, sizeof(SETUPDLG)); + if (!hglbAttr) + return FALSE; + lpsetupdlg = (LPSETUPDLG)GlobalLock(hglbAttr); + + /* First of all, parse attribute string only for DSN entry */ + CC_conninfo_init(&(lpsetupdlg->ci), INIT_GLOBALS); + if (lpszAttributes) + ParseAttributes(lpszAttributes, lpsetupdlg); + + /* Save original data source name */ + if (lpsetupdlg->ci.dsn[0]) + STRCPY_FIXED(lpsetupdlg->szDSN, lpsetupdlg->ci.dsn); + else + lpsetupdlg->szDSN[0] = '\0'; + + /* Remove data source */ + if (ODBC_REMOVE_DSN == fRequest) { + /* Fail if no data source name was supplied */ + if (!lpsetupdlg->ci.dsn[0]) + fSuccess = FALSE; + + /* Otherwise remove data source from ODBC.INI */ + else + fSuccess = SQLRemoveDSNFromIni(lpsetupdlg->ci.dsn); + } + /* Add or Configure data source */ + else { + /* Save passed variables for global access (e.g., dialog access) */ + lpsetupdlg->hwndParent = hwnd; + lpsetupdlg->lpszDrvr = lpszDriver; + lpsetupdlg->fNewDSN = (ODBC_ADD_DSN == fRequest); + lpsetupdlg->fDefault = !lstrcmpi(lpsetupdlg->ci.dsn, INI_DSN); + + /* Cleanup conninfo and restore data source name */ + CC_conninfo_init(&(lpsetupdlg->ci), CLEANUP_FOR_REUSE | INIT_GLOBALS); + STRCPY_FIXED(lpsetupdlg->ci.dsn, lpsetupdlg->szDSN); + /* Get common attributes of Data Source */ + getDSNinfo(&(lpsetupdlg->ci), lpsetupdlg->lpszDrvr); + /* + * Parse attribute string again + * + * NOTE: Values supplied in the attribute string will always + * override settings in ODBC.INI + */ + if (lpszAttributes) + ParseAttributes(lpszAttributes, lpsetupdlg); + + /* + * Display the appropriate dialog (if parent window handle + * supplied) + */ + if (hwnd) { + /* Display dialog(s) */ + fSuccess = + (IDOK + == DialogBoxParam(s_hModule, MAKEINTRESOURCE(DLG_CONFIG), hwnd, + ConfigDlgProc, (LPARAM)lpsetupdlg)); + } else if (lpsetupdlg->ci.dsn[0]) { + MYLOG(ES_DEBUG, "SetDSNAttributes\n"); + fSuccess = 
SetDSNAttributes(hwnd, lpsetupdlg, NULL); + } else + fSuccess = FALSE; + } + + CC_conninfo_release(&(lpsetupdlg->ci)); + GlobalUnlock(hglbAttr); + GlobalFree(hglbAttr); + + return fSuccess; +} + +/*-------- + * ConfigDriver + * + * Description: ODBC Setup entry point + * This entry point is called by the ODBC Installer + * (see file header for more details) + * Arguments : hwnd ----------- Parent window handle + * fRequest ------- Request type (i.e., add, config, or remove) + * lpszDriver ----- Driver name + * lpszArgs ------- A null-terminated string containing + arguments for a driver specific fRequest + * lpszMsg -------- A null-terimated string containing + an output message from the driver setup + * cnMsgMax ------- Length of lpszMSg + * pcbMsgOut ------ Total number of bytes available to + return in lpszMsg + * Returns : TRUE success, FALSE otherwise + *-------- + */ +BOOL CALLBACK ConfigDriver(HWND hwnd, WORD fRequest, LPCSTR lpszDriver, + LPCSTR lpszArgs, LPSTR lpszMsg, WORD cbMsgMax, + WORD *pcbMsgOut) { + UNUSED(lpszArgs, hwnd); + DWORD errorCode = 0; + BOOL fSuccess = TRUE; /* Success/fail flag */ + + if (cbMsgMax > 0 && NULL != lpszMsg) + *lpszMsg = '\0'; + if (NULL != pcbMsgOut) + *pcbMsgOut = 0; + + /* Add the driver */ + switch (fRequest) { + case ODBC_INSTALL_DRIVER: + fSuccess = + SetDriverAttributes(lpszDriver, &errorCode, lpszMsg, cbMsgMax); + if (cbMsgMax > 0 && NULL != lpszMsg) + *pcbMsgOut = (WORD)strlen(lpszMsg); + break; + case ODBC_REMOVE_DRIVER: + break; + default: + errorCode = ODBC_ERROR_INVALID_REQUEST_TYPE; + fSuccess = FALSE; + } + + if (!fSuccess) + SQLPostInstallerError(errorCode, lpszMsg); + return fSuccess; +} + +/*------- + * CenterDialog + * + * Description: Center the dialog over the frame window + * Input : hdlg -- Dialog window handle + * Output : None + *------- + */ +static void CenterDialog(HWND hdlg) { + HWND hwndFrame; + RECT rcDlg, rcScr, rcFrame; + int cx, cy; + + hwndFrame = GetParent(hdlg); + + GetWindowRect(hdlg, 
&rcDlg); + cx = rcDlg.right - rcDlg.left; + cy = rcDlg.bottom - rcDlg.top; + + GetClientRect(hwndFrame, &rcFrame); + ClientToScreen(hwndFrame, (LPPOINT)(&rcFrame.left)); + ClientToScreen(hwndFrame, (LPPOINT)(&rcFrame.right)); + rcDlg.top = rcFrame.top + (((rcFrame.bottom - rcFrame.top) - cy) >> 1); + rcDlg.left = rcFrame.left + (((rcFrame.right - rcFrame.left) - cx) >> 1); + rcDlg.bottom = rcDlg.top + cy; + rcDlg.right = rcDlg.left + cx; + + GetWindowRect(GetDesktopWindow(), &rcScr); + if (rcDlg.bottom > rcScr.bottom) { + rcDlg.bottom = rcScr.bottom; + rcDlg.top = rcDlg.bottom - cy; + } + if (rcDlg.right > rcScr.right) { + rcDlg.right = rcScr.right; + rcDlg.left = rcDlg.right - cx; + } + + if (rcDlg.left < 0) + rcDlg.left = 0; + if (rcDlg.top < 0) + rcDlg.top = 0; + + MoveWindow(hdlg, rcDlg.left, rcDlg.top, cx, cy, TRUE); + return; +} + +/*------- + * ConfigDlgProc + * Description: Manage add data source name dialog + * Input : hdlg --- Dialog window handle + * wMsg --- Message + * wParam - Message parameter + * lParam - Message parameter + * Output : TRUE if message processed, FALSE otherwise + *------- + */ +INT_PTR CALLBACK ConfigDlgProc(HWND hdlg, UINT wMsg, WPARAM wParam, + LPARAM lParam) { + LPSETUPDLG lpsetupdlg; + ConnInfo *ci; + DWORD cmd; + + switch (wMsg) { + /* Initialize the dialog */ + case WM_INITDIALOG: + lpsetupdlg = (LPSETUPDLG)lParam; + ci = &lpsetupdlg->ci; + + SetWindowLongPtr(hdlg, DWLP_USER, lParam); + CenterDialog(hdlg); /* Center dialog */ + + /* Initialize dialog fields */ + SetDlgStuff(hdlg, ci); + + /* Save drivername */ + if (!(lpsetupdlg->ci.drivername[0])) + STRCPY_FIXED(lpsetupdlg->ci.drivername, lpsetupdlg->lpszDrvr); + + if (lpsetupdlg->fNewDSN || !ci->dsn[0]) + EnableWindow(GetDlgItem(hdlg, IDC_DSNAME), TRUE); + if (lpsetupdlg->fDefault) { + EnableWindow(GetDlgItem(hdlg, IDC_DSNAME), FALSE); + } else + SendDlgItemMessage(hdlg, IDC_DSNAME, EM_LIMITTEXT, + (WPARAM)(MAXDSNAME - 1), 0L); + + SendDlgItemMessage(hdlg, IDC_DESC, 
EM_LIMITTEXT, + (WPARAM)(MAXDESC - 1), 0L); + + if (!stricmp(ci->authtype, AUTHTYPE_IAM)) { + SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 0, + (WPARAM)0); + } else if (!stricmp(ci->authtype, AUTHTYPE_BASIC)) { + SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 1, + (WPARAM)0); + } else { // AUTHTYPE_NONE + SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 2, + (WPARAM)0); + } + + return TRUE; /* Focus was not set */ + + /* Process buttons */ + case WM_COMMAND: + lpsetupdlg = (LPSETUPDLG)GetWindowLongPtr(hdlg, DWLP_USER); + switch (cmd = GET_WM_COMMAND_ID(wParam, lParam)) { + /* + * Ensure the OK button is enabled only when a data + * source name + */ + /* is entered */ + case IDC_DSNAME: + if (GET_WM_COMMAND_CMD(wParam, lParam) == EN_CHANGE) { + char szItem[MAXDSNAME]; /* Edit control text */ + + /* Enable/disable the OK button */ + EnableWindow(GetDlgItem(hdlg, IDOK), + GetDlgItemText(hdlg, IDC_DSNAME, szItem, + sizeof(szItem))); + return TRUE; + } + break; + + /* Accept results */ + case IDOK: + /* Retrieve dialog values */ + if (!lpsetupdlg->fDefault) + GetDlgItemText(hdlg, IDC_DSNAME, lpsetupdlg->ci.dsn, + sizeof(lpsetupdlg->ci.dsn)); + + /* Get Dialog Values */ + GetDlgStuff(hdlg, &lpsetupdlg->ci); + /* Update ODBC.INI */ + SetDSNAttributes(hdlg, lpsetupdlg, NULL); + + case IDCANCEL: + EndDialog(hdlg, wParam); + return TRUE; + + case IDOK2: // <== TEST butter + { + /* Get Dialog Values */ + GetDlgStuff(hdlg, &lpsetupdlg->ci); + test_connection(lpsetupdlg->hwndParent, &lpsetupdlg->ci, + FALSE); + return TRUE; + break; + } + case ID_ADVANCED_OPTIONS: { + if (DialogBoxParam( + s_hModule, MAKEINTRESOURCE(DLG_ADVANCED_OPTIONS), + hdlg, advancedOptionsProc, (LPARAM)&lpsetupdlg->ci) > 0) + EndDialog(hdlg, 0); + break; + } + case ID_LOG_OPTIONS: { + if (DialogBoxParam( + s_hModule, MAKEINTRESOURCE(DLG_LOG_OPTIONS), hdlg, + logOptionsProc, (LPARAM)&lpsetupdlg->ci) > 0) + EndDialog(hdlg, 0); + break; + } + case IDC_AUTHTYPE: { + 
SetAuthenticationVisibility(hdlg, GetCurrentAuthMode(hdlg)); + } + } + break; + case WM_CTLCOLORSTATIC: + if (lParam == (LPARAM)GetDlgItem(hdlg, IDC_NOTICE_USER)) { + HBRUSH hBrush = (HBRUSH)GetStockObject(LTGRAY_BRUSH); + SetTextColor((HDC)wParam, RGB(255, 0, 0)); + return (LRESULT)hBrush; + } + break; + } + + /* Message not processed */ + return FALSE; +} + +#ifdef USE_PROC_ADDRESS +#define SQLALLOCHANDLEFUNC sqlallochandle +#define SQLSETENVATTRFUNC sqlsetenvattr +#define SQLDISCONNECTFUNC sqldisconnect +#define SQLFREEHANDLEFUNC sqlfreehandle +#ifdef UNICODE_SUPPORT +#define SQLGETDIAGRECFUNC sqlgetdiagrecw +#define SQLDRIVERCONNECTFUNC sqldriverconnectw +#define SQLSETCONNECTATTRFUNC sqlsetconnectattrw +#else +#define SQLGETDIAGRECFUNC sqlgetdiagrec +#define SQLDRIVERCONNECTFUNC sqldriverconnect +#define SQLSETCONNECTATTRFUNC sqlsetconnectAttr +#endif /* UNICODE_SUPPORT */ +#else +#define SQLALLOCHANDLEFUNC SQLAllocHandle +#define SQLSETENVATTRFUNC SQLSetEnvAttr +#define SQLDISCONNECTFUNC SQLDisconnect +#define SQLFREEHANDLEFUNC SQLFreeHandle +#ifdef UNICODE_SUPPORT +#define SQLGETDIAGRECFUNC SQLGetDiagRecW +#define SQLDRIVERCONNECTFUNC SQLDriverConnectW +#define SQLSETCONNECTATTRFUNC SQLSetConnectAttrW +#else +#define SQLGETDIAGRECFUNC SQLGetDiagRec +#define SQLDRIVERCONNECTFUNC SQLDriverConnect +#define SQLSETCONNECTATTRFUNC SQLSetConnectAttr +#endif /* UNICODE_SUPPORT */ +#endif /* USE_PROC_ADDRESS */ + +#define MAX_CONNECT_STRING_LEN 2048 +#ifdef UNICODE_SUPPORT +#define MESSAGEBOXFUNC MessageBoxW +#define _T(str) L##str +#define SNTPRINTF _snwprintf +#else +#define MESSAGEBOXFUNC MessageBoxA +#define _T(str) str +#define SNTPRINTF snprintf +#endif /* UNICODE_SUPPORT */ + +void test_connection(HANDLE hwnd, ConnInfo *ci, BOOL withDTC) { + SQLINTEGER errnum; + char out_conn[MAX_CONNECT_STRING_LEN]; + SQLRETURN ret; + SQLHENV env = SQL_NULL_HANDLE; + SQLHDBC conn = SQL_NULL_HANDLE; + SQLSMALLINT str_len; + char dsn_1st; + BOOL connected = FALSE; +#ifdef 
UNICODE_SUPPORT + SQLWCHAR wout_conn[MAX_CONNECT_STRING_LEN]; + SQLWCHAR szMsg[SQL_MAX_MESSAGE_LENGTH]; + const SQLWCHAR *ermsg = NULL; + SQLWCHAR *conn_str; +#else + SQLCHAR szMsg[SQL_MAX_MESSAGE_LENGTH]; + const SQLCHAR *ermsg = NULL; + SQLCHAR *conn_str; +#endif /* UNICODE_SUPPORT */ + + dsn_1st = ci->dsn[0]; + ci->dsn[0] = '\0'; + makeConnectString(out_conn, ci, sizeof(out_conn)); + MYLOG(ES_DEBUG, "conn_string=%s\n", out_conn); +#ifdef UNICODE_SUPPORT + MultiByteToWideChar(CP_ACP, MB_PRECOMPOSED, out_conn, -1, wout_conn, + sizeof(wout_conn) / sizeof(wout_conn[0])); + conn_str = wout_conn; +#else + conn_str = out_conn; +#endif /* UNICODE_SUPPORT */ + ci->dsn[0] = dsn_1st; + if (!SQL_SUCCEEDED( + ret = SQLALLOCHANDLEFUNC(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env))) { + ermsg = _T("SQLAllocHandle for env error"); + goto cleanup; + } + if (!SQL_SUCCEEDED(ret = SQLSETENVATTRFUNC(env, SQL_ATTR_ODBC_VERSION, + (SQLPOINTER)SQL_OV_ODBC3, 0))) { + SNTPRINTF(szMsg, _countof(szMsg), _T("SQLAllocHandle for env error=%d"), + ret); + goto cleanup; + } + if (!SQL_SUCCEEDED(ret = SQLALLOCHANDLEFUNC(SQL_HANDLE_DBC, env, &conn))) { + SQLGETDIAGRECFUNC(SQL_HANDLE_ENV, env, 1, NULL, &errnum, szMsg, + _countof(szMsg), &str_len); + ermsg = szMsg; + goto cleanup; + } + if (!SQL_SUCCEEDED(ret = SQLDRIVERCONNECTFUNC(conn, hwnd, conn_str, SQL_NTS, + NULL, MAX_CONNECT_STRING_LEN, + &str_len, + SQL_DRIVER_NOPROMPT))) { + SQLGETDIAGRECFUNC(SQL_HANDLE_DBC, conn, 1, NULL, &errnum, szMsg, + _countof(szMsg), &str_len); + ermsg = szMsg; + goto cleanup; + } + connected = TRUE; + ermsg = _T("Connection successful"); + + if (withDTC) { +#ifdef _HANDLE_ENLIST_IN_DTC_ + HRESULT res; + void *pObj = NULL; + + pObj = CALL_GetTransactionObject(&res); + if (NULL != pObj) { + SQLRETURN ret = SQLSETCONNECTATTRFUNC(conn, SQL_ATTR_ENLIST_IN_DTC, + (SQLPOINTER)pObj, 0); + if (SQL_SUCCEEDED(ret)) { + SQLSETCONNECTATTRFUNC(conn, SQL_ATTR_ENLIST_IN_DTC, + SQL_DTC_DONE, 0); + SNTPRINTF(szMsg, _countof(szMsg), + 
_T("%s\nenlistment was successful\n"), ermsg); + ermsg = szMsg; + } else { + int strl; + + SNTPRINTF(szMsg, _countof(szMsg), _T("%s\nMSDTC error:"), + ermsg); + for (strl = 0; strl < SQL_MAX_MESSAGE_LENGTH; strl++) { + if (!szMsg[strl]) + break; + } + SQLGETDIAGRECFUNC( + SQL_HANDLE_DBC, conn, 1, NULL, &errnum, szMsg + strl, + (SQLSMALLINT)(_countof(szMsg) - strl), &str_len); + ermsg = szMsg; + } + CALL_ReleaseTransactionObject(pObj); + } else if (FAILED(res)) { + SNTPRINTF(szMsg, _countof(szMsg), + _T("%s\nDistibuted Transaction enlistment error %x"), + ermsg, res); + ermsg = szMsg; + } +#else /* _HANDLE_ENLIST_IN_DTC_ */ + SNTPRINTF(szMsg, _countof(szMsg), + _T("%s\nDistibuted Transaction enlistment not supported by ") + _T("this driver"), + ermsg); + ermsg = szMsg; +#endif + } + +cleanup: + if (NULL != ermsg && NULL != hwnd) { + MESSAGEBOXFUNC(hwnd, ermsg, _T("Connection Test"), + MB_ICONEXCLAMATION | MB_OK); + } + +#undef _T + + if (NULL != conn) { + if (connected) + SQLDISCONNECTFUNC(conn); + SQLFREEHANDLEFUNC(SQL_HANDLE_DBC, conn); + } + if (env) + SQLFREEHANDLEFUNC(SQL_HANDLE_ENV, env); + + return; +} + +/*------- + * ParseAttributes + * + * Description: Parse attribute string moving values into the aAttr array + * Input : lpszAttributes - Pointer to attribute string + * Output : None (global aAttr normally updated) + *------- + */ +static void ParseAttributes(LPCSTR lpszAttributes, LPSETUPDLG lpsetupdlg) { + LPCSTR lpsz; + LPCSTR lpszStart; + char aszKey[MAXKEYLEN]; + size_t cbKey; + char value[MAXESPATH]; + + for (lpsz = lpszAttributes; *lpsz; lpsz++) { + /* + * Extract key name (e.g., DSN), it must be terminated by an + * equals + */ + lpszStart = lpsz; + for (;; lpsz++) { + if (!*lpsz) + return; /* No key was found */ + else if (*lpsz == '=') + break; /* Valid key found */ + } + /* Determine the key's index in the key table (-1 if not found) */ + cbKey = lpsz - lpszStart; + if (cbKey < sizeof(aszKey)) { + memcpy(aszKey, lpszStart, cbKey); + aszKey[cbKey] 
= '\0'; + } + + /* Locate end of key value */ + lpszStart = ++lpsz; + for (; *lpsz; lpsz++) + ; + + /* lpsetupdlg->aAttr[iElement].fSupplied = TRUE; */ + memcpy(value, lpszStart, MIN(lpsz - lpszStart + 1, MAXESPATH)); + + MYLOG(ES_DEBUG, "aszKey='%s', value='%s'\n", aszKey, value); + + /* Copy the appropriate value to the conninfo */ + copyConnAttributes(&lpsetupdlg->ci, aszKey, value); + } + return; +} + +/*-------- + * SetDSNAttributes + * + * Description: Write data source attributes to ODBC.INI + * Input : hwnd - Parent window handle (plus globals) + * Output : TRUE if successful, FALSE otherwise + *-------- + */ +static BOOL SetDSNAttributes(HWND hwndParent, LPSETUPDLG lpsetupdlg, + DWORD *errcode) { + LPCSTR lpszDSN; /* Pointer to data source name */ + + lpszDSN = lpsetupdlg->ci.dsn; + + if (errcode) + *errcode = 0; + /* Validate arguments */ + if (lpsetupdlg->fNewDSN && !*lpsetupdlg->ci.dsn) + return FALSE; + + /* Write the data source name */ + if (!SQLWriteDSNToIni(lpszDSN, lpsetupdlg->lpszDrvr)) { + RETCODE ret = SQL_ERROR; + DWORD err = (DWORD)SQL_ERROR; + char szMsg[SQL_MAX_MESSAGE_LENGTH]; + + ret = SQLInstallerError(1, &err, szMsg, sizeof(szMsg), NULL); + if (hwndParent) { + char szBuf[MAXESPATH]; + + if (SQL_SUCCESS != ret) { + LoadString(s_hModule, IDS_BADDSN, szBuf, sizeof(szBuf)); + SPRINTF_FIXED(szMsg, szBuf, lpszDSN); + } + LoadString(s_hModule, IDS_MSGTITLE, szBuf, sizeof(szBuf)); + MessageBox(hwndParent, szMsg, szBuf, MB_ICONEXCLAMATION | MB_OK); + } + if (errcode) + *errcode = err; + return FALSE; + } + + /* Update ODBC.INI */ + write_Ci_Drivers(ODBC_INI, lpsetupdlg->ci.dsn, &(lpsetupdlg->ci.drivers)); + writeDSNinfo(&lpsetupdlg->ci); + + /* If the data source name has changed, remove the old name */ + if (lstrcmpi(lpsetupdlg->szDSN, lpsetupdlg->ci.dsn)) + SQLRemoveDSNFromIni(lpsetupdlg->szDSN); + return TRUE; +} + +/*-------- + * SetDriverAttributes + * + * Description: Write driver information attributes to ODBCINST.INI + * Input : 
lpszDriver - The driver name + * Output : TRUE if successful, FALSE otherwise + *-------- + */ +static BOOL SetDriverAttributes(LPCSTR lpszDriver, DWORD *pErrorCode, + LPSTR message, WORD cbMessage) { + BOOL ret = FALSE; + char ver_string[8]; + + /* Validate arguments */ + if (!lpszDriver || !lpszDriver[0]) { + if (pErrorCode) + *pErrorCode = ODBC_ERROR_INVALID_NAME; + strncpy_null(message, "Driver name not specified", cbMessage); + return FALSE; + } + + if (!SQLWritePrivateProfileString(lpszDriver, "APILevel", "1", + ODBCINST_INI)) + goto cleanup; + if (!SQLWritePrivateProfileString(lpszDriver, "ConnectFunctions", "YYN", + ODBCINST_INI)) + goto cleanup; + SPRINTF_FIXED(ver_string, "%02x.%02x", ODBCVER / 256, ODBCVER % 256); + if (!SQLWritePrivateProfileString(lpszDriver, "DriverODBCVer", ver_string, + ODBCINST_INI)) + goto cleanup; + if (!SQLWritePrivateProfileString(lpszDriver, "FileUsage", "0", + ODBCINST_INI)) + goto cleanup; + if (!SQLWritePrivateProfileString(lpszDriver, "SQLLevel", "1", + ODBCINST_INI)) + goto cleanup; + + ret = TRUE; +cleanup: + if (!ret) { + if (pErrorCode) + *pErrorCode = ODBC_ERROR_REQUEST_FAILED; + strncpy_null(message, "Failed to WritePrivateProfileString", cbMessage); + } + return ret; +} + +BOOL INTFUNC ChangeDriverName(HWND hwndParent, LPSETUPDLG lpsetupdlg, + LPCSTR driver_name) { + DWORD err = 0; + ConnInfo *ci = &lpsetupdlg->ci; + + if (!ci->dsn[0]) { + err = IDS_BADDSN; + } else if (!driver_name || strnicmp(driver_name, "elasticsearch", 13)) { + err = IDS_BADDSN; + } else { + LPCSTR lpszDrvr = lpsetupdlg->lpszDrvr; + + lpsetupdlg->lpszDrvr = driver_name; + if (!SetDSNAttributes(hwndParent, lpsetupdlg, &err)) { + if (!err) + err = IDS_BADDSN; + lpsetupdlg->lpszDrvr = lpszDrvr; + } + } + return (err == 0); +} + +#endif /* WIN32 */ diff --git a/sql-odbc/src/odfesqlodbc/statement.c b/sql-odbc/src/odfesqlodbc/statement.c new file mode 100644 index 0000000000..af5edf49fc --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/statement.c @@ -0,0 
+1,1479 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "statement.h" +#include "misc.h" // strncpy_null + +#include "bind.h" +#include "es_connection.h" +#include "multibyte.h" +#include "qresult.h" +#include "convert.h" +#include "environ.h" +#include "loadlib.h" + +#include +#include +#include + +#include "es_apifunc.h" +#include "es_helper.h" +#include "es_statement.h" +// clang-format on + +/* Map sql commands to statement types */ +static const struct { + int type; + char *s; +} Statement_Type[] = + + {{STMT_TYPE_SELECT, "SELECT"}, + {STMT_TYPE_INSERT, "INSERT"}, + {STMT_TYPE_UPDATE, "UPDATE"}, + {STMT_TYPE_DELETE, "DELETE"}, + {STMT_TYPE_PROCCALL, "{"}, + {STMT_TYPE_SET, "SET"}, + {STMT_TYPE_RESET, "RESET"}, + {STMT_TYPE_CREATE, "CREATE"}, + {STMT_TYPE_DECLARE, "DECLARE"}, + {STMT_TYPE_FETCH, "FETCH"}, + {STMT_TYPE_MOVE, "MOVE"}, + {STMT_TYPE_CLOSE, "CLOSE"}, + {STMT_TYPE_PREPARE, "PREPARE"}, + {STMT_TYPE_EXECUTE, "EXECUTE"}, + {STMT_TYPE_DEALLOCATE, "DEALLOCATE"}, + {STMT_TYPE_DROP, "DROP"}, + {STMT_TYPE_START, "BEGIN"}, + {STMT_TYPE_START, "START"}, + {STMT_TYPE_TRANSACTION, "SAVEPOINT"}, + {STMT_TYPE_TRANSACTION, "RELEASE"}, + {STMT_TYPE_TRANSACTION, "COMMIT"}, + {STMT_TYPE_TRANSACTION, "END"}, + {STMT_TYPE_TRANSACTION, "ROLLBACK"}, + {STMT_TYPE_TRANSACTION, "ABORT"}, + {STMT_TYPE_LOCK, "LOCK"}, + {STMT_TYPE_ALTER, "ALTER"}, + {STMT_TYPE_GRANT, "GRANT"}, + 
{STMT_TYPE_REVOKE, "REVOKE"}, + {STMT_TYPE_COPY, "COPY"}, + {STMT_TYPE_ANALYZE, "ANALYZE"}, + {STMT_TYPE_NOTIFY, "NOTIFY"}, + {STMT_TYPE_EXPLAIN, "EXPLAIN"} + + /* + * Special-commands that cannot be run in a transaction block. This isn't + * as granular as it could be. VACUUM can never be run in a transaction + * block, but some variants of REINDEX and CLUSTER can be. CHECKPOINT + * doesn't throw an error if you do, but it cannot be rolled back so + * there's no point in beginning a new transaction for it. + */ + , + {STMT_TYPE_SPECIAL, "VACUUM"}, + {STMT_TYPE_SPECIAL, "REINDEX"}, + {STMT_TYPE_SPECIAL, "CLUSTER"}, + {STMT_TYPE_SPECIAL, "CHECKPOINT"} + + , + {STMT_TYPE_WITH, "WITH"}, + {0, NULL}}; + +static void SC_set_error_if_not_set(StatementClass *self, int errornumber, + const char *errmsg, const char *func); + +RETCODE SQL_API ESAPI_AllocStmt(HDBC hdbc, HSTMT *phstmt, UDWORD flag) { + CSTR func = "ESAPI_AllocStmt"; + ConnectionClass *conn = (ConnectionClass *)hdbc; + StatementClass *stmt; + ARDFields *ardopts; + + MYLOG(ES_TRACE, "entering...\n"); + + if (!conn) { + CC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + + stmt = SC_Constructor(conn); + + MYLOG(ES_DEBUG, "**** : hdbc = %p, stmt = %p\n", hdbc, stmt); + + if (!stmt) { + CC_set_error(conn, CONN_STMT_ALLOC_ERROR, + "No more memory to allocate a further SQL-statement", + func); + *phstmt = SQL_NULL_HSTMT; + return SQL_ERROR; + } + + if (!CC_add_statement(conn, stmt)) { + CC_set_error(conn, CONN_STMT_ALLOC_ERROR, + "Maximum number of statements exceeded.", func); + SC_Destructor(stmt); + *phstmt = SQL_NULL_HSTMT; + return SQL_ERROR; + } + + *phstmt = (HSTMT)stmt; + + stmt->iflag = flag; + /* Copy default statement options based from Connection options */ + if (0 != (PODBC_INHERIT_CONNECT_OPTIONS & flag)) { + stmt->options = stmt->options_orig = conn->stmtOptions; + stmt->ardi.ardf = conn->ardOptions; + } else { + InitializeStatementOptions(&stmt->options_orig); + stmt->options = 
stmt->options_orig; + InitializeARDFields(&stmt->ardi.ardf); + } + ardopts = SC_get_ARDF(stmt); + ARD_AllocBookmark(ardopts); + + /* Save the handle for later */ + stmt->phstmt = phstmt; + + return SQL_SUCCESS; +} + +RETCODE SQL_API ESAPI_FreeStmt(HSTMT hstmt, SQLUSMALLINT fOption) { + CSTR func = "ESAPI_FreeStmt"; + StatementClass *stmt = (StatementClass *)hstmt; + + MYLOG(ES_TRACE, "entering...hstmt=%p, fOption=%hi\n", hstmt, fOption); + + if (!stmt) { + SC_log_error(func, "", NULL); + return SQL_INVALID_HANDLE; + } + SC_clear_error(stmt); + + if (fOption == SQL_DROP) { + ConnectionClass *conn = stmt->hdbc; + + ESStopRetrieval(conn->esconn); + + /* Remove the statement from the connection's statement list */ + if (conn) { + QResultClass *res; + + if (STMT_EXECUTING == stmt->status) { + SC_set_error(stmt, STMT_SEQUENCE_ERROR, + "Statement is currently executing a transaction.", + func); + return SQL_ERROR; /* stmt may be executing a transaction */ + } + if (conn->unnamed_prepared_stmt == stmt) + conn->unnamed_prepared_stmt = NULL; + + res = SC_get_Result(stmt); + QR_Destructor(res); + SC_init_Result(stmt); + if (!CC_remove_statement(conn, stmt)) { + SC_set_error(stmt, STMT_SEQUENCE_ERROR, + "Statement is currently executing a transaction.", + func); + return SQL_ERROR; /* stmt may be executing a + * transaction */ + } + } + + if (stmt->execute_delegate) { + ESAPI_FreeStmt(stmt->execute_delegate, SQL_DROP); + stmt->execute_delegate = NULL; + } + if (stmt->execute_parent) + stmt->execute_parent->execute_delegate = NULL; + /* Destroy the statement and free any results, cursors, etc. 
*/ + SC_Destructor(stmt); + } else if (fOption == SQL_UNBIND) + SC_unbind_cols(stmt); + else if (fOption == SQL_CLOSE) { + ESStopRetrieval(stmt->hdbc->esconn); + + /* + * this should discard all the results, but leave the statement + * itself in place (it can be executed again) + */ + stmt->transition_status = STMT_TRANSITION_ALLOCATED; + if (stmt->execute_delegate) { + ESAPI_FreeStmt(stmt->execute_delegate, SQL_DROP); + stmt->execute_delegate = NULL; + } + if (!SC_recycle_statement(stmt)) { + return SQL_ERROR; + } + SC_set_Curres(stmt, NULL); + } else if (fOption == SQL_RESET_PARAMS) + ; + else { + SC_set_error(stmt, STMT_OPTION_OUT_OF_RANGE_ERROR, + "Invalid option passed to ESAPI_FreeStmt.", func); + return SQL_ERROR; + } + + return SQL_SUCCESS; +} + +/* + * StatementClass implementation + */ +void InitializeStatementOptions(StatementOptions *opt) { + memset(opt, 0, sizeof(StatementOptions)); + opt->scroll_concurrency = SQL_CONCUR_READ_ONLY; + opt->cursor_type = SQL_CURSOR_FORWARD_ONLY; + opt->retrieve_data = SQL_RD_ON; + opt->use_bookmarks = SQL_UB_OFF; + opt->metadata_id = SQL_FALSE; +} + +static void SC_clear_parse_status(StatementClass *self, ConnectionClass *conn) { + UNUSED(self, conn); + self->parse_status = STMT_PARSE_NONE; +} + +static void SC_init_discard_output_params(StatementClass *self) { + ConnectionClass *conn = SC_get_conn(self); + + if (!conn) + return; + self->discard_output_params = 0; +} + +static void SC_init_parse_method(StatementClass *self) { + ConnectionClass *conn = SC_get_conn(self); + + self->parse_method = 0; + if (!conn) + return; + if (0 == (PODBC_EXTERNAL_STATEMENT & self->iflag)) + return; + if (self->catalog_result) + return; +} + +StatementClass *SC_Constructor(ConnectionClass *conn) { + StatementClass *rv; + + rv = (StatementClass *)malloc(sizeof(StatementClass)); + if (rv) { + rv->hdbc = conn; + rv->phstmt = NULL; + rv->result = NULL; + rv->curres = NULL; + rv->catalog_result = FALSE; + rv->prepare = NON_PREPARE_STATEMENT; + 
rv->prepared = NOT_PREPARED; + rv->status = STMT_ALLOCATED; + rv->external = FALSE; + rv->iflag = 0; + rv->plan_name = NULL; + rv->transition_status = STMT_TRANSITION_UNALLOCATED; + rv->multi_statement = -1; /* unknown */ + rv->num_params = -1; /* unknown */ + rv->processed_statements = NULL; + + rv->__error_message = NULL; + rv->__error_number = 0; + rv->eserror = NULL; + + rv->statement = NULL; + rv->load_statement = NULL; + rv->statement_type = STMT_TYPE_UNKNOWN; + + rv->currTuple = -1; + rv->rowset_start = 0; + SC_set_rowset_start(rv, -1, FALSE); + rv->current_col = -1; + rv->bind_row = 0; + rv->from_pos = rv->load_from_pos = rv->where_pos = -1; + rv->last_fetch_count = rv->last_fetch_count_include_ommitted = 0; + rv->save_rowset_size = -1; + + rv->data_at_exec = -1; + rv->put_data = FALSE; + rv->ref_CC_error = FALSE; + rv->join_info = 0; + SC_init_parse_method(rv); + + rv->lobj_fd = -1; + INIT_NAME(rv->cursor_name); + + /* Parse Stuff */ + rv->ti = NULL; + rv->ntab = 0; + rv->num_key_fields = -1; /* unknown */ + SC_clear_parse_status(rv, conn); + rv->proc_return = -1; + SC_init_discard_output_params(rv); + rv->cancel_info = 0; + + /* Clear Statement Options -- defaults will be set in AllocStmt */ + memset(&rv->options, 0, sizeof(StatementOptions)); + InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->ardi), rv, + SQL_ATTR_APP_ROW_DESC); + InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->apdi), rv, + SQL_ATTR_APP_PARAM_DESC); + InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->irdi), rv, + SQL_ATTR_IMP_ROW_DESC); + InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->ipdi), rv, + SQL_ATTR_IMP_PARAM_DESC); + + rv->miscinfo = 0; + rv->execinfo = 0; + rv->rb_or_tc = 0; + SC_reset_updatable(rv); + rv->diag_row_count = 0; + rv->stmt_time = 0; + rv->execute_delegate = NULL; + rv->execute_parent = NULL; + rv->allocated_callbacks = 0; + rv->num_callbacks = 0; + rv->callbacks = NULL; + GetDataInfoInitialize(SC_get_GDTI(rv)); + 
PutDataInfoInitialize(SC_get_PDTI(rv)); + rv->lock_CC_for_rb = FALSE; + INIT_STMT_CS(rv); + } + return rv; +} + +char SC_Destructor(StatementClass *self) { + CSTR func = "SC_Destructor"; + QResultClass *res = SC_get_Result(self); + + MYLOG(ES_TRACE, "entering self=%p, self->result=%p, self->hdbc=%p\n", self, + res, self->hdbc); + SC_clear_error(self); + if (STMT_EXECUTING == self->status) { + SC_set_error(self, STMT_SEQUENCE_ERROR, + "Statement is currently executing a transaction.", func); + return FALSE; + } + + if (res) { + if (!self->hdbc) + res->conn = NULL; /* prevent any dbase activity */ + + QR_Destructor(res); + } + + SC_initialize_stmts(self, TRUE); + + /* Free the parsed table information */ + SC_initialize_cols_info(self, FALSE, TRUE); + + NULL_THE_NAME(self->cursor_name); + /* Free the parsed field information */ + DC_Destructor((DescriptorClass *)SC_get_ARDi(self)); + DC_Destructor((DescriptorClass *)SC_get_APDi(self)); + DC_Destructor((DescriptorClass *)SC_get_IRDi(self)); + DC_Destructor((DescriptorClass *)SC_get_IPDi(self)); + GDATA_unbind_cols(SC_get_GDTI(self), TRUE); + PDATA_free_params(SC_get_PDTI(self), STMT_FREE_PARAMS_ALL); + + if (self->__error_message) + free(self->__error_message); + if (self->eserror) + ER_Destructor(self->eserror); + cancelNeedDataState(self); + if (self->callbacks) + free(self->callbacks); + + DELETE_STMT_CS(self); + free(self); + + MYLOG(ES_TRACE, "leaving\n"); + + return TRUE; +} + +void SC_init_Result(StatementClass *self) { + self->result = self->curres = NULL; + MYLOG(ES_TRACE, "leaving(%p)\n", self); +} + +void SC_set_Result(StatementClass *self, QResultClass *res) { + if (res != self->result) { + MYLOG(ES_DEBUG, "(%p, %p)\n", self, res); + QR_Destructor(self->result); + self->result = self->curres = res; + } +} + +int statement_type(const char *statement) { + int i; + + /* ignore leading whitespace in query string */ + while (*statement && (isspace((UCHAR)*statement) || *statement == '(')) + statement++; + + for 
(i = 0; Statement_Type[i].s; i++) + if (!strnicmp(statement, Statement_Type[i].s, + strlen(Statement_Type[i].s))) + return Statement_Type[i].type; + + return STMT_TYPE_OTHER; +} + +void SC_set_planname(StatementClass *stmt, const char *plan_name) { + if (stmt->plan_name) + free(stmt->plan_name); + if (plan_name && plan_name[0]) + stmt->plan_name = strdup(plan_name); + else + stmt->plan_name = NULL; +} + +void SC_set_rowset_start(StatementClass *stmt, SQLLEN start, BOOL valid_base) { + QResultClass *res = SC_get_Curres(stmt); + SQLLEN incr = start - stmt->rowset_start; + + MYLOG(ES_DEBUG, "%p->SC_set_rowstart " FORMAT_LEN "->" FORMAT_LEN "(%s) ", + stmt, stmt->rowset_start, start, valid_base ? "valid" : "unknown"); + if (res != NULL) { + BOOL valid = QR_has_valid_base(res); + MYPRINTF(ES_DEBUG, ":(%p)QR is %s", res, + QR_has_valid_base(res) ? "valid" : "unknown"); + + if (valid) { + if (valid_base) + QR_inc_rowstart_in_cache(res, incr); + else + QR_set_no_valid_base(res); + } else if (valid_base) { + QR_set_has_valid_base(res); + if (start < 0) + QR_set_rowstart_in_cache(res, -1); + else + QR_set_rowstart_in_cache(res, start); + } + if (!QR_get_cursor(res)) + res->key_base = start; + MYPRINTF(ES_DEBUG, ":(%p)QR result=" FORMAT_LEN "(%s)", res, + QR_get_rowstart_in_cache(res), + QR_has_valid_base(res) ? 
"valid" : "unknown"); + } + stmt->rowset_start = start; + MYPRINTF(ES_DEBUG, ":stmt result=" FORMAT_LEN "\n", stmt->rowset_start); +} +void SC_inc_rowset_start(StatementClass *stmt, SQLLEN inc) { + SQLLEN start = stmt->rowset_start + inc; + + SC_set_rowset_start(stmt, start, TRUE); +} +int SC_set_current_col(StatementClass *stmt, int col) { + if (col == stmt->current_col) + return col; + if (col >= 0) + reset_a_getdata_info(SC_get_GDTI(stmt), col + 1); + stmt->current_col = (short)col; + + return stmt->current_col; +} + +void SC_set_prepared(StatementClass *stmt, int prepared) { + if (NOT_PREPARED == prepared) + SC_set_planname(stmt, NULL); + + // po_ind_t -> char + stmt->prepared = (po_ind_t)prepared; +} + +/* + * Initialize stmt_with_params and load_statement member pointer + * deallocating corresponding prepared plan. Also initialize + * statement member pointer if specified. + */ +RETCODE +SC_initialize_stmts(StatementClass *self, BOOL initializeOriginal) { + ProcessedStmt *pstmt; + ProcessedStmt *next_pstmt; + + if (self->lock_CC_for_rb) { + LEAVE_CONN_CS(SC_get_conn(self)); + self->lock_CC_for_rb = FALSE; + } + if (initializeOriginal) { + if (self->statement) { + free(self->statement); + self->statement = NULL; + } + + pstmt = self->processed_statements; + while (pstmt) { + if (pstmt->query) + free(pstmt->query); + next_pstmt = pstmt->next; + free(pstmt); + pstmt = next_pstmt; + } + self->processed_statements = NULL; + + self->prepare = NON_PREPARE_STATEMENT; + SC_set_prepared(self, NOT_PREPARED); + self->statement_type = STMT_TYPE_UNKNOWN; /* unknown */ + self->multi_statement = -1; /* unknown */ + self->num_params = -1; /* unknown */ + self->proc_return = -1; /* unknown */ + self->join_info = 0; + SC_init_parse_method(self); + SC_init_discard_output_params(self); + } + if (self->load_statement) { + free(self->load_statement); + self->load_statement = NULL; + } + + return 0; +} + +BOOL SC_opencheck(StatementClass *self, const char *func) { + QResultClass 
*res; + + if (!self) + return FALSE; + if (self->status == STMT_EXECUTING) { + SC_set_error(self, STMT_SEQUENCE_ERROR, + "Statement is currently executing a transaction.", func); + return TRUE; + } + /* + * We can dispose the result of Describe-only any time. + */ + if (self->prepare && self->status == STMT_DESCRIBED) { + MYLOG(ES_DEBUG, "self->prepare && self->status == STMT_DESCRIBED\n"); + return FALSE; + } + if (res = SC_get_Curres(self), NULL != res) { + if (QR_command_maybe_successful(res) && res->backend_tuples) { + SC_set_error(self, STMT_SEQUENCE_ERROR, "The cursor is open.", + func); + return TRUE; + } + } + + return FALSE; +} + +RETCODE +SC_initialize_and_recycle(StatementClass *self) { + SC_initialize_stmts(self, TRUE); + if (!SC_recycle_statement(self)) + return SQL_ERROR; + + return SQL_SUCCESS; +} + +void SC_reset_result_for_rerun(StatementClass *self) { + QResultClass *res; + ColumnInfoClass *flds; + + if (!self) + return; + if (res = SC_get_Result(self), NULL == res) + return; + flds = QR_get_fields(res); + if (NULL == flds || 0 == CI_get_num_fields(flds)) + SC_set_Result(self, NULL); + else { + QR_reset_for_re_execute(res); + SC_set_Curres(self, NULL); + } +} + +/* + * Called from SQLPrepare if STMT_PREMATURE, or + * from SQLExecute if STMT_FINISHED, or + * from SQLFreeStmt(SQL_CLOSE) + */ +char SC_recycle_statement(StatementClass *self) { + CSTR func = "SC_recycle_statement"; + ConnectionClass *conn; + + MYLOG(ES_TRACE, "entering self=%p\n", self); + + SC_clear_error(self); + /* This would not happen */ + if (self->status == STMT_EXECUTING) { + SC_set_error(self, STMT_SEQUENCE_ERROR, + "Statement is currently executing a transaction.", func); + return FALSE; + } + + if (SC_get_conn(self)->unnamed_prepared_stmt == self) + SC_get_conn(self)->unnamed_prepared_stmt = NULL; + + conn = SC_get_conn(self); + switch (self->status) { + case STMT_ALLOCATED: + /* this statement does not need to be recycled */ + return TRUE; + + case STMT_READY: + break; + + 
case STMT_DESCRIBED: + break; + + case STMT_FINISHED: + break; + + default: + SC_set_error(self, STMT_INTERNAL_ERROR, + "An internal error occured while recycling statements", + func); + return FALSE; + } + + switch (self->prepared) { + case NOT_PREPARED: + /* Free the parsed table/field information */ + SC_initialize_cols_info(self, TRUE, TRUE); + + MYLOG(ES_DEBUG, "SC_clear_parse_status\n"); + SC_clear_parse_status(self, conn); + break; + } + + /* Free any cursors */ + if (SC_get_Result(self)) + SC_set_Result(self, NULL); + self->miscinfo = 0; + self->execinfo = 0; + /* self->rbonerr = 0; Never clear the bits here */ + + /* + * Reset only parameters that have anything to do with results + */ + self->status = STMT_READY; + self->catalog_result = FALSE; /* not very important */ + + self->currTuple = -1; + SC_set_rowset_start(self, -1, FALSE); + SC_set_current_col(self, -1); + self->bind_row = 0; + MYLOG(ES_DEBUG, "statement=%p ommitted=0\n", self); + self->last_fetch_count = self->last_fetch_count_include_ommitted = 0; + + self->__error_message = NULL; + self->__error_number = 0; + + self->lobj_fd = -1; + + SC_initialize_stmts(self, FALSE); + cancelNeedDataState(self); + self->cancel_info = 0; + /* + * reset the current attr setting to the original one. 
+ */ + self->options.scroll_concurrency = self->options_orig.scroll_concurrency; + self->options.cursor_type = self->options_orig.cursor_type; + self->options.keyset_size = self->options_orig.keyset_size; + self->options.maxLength = self->options_orig.maxLength; + self->options.maxRows = self->options_orig.maxRows; + + return TRUE; +} + +/* This is only called from SQLFreeStmt(SQL_UNBIND) */ +char SC_unbind_cols(StatementClass *self) { + ARDFields *opts = SC_get_ARDF(self); + GetDataInfo *gdata = SC_get_GDTI(self); + BindInfoClass *bookmark; + + ARD_unbind_cols(opts, FALSE); + GDATA_unbind_cols(gdata, FALSE); + if (bookmark = opts->bookmark, bookmark != NULL) { + bookmark->buffer = NULL; + bookmark->used = NULL; + } + + return 1; +} + +void SC_clear_error(StatementClass *self) { + QResultClass *res; + + self->__error_number = 0; + if (self->__error_message) { + free(self->__error_message); + self->__error_message = NULL; + } + if (self->eserror) { + ER_Destructor(self->eserror); + self->eserror = NULL; + } + self->diag_row_count = 0; + if (res = SC_get_Curres(self), res) { + QR_set_message(res, NULL); + QR_set_notice(res, NULL); + res->sqlstate[0] = '\0'; + } + self->stmt_time = 0; + memset(&self->localtime, 0, sizeof(self->localtime)); + self->localtime.tm_sec = -1; + SC_unref_CC_error(self); +} + +/* + * This function creates an error info which is the concatenation + * of the result, statement, connection, and socket messages. 
+ */ + +/* Map sql commands to statement types */ +static const struct { + int number; + const char ver3str[6]; + const char ver2str[6]; +} Statement_sqlstate[] = + + {{STMT_ERROR_IN_ROW, "01S01", "01S01"}, + {STMT_OPTION_VALUE_CHANGED, "01S02", "01S02"}, + {STMT_ROW_VERSION_CHANGED, "01001", "01001"}, /* data changed */ + {STMT_POS_BEFORE_RECORDSET, "01S06", "01S06"}, + {STMT_TRUNCATED, "01004", "01004"}, /* data truncated */ + {STMT_INFO_ONLY, "00000", + "00000"}, /* just an information that is returned, no error */ + + {STMT_OK, "00000", "00000"}, /* OK */ + {STMT_EXEC_ERROR, "HY000", "S1000"}, /* also a general error */ + {STMT_STATUS_ERROR, "HY010", "S1010"}, + {STMT_SEQUENCE_ERROR, "HY010", "S1010"}, /* Function sequence error */ + {STMT_NO_MEMORY_ERROR, "HY001", "S1001"}, /* memory allocation failure */ + {STMT_COLNUM_ERROR, "07009", "S1002"}, /* invalid column number */ + {STMT_NO_STMTSTRING, "HY001", + "S1001"}, /* having no stmtstring is also a malloc problem */ + {STMT_ERROR_TAKEN_FROM_BACKEND, "HY000", "S1000"}, /* general error */ + {STMT_INTERNAL_ERROR, "HY000", "S1000"}, /* general error */ + {STMT_STILL_EXECUTING, "HY010", "S1010"}, + {STMT_NOT_IMPLEMENTED_ERROR, "HYC00", "S1C00"}, /* == 'driver not + * capable' */ + {STMT_BAD_PARAMETER_NUMBER_ERROR, "07009", "S1093"}, + {STMT_OPTION_OUT_OF_RANGE_ERROR, "HY092", "S1092"}, + {STMT_INVALID_COLUMN_NUMBER_ERROR, "07009", "S1002"}, + {STMT_RESTRICTED_DATA_TYPE_ERROR, "07006", "07006"}, + {STMT_INVALID_CURSOR_STATE_ERROR, "07005", "24000"}, + {STMT_CREATE_TABLE_ERROR, "42S01", "S0001"}, /* table already exists */ + {STMT_NO_CURSOR_NAME, "S1015", "S1015"}, + {STMT_INVALID_CURSOR_NAME, "34000", "34000"}, + {STMT_INVALID_ARGUMENT_NO, "HY024", "S1009"}, /* invalid argument value */ + {STMT_ROW_OUT_OF_RANGE, "HY107", "S1107"}, + {STMT_OPERATION_CANCELLED, "HY008", "S1008"}, + {STMT_INVALID_CURSOR_POSITION, "HY109", "S1109"}, + {STMT_VALUE_OUT_OF_RANGE, "HY019", "22003"}, + {STMT_OPERATION_INVALID, "HY011", 
"S1011"}, + {STMT_PROGRAM_TYPE_OUT_OF_RANGE, "?????", "?????"}, + {STMT_BAD_ERROR, "08S01", "08S01"}, /* communication link failure */ + {STMT_INVALID_OPTION_IDENTIFIER, "HY092", "HY092"}, + {STMT_RETURN_NULL_WITHOUT_INDICATOR, "22002", "22002"}, + {STMT_INVALID_DESCRIPTOR_IDENTIFIER, "HY091", "HY091"}, + {STMT_OPTION_NOT_FOR_THE_DRIVER, "HYC00", "HYC00"}, + {STMT_FETCH_OUT_OF_RANGE, "HY106", "S1106"}, + {STMT_COUNT_FIELD_INCORRECT, "07002", "07002"}, + {STMT_INVALID_NULL_ARG, "HY009", "S1009"}, + {STMT_NO_RESPONSE, "08S01", "08S01"}, + {STMT_COMMUNICATION_ERROR, "08S01", "08S01"}}; + +static ES_ErrorInfo *SC_create_errorinfo(const StatementClass *self, + ES_ErrorInfo *eserror_fail_safe) { + QResultClass *res = SC_get_Curres(self); + ConnectionClass *conn = SC_get_conn(self); + Int4 errornum; + size_t pos; + BOOL resmsg = FALSE, detailmsg = FALSE, msgend = FALSE; + BOOL looponce, loopend; + char msg[4096], *wmsg; + char *ermsg = NULL, *sqlstate = NULL; + ES_ErrorInfo *eserror; + + if (self->eserror) + return self->eserror; + errornum = self->__error_number; + if (errornum == 0) + return NULL; + + looponce = (SC_get_Result(self) != res); + msg[0] = '\0'; + for (loopend = FALSE; (NULL != res) && !loopend; res = res->next) { + if (looponce) + loopend = TRUE; + if ('\0' != res->sqlstate[0]) { + if (NULL != sqlstate && strnicmp(res->sqlstate, "00", 2) == 0) + continue; + sqlstate = res->sqlstate; + if ('0' != sqlstate[0] || '1' < sqlstate[1]) + loopend = TRUE; + } + if (NULL != res->message) { + STRCPY_FIXED(msg, res->message); + detailmsg = resmsg = TRUE; + } else if (NULL != res->messageref) { + STRCPY_FIXED(msg, res->messageref); + detailmsg = resmsg = TRUE; + } + if (msg[0]) + ermsg = msg; + else if (QR_get_notice(res)) { + char *notice = QR_get_notice(res); + size_t len = strlen(notice); + if (len < sizeof(msg)) { + memcpy(msg, notice, len); + msg[len] = '\0'; + ermsg = msg; + } else { + ermsg = notice; + msgend = TRUE; + } + } + } + if (!msgend && (wmsg = 
SC_get_errormsg(self), wmsg) && wmsg[0]) { + pos = strlen(msg); + + snprintf(&msg[pos], sizeof(msg) - pos, "%s%s", detailmsg ? ";\n" : "", + wmsg); + ermsg = msg; + detailmsg = TRUE; + } + if (!self->ref_CC_error) + msgend = TRUE; + + if (conn && !msgend) { + if (!resmsg && (wmsg = CC_get_errormsg(conn), wmsg) + && wmsg[0] != '\0') { + pos = strlen(msg); + snprintf(&msg[pos], sizeof(msg) - pos, ";\n%s", + CC_get_errormsg(conn)); + } + + ermsg = msg; + } + eserror = ER_Constructor(self->__error_number, ermsg); + if (!eserror) { + if (eserror_fail_safe) { + memset(eserror_fail_safe, 0, sizeof(*eserror_fail_safe)); + eserror = eserror_fail_safe; + eserror->status = self->__error_number; + eserror->errorsize = sizeof(eserror->__error_message); + STRCPY_FIXED(eserror->__error_message, ermsg); + eserror->recsize = -1; + } else + return NULL; + } + if (sqlstate) + STRCPY_FIXED(eserror->sqlstate, sqlstate); + else if (conn) { + if (!msgend && conn->sqlstate[0]) + STRCPY_FIXED(eserror->sqlstate, conn->sqlstate); + else { + EnvironmentClass *env = (EnvironmentClass *)CC_get_env(conn); + + errornum -= LOWEST_STMT_ERROR; + if (errornum < 0 + || (unsigned long long)errornum + >= sizeof(Statement_sqlstate) + / sizeof(Statement_sqlstate[0])) { + errornum = 1 - LOWEST_STMT_ERROR; + } + STRCPY_FIXED(eserror->sqlstate, + EN_is_odbc3(env) + ? Statement_sqlstate[errornum].ver3str + : Statement_sqlstate[errornum].ver2str); + } + } + + return eserror; +} + +void SC_reset_delegate(RETCODE retcode, StatementClass *stmt) { + UNUSED(retcode); + StatementClass *delegate = stmt->execute_delegate; + + if (!delegate) + return; + ESAPI_FreeStmt(delegate, SQL_DROP); +} + +void SC_set_error(StatementClass *self, int number, const char *message, + const char *func) { + if (self->__error_message) + free(self->__error_message); + self->__error_number = number; + self->__error_message = message ? 
strdup(message) : NULL; + if (func && number != STMT_OK && number != STMT_INFO_ONLY) + SC_log_error(func, "", self); +} + +void SC_set_errormsg(StatementClass *self, const char *message) { + if (self->__error_message) + free(self->__error_message); + self->__error_message = message ? strdup(message) : NULL; +} + +void SC_error_copy(StatementClass *self, const StatementClass *from, + BOOL check) { + QResultClass *self_res, *from_res; + BOOL repstate; + + MYLOG(ES_TRACE, "entering %p->%p check=%i\n", from, self, check); + if (!from) + return; /* for safety */ + if (self == from) + return; /* for safety */ + if (check) { + if (0 == from->__error_number) /* SQL_SUCCESS */ + return; + if (0 > from->__error_number && /* SQL_SUCCESS_WITH_INFO */ + 0 < self->__error_number) + return; + } + self->__error_number = from->__error_number; + if (!check || from->__error_message) { + if (self->__error_message) + free(self->__error_message); + self->__error_message = + from->__error_message ? strdup(from->__error_message) : NULL; + } + if (self->eserror) { + ER_Destructor(self->eserror); + self->eserror = NULL; + } + self_res = SC_get_Curres(self); + from_res = SC_get_Curres(from); + if (!self_res || !from_res) + return; + QR_add_message(self_res, QR_get_message(from_res)); + QR_add_notice(self_res, QR_get_notice(from_res)); + repstate = FALSE; + if (!check) + repstate = TRUE; + else if (from_res->sqlstate[0]) { + if (!self_res->sqlstate[0] || strncmp(self_res->sqlstate, "00", 2) == 0) + repstate = TRUE; + else if (strncmp(from_res->sqlstate, "01", 2) >= 0) + repstate = TRUE; + } + if (repstate) + STRCPY_FIXED(self_res->sqlstate, from_res->sqlstate); +} + +void SC_full_error_copy(StatementClass *self, const StatementClass *from, + BOOL allres) { + ES_ErrorInfo *eserror; + + MYLOG(ES_TRACE, "entering %p->%p\n", from, self); + if (!from) + return; /* for safety */ + if (self == from) + return; /* for safety */ + if (self->__error_message) { + free(self->__error_message); + 
self->__error_message = NULL; + } + if (from->__error_message) + self->__error_message = strdup(from->__error_message); + self->__error_number = from->__error_number; + if (from->eserror) { + if (self->eserror) + ER_Destructor(self->eserror); + self->eserror = ER_Dup(from->eserror); + return; + } else if (!allres) + return; + eserror = SC_create_errorinfo(from, NULL); + if (!eserror || !eserror->__error_message[0]) { + ER_Destructor(eserror); + return; + } + if (self->eserror) + ER_Destructor(self->eserror); + self->eserror = eserror; +} + +/* Returns the next SQL error information. */ +RETCODE SQL_API ESAPI_StmtError(SQLHSTMT hstmt, SQLSMALLINT RecNumber, + SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, + SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, + SQLSMALLINT *pcbErrorMsg, UWORD flag) { + /* CC: return an error of a hdesc */ + ES_ErrorInfo *eserror, error; + StatementClass *stmt = (StatementClass *)hstmt; + int errnum = SC_get_errornumber(stmt); + + if (eserror = SC_create_errorinfo(stmt, &error), NULL == eserror) + return SQL_NO_DATA_FOUND; + if (eserror != &error) + stmt->eserror = eserror; + if (STMT_NO_MEMORY_ERROR == errnum && !eserror->__error_message[0]) + STRCPY_FIXED(eserror->__error_message, "Memory Allocation Error??"); + return ER_ReturnError(eserror, RecNumber, szSqlState, pfNativeError, + szErrorMsg, cbErrorMsgMax, pcbErrorMsg, flag); +} + +time_t SC_get_time(StatementClass *stmt) { + if (!stmt) + return time(NULL); + if (0 == stmt->stmt_time) + stmt->stmt_time = time(NULL); + return stmt->stmt_time; +} + +struct tm *SC_get_localtime(StatementClass *stmt) { +#ifndef HAVE_LOCALTIME_R + struct tm *tim; +#endif /* HAVE_LOCALTIME_R */ + + if (stmt->localtime.tm_sec < 0) { + SC_get_time(stmt); +#ifdef HAVE_LOCALTIME_R + localtime_r(&stmt->stmt_time, &(stmt->localtime)); +#else + tim = localtime(&stmt->stmt_time); + stmt->localtime = *tim; +#endif /* HAVE_LOCALTIME_R */ + } + + return &(stmt->localtime); +} + +RETCODE +SC_fetch(StatementClass 
*self) { + CSTR func = "SC_fetch"; + QResultClass *res = SC_get_Curres(self); + ARDFields *opts; + GetDataInfo *gdata; + int retval; + RETCODE result; + + Int2 num_cols, lf; + OID type; + int atttypmod; + char *value; + ColumnInfoClass *coli; + BindInfoClass *bookmark; + BOOL useCursor = FALSE; + KeySet *keyset = NULL; + + /* TupleField *tupleField; */ + + MYLOG(ES_TRACE, "entering statement=%p res=%p ommitted=0\n", self, res); + self->last_fetch_count = self->last_fetch_count_include_ommitted = 0; + if (!res) + return SQL_ERROR; + coli = QR_get_fields(res); /* the column info */ + + MYLOG(ES_DEBUG, "fetch_cursor=%d, %p->total_read=" FORMAT_LEN "\n", + SC_is_fetchcursor(self), res, res->num_total_read); + + if (self->currTuple >= (Int4)QR_get_num_total_tuples(res) - 1 + || (self->options.maxRows > 0 + && self->currTuple == self->options.maxRows - 1)) { + /* + * if at the end of the tuples, return "no data found" and set + * the cursor past the end of the result set + */ + self->currTuple = QR_get_num_total_tuples(res); + return SQL_NO_DATA_FOUND; + } + + MYLOG(ES_DEBUG, "**** : non-cursor_result\n"); + (self->currTuple)++; + + num_cols = QR_NumPublicResultCols(res); + + result = SQL_SUCCESS; + self->last_fetch_count++; + MYLOG(ES_DEBUG, "stmt=%p ommitted++\n", self); + self->last_fetch_count_include_ommitted++; + + opts = SC_get_ARDF(self); + /* + * If the bookmark column was bound then return a bookmark. Since this + * is used with SQLExtendedFetch, and the rowset size may be greater + * than 1, and an application can use row or column wise binding, use + * the code in copy_and_convert_field() to handle that. 
+ */ + if ((bookmark = opts->bookmark, bookmark) && bookmark->buffer) { + SC_set_current_col(self, -1); + SC_Create_bookmark(self, bookmark, (int)self->bind_row, + (int)self->currTuple, keyset); + } + + if (self->options.retrieve_data == SQL_RD_OFF) /* data isn't required */ + return SQL_SUCCESS; + /* The following adjustment would be needed after SQLMoreResults() */ + if (opts->allocated < num_cols) + extend_column_bindings(opts, num_cols); + gdata = SC_get_GDTI(self); + if (gdata->allocated != opts->allocated) + extend_getdata_info(gdata, opts->allocated, TRUE); + for (lf = 0; lf < num_cols; lf++) { + MYLOG(ES_DEBUG, + "fetch: cols=%d, lf=%d, opts = %p, opts->bindings = %p, buffer[] " + "= %p\n", + num_cols, lf, opts, opts->bindings, opts->bindings[lf].buffer); + + /* reset for SQLGetData */ + GETDATA_RESET(gdata->gdata[lf]); + + if (NULL == opts->bindings) + continue; + if (opts->bindings[lf].buffer != NULL) { + /* this column has a binding */ + + /* type = QR_get_field_type(res, lf); */ + type = CI_get_oid(coli, lf); /* speed things up */ + atttypmod = CI_get_atttypmod(coli, lf); /* speed things up */ + + MYLOG(ES_DEBUG, "type = %d, atttypmod = %d\n", type, atttypmod); + + if (useCursor) + value = QR_get_value_backend(res, lf); + else { + SQLLEN curt = GIdx2CacheIdx(self->currTuple, self, res); + MYLOG(ES_DEBUG, + "%p->base=" FORMAT_LEN " curr=" FORMAT_LEN + " st=" FORMAT_LEN " valid=%d\n", + res, QR_get_rowstart_in_cache(res), self->currTuple, + SC_get_rowset_start(self), QR_has_valid_base(res)); + MYLOG(ES_DEBUG, "curt=" FORMAT_LEN "\n", curt); + value = QR_get_value_backend_row(res, curt, lf); + } + + MYLOG(ES_DEBUG, "value = '%s'\n", + (value == NULL) ? 
"" : value); + + retval = copy_and_convert_field_bindinfo(self, type, atttypmod, + value, lf); + + MYLOG(ES_DEBUG, "copy_and_convert: retval = %d\n", retval); + + switch (retval) { + case COPY_OK: + break; /* OK, do next bound column */ + + case COPY_UNSUPPORTED_TYPE: + SC_set_error( + self, STMT_RESTRICTED_DATA_TYPE_ERROR, + "Received an unsupported type from Elasticsearch.", + func); + result = SQL_ERROR; + break; + + case COPY_UNSUPPORTED_CONVERSION: + SC_set_error( + self, STMT_RESTRICTED_DATA_TYPE_ERROR, + "Couldn't handle the necessary data type conversion.", + func); + result = SQL_ERROR; + break; + + case COPY_RESULT_TRUNCATED: + SC_set_error(self, STMT_TRUNCATED, + "Fetched item was truncated.", func); + MYLOG(ES_DEBUG, "The %dth item was truncated\n", lf + 1); + MYLOG(ES_DEBUG, "The buffer size = " FORMAT_LEN, + opts->bindings[lf].buflen); + MYLOG(ES_DEBUG, " and the value is '%s'\n", value); + result = SQL_SUCCESS_WITH_INFO; + break; + + case COPY_INVALID_STRING_CONVERSION: /* invalid string */ + SC_set_error(self, STMT_STRING_CONVERSION_ERROR, + "invalid string conversion occured.", func); + result = SQL_ERROR; + break; + + /* error msg already filled in */ + case COPY_GENERAL_ERROR: + result = SQL_ERROR; + break; + + /* This would not be meaningful in SQLFetch. 
*/ + case COPY_NO_DATA_FOUND: + break; + + default: + SC_set_error(self, STMT_INTERNAL_ERROR, + "Unrecognized return value from " + "copy_and_convert_field.", + func); + result = SQL_ERROR; + break; + } + } + } + + return result; +} + +#include "dlg_specific.h" + +#define CALLBACK_ALLOC_ONCE 4 + +RETCODE dequeueNeedDataCallback(RETCODE retcode, StatementClass *stmt) { + RETCODE ret; + NeedDataCallfunc func; + void *data; + int i, cnt; + + MYLOG(ES_TRACE, "entering ret=%d count=%d\n", retcode, stmt->num_callbacks); + if (SQL_NEED_DATA == retcode) + return retcode; + if (stmt->num_callbacks <= 0) + return retcode; + func = stmt->callbacks[0].func; + data = stmt->callbacks[0].data; + for (i = 1; i < stmt->num_callbacks; i++) + stmt->callbacks[i - 1] = stmt->callbacks[i]; + cnt = --stmt->num_callbacks; + ret = (*func)(retcode, data); + free(data); + if (SQL_NEED_DATA != ret && cnt > 0) + ret = dequeueNeedDataCallback(ret, stmt); + return ret; +} + +void cancelNeedDataState(StatementClass *stmt) { + int cnt = stmt->num_callbacks, i; + + stmt->num_callbacks = 0; + for (i = 0; i < cnt; i++) { + if (stmt->callbacks[i].data) + free(stmt->callbacks[i].data); + } + SC_reset_delegate(SQL_ERROR, stmt); +} + +void SC_log_error(const char *func, const char *desc, + const StatementClass *self) { + const char *head; +#define NULLCHECK(a) (a ? a : "(NULL)") + if (self) { + QResultClass *res = SC_get_Result(self); + const ARDFields *opts = SC_get_ARDF(self); + const APDFields *apdopts = SC_get_APDF(self); + SQLLEN rowsetSize; + const int level = 9; + + rowsetSize = (STMT_TRANSITION_EXTENDED_FETCH == self->transition_status + ? 
opts->size_of_rowset_odbc2 + : opts->size_of_rowset); + if (SC_get_errornumber(self) <= 0) + head = "STATEMENT WARNING"; + else { + head = "STATEMENT ERROR"; + QLOG(level, "%s: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", + head, func, desc, self->__error_number, + NULLCHECK(self->__error_message)); + } + MYLOG(ES_DEBUG, "%s: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", + head, func, desc, self->__error_number, + NULLCHECK(self->__error_message)); + if (SC_get_errornumber(self) > 0) { + QLOG(level, + " " + "------------------------------------------------------------" + "\n"); + QLOG(level, " hdbc=%p, stmt=%p, result=%p\n", + self->hdbc, self, res); + QLOG(level, " prepare=%d, external=%d\n", + self->prepare, self->external); + QLOG(level, " bindings=%p, bindings_allocated=%d\n", + opts->bindings, opts->allocated); + QLOG(level, + " parameters=%p, parameters_allocated=%d\n", + apdopts->parameters, apdopts->allocated); + QLOG(level, " statement_type=%d, statement='%s'\n", + self->statement_type, NULLCHECK(self->statement)); + QLOG(level, + " currTuple=" FORMAT_LEN + ", current_col=%d, lobj_fd=%d\n", + self->currTuple, self->current_col, self->lobj_fd); + QLOG(level, + " maxRows=" FORMAT_LEN + ", rowset_size=" FORMAT_LEN ", keyset_size=" FORMAT_LEN + ", cursor_type=" FORMAT_UINTEGER + ", scroll_concurrency=" FORMAT_UINTEGER "\n", + self->options.maxRows, rowsetSize, self->options.keyset_size, + self->options.cursor_type, self->options.scroll_concurrency); + QLOG(level, " cursor_name='%s'\n", + SC_cursor_name(self)); + + QLOG(level, + " ----------------QResult Info " + "-------------------------------\n"); + + if (res) { + QLOG(level, + " fields=%p, backend_tuples=%p, " + "tupleField=%p, conn=%p\n", + QR_get_fields(res), res->backend_tuples, res->tupleField, + res->conn); + QLOG(level, + " fetch_count=" FORMAT_LEN + ", num_total_rows=" FORMAT_ULEN + ", num_fields=%d, cursor='%s'\n", + res->fetch_number, QR_get_num_total_tuples(res), + res->num_fields, 
NULLCHECK(QR_get_cursor(res))); + QLOG(level, + " message='%s', command='%s', " + "notice='%s'\n", + NULLCHECK(QR_get_message(res)), NULLCHECK(res->command), + NULLCHECK(res->notice)); + QLOG(level, " status=%d\n", + QR_get_rstatus(res)); + } + + /* Log the connection error if there is one */ + CC_log_error(func, desc, self->hdbc); + } + } else { + MYLOG(ES_DEBUG, "INVALID STATEMENT HANDLE ERROR: func=%s, desc='%s'\n", + func, desc); + } +} + +extern void *common_cs; + +BOOL SC_SetExecuting(StatementClass *self, BOOL on) { + BOOL exeSet = FALSE; + ENTER_COMMON_CS; /* short time blocking */ + if (on) { + if (0 == (self->cancel_info & CancelRequestSet)) { + self->status = STMT_EXECUTING; + exeSet = TRUE; + } + } else { + self->cancel_info = 0; + self->status = STMT_FINISHED; + exeSet = TRUE; + } + LEAVE_COMMON_CS; + return exeSet; +} + +#ifdef NOT_USED +BOOL SC_SetCancelRequest(StatementClass *self) { + BOOL enteredCS = FALSE; + + ENTER_COMMON_CS; + if (0 != (self->cancel_info & CancelCompleted)) + ; + else if (STMT_EXECUTING == self->status) { + self->cancel_info |= CancelRequestSet; + } else { + /* try to acquire */ + if (TRY_ENTER_STMT_CS(self)) + enteredCS = TRUE; + else + self->cancel_info |= CancelRequestSet; + } + LEAVE_COMMON_CS; + return enteredCS; +} +#endif /* NOT_USED */ + +static void SC_set_error_if_not_set(StatementClass *self, int errornumber, + const char *errmsg, const char *func) { + int errnum = SC_get_errornumber(self); + + if (errnum <= 0) { + const char *emsg = SC_get_errormsg(self); + + if (emsg && 0 == errnum) + SC_set_errornumber(self, errornumber); + else + SC_set_error(self, errornumber, errmsg, func); + } +} + +void SC_set_errorinfo(StatementClass *self, QResultClass *res, int errkind) { + ConnectionClass *conn = SC_get_conn(self); + + if (CC_not_connected(conn)) { + SC_set_error_if_not_set(self, STMT_COMMUNICATION_ERROR, + "The connection has been lost", __FUNCTION__); + return; + } + + switch (QR_get_rstatus(res)) { + case 
PORES_NO_MEMORY_ERROR: + SC_set_error_if_not_set(self, STMT_NO_MEMORY_ERROR, + "memory allocation error???", __FUNCTION__); + break; + case PORES_BAD_RESPONSE: + SC_set_error_if_not_set(self, STMT_COMMUNICATION_ERROR, + "communication error occured", + __FUNCTION__); + break; + case PORES_INTERNAL_ERROR: + SC_set_error_if_not_set(self, STMT_INTERNAL_ERROR, + "Internal error fetching next row", + __FUNCTION__); + break; + default: + switch (errkind) { + case 1: + SC_set_error_if_not_set( + self, STMT_EXEC_ERROR, + "Error while fetching the next result", __FUNCTION__); + break; + default: + SC_set_error_if_not_set(self, STMT_EXEC_ERROR, + "Error while executing the query", + __FUNCTION__); + break; + } + break; + } +} + +int SC_Create_bookmark(StatementClass *self, BindInfoClass *bookmark, + Int4 bind_row, Int4 currTuple, const KeySet *keyset) { + ARDFields *opts = SC_get_ARDF(self); + SQLUINTEGER bind_size = opts->bind_size; + SQLULEN offset = opts->row_offset_ptr ? *opts->row_offset_ptr : 0; + size_t cvtlen = sizeof(Int4); + ES_BM ES_bm; + + MYLOG(ES_TRACE, "entering type=%d buflen=" FORMAT_LEN " buf=%p\n", + bookmark->returntype, bookmark->buflen, bookmark->buffer); + memset(&ES_bm, 0, sizeof(ES_bm)); + if (SQL_C_BOOKMARK == bookmark->returntype) + ; + else if (bookmark->buflen >= (SQLLEN)sizeof(ES_bm)) + cvtlen = sizeof(ES_bm); + else if (bookmark->buflen >= 12) + cvtlen = 12; + ES_bm.index = SC_make_int4_bookmark(currTuple); + if (keyset) + ES_bm.keys = *keyset; + memcpy(CALC_BOOKMARK_ADDR(bookmark, offset, bind_size, bind_row), &ES_bm, + cvtlen); + if (bookmark->used) { + SQLLEN *used = LENADDR_SHIFT(bookmark->used, offset); + + if (bind_size > 0) + used = (SQLLEN *)((char *)used + (bind_row * bind_size)); + else + used = (SQLLEN *)((char *)used + (bind_row * sizeof(SQLLEN))); + *used = cvtlen; + } + MYLOG(ES_TRACE, "leaving cvtlen=" FORMAT_SIZE_T " ix(bl,of)=%d(%d,%d)\n", + cvtlen, ES_bm.index, ES_bm.keys.blocknum, ES_bm.keys.offset); + + return COPY_OK; +} 
diff --git a/sql-odbc/src/odfesqlodbc/statement.h b/sql-odbc/src/odfesqlodbc/statement.h new file mode 100644 index 0000000000..5c8b6160e6 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/statement.h @@ -0,0 +1,523 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __STATEMENT_H__ +#define __STATEMENT_H__ + +#include + +#include "bind.h" +#include "descriptor.h" +#include "es_helper.h" +#include "es_odbc.h" +#include "es_types.h" +#include "tuple.h" + +// C Interface +#ifdef __cplusplus +extern "C" { +#endif + +enum { + CancelRequestSet = 1L, + CancelRequestAccepted = (1L << 1), + CancelCompleted = (1L << 2) +}; + +typedef enum { + STMT_ALLOCATED, /* The statement handle is allocated, but + * not used so far */ + STMT_READY, /* the statement is waiting to be executed */ + STMT_DESCRIBED, /* ODBC states that it is legal to call + * e.g. SQLDescribeCol before a call to + * SQLExecute, but after SQLPrepare. To + * get all the necessary information in + * such a case, we parse the query _before_ + * the actual call to SQLExecute, and the + * result set contains only column information, + * but no actual data. */ + STMT_FINISHED, /* statement execution has finished */ + STMT_EXECUTING /* statement execution is still going on */ +} STMT_Status; +/* + * ERROR status code + * + * The code for warnings must be minus + * and LOWEST_STMT_ERROR must be set to + * the least code number. 
+ * The code for STMT_OK is 0 and error + * codes follow after it. + */ +enum { + LOWEST_STMT_ERROR = (-6) + /* minus values mean warning returns */ + , + STMT_ERROR_IN_ROW = (-6), + STMT_OPTION_VALUE_CHANGED = (-5), + STMT_ROW_VERSION_CHANGED = (-4), + STMT_POS_BEFORE_RECORDSET = (-3), + STMT_TRUNCATED = (-2), + STMT_INFO_ONLY = (-1) + /* not an error message, + * just a notification + * to be returned by + * SQLError + */ + , + STMT_OK = 0, + STMT_EXEC_ERROR, + STMT_STATUS_ERROR, + STMT_SEQUENCE_ERROR, + STMT_NO_MEMORY_ERROR, + STMT_COLNUM_ERROR, + STMT_NO_STMTSTRING, + STMT_ERROR_TAKEN_FROM_BACKEND, + STMT_INTERNAL_ERROR, + STMT_STILL_EXECUTING, + STMT_NOT_IMPLEMENTED_ERROR, + STMT_BAD_PARAMETER_NUMBER_ERROR, + STMT_OPTION_OUT_OF_RANGE_ERROR, + STMT_INVALID_COLUMN_NUMBER_ERROR, + STMT_RESTRICTED_DATA_TYPE_ERROR, + STMT_INVALID_CURSOR_STATE_ERROR, + STMT_CREATE_TABLE_ERROR, + STMT_NO_CURSOR_NAME, + STMT_INVALID_CURSOR_NAME, + STMT_INVALID_ARGUMENT_NO, + STMT_ROW_OUT_OF_RANGE, + STMT_OPERATION_CANCELLED, + STMT_INVALID_CURSOR_POSITION, + STMT_VALUE_OUT_OF_RANGE, + STMT_OPERATION_INVALID, + STMT_PROGRAM_TYPE_OUT_OF_RANGE, + STMT_BAD_ERROR, + STMT_INVALID_OPTION_IDENTIFIER, + STMT_RETURN_NULL_WITHOUT_INDICATOR, + STMT_INVALID_DESCRIPTOR_IDENTIFIER, + STMT_OPTION_NOT_FOR_THE_DRIVER, + STMT_FETCH_OUT_OF_RANGE, + STMT_COUNT_FIELD_INCORRECT, + STMT_INVALID_NULL_ARG, + STMT_NO_RESPONSE, + STMT_COMMUNICATION_ERROR, + STMT_STRING_CONVERSION_ERROR +}; + +/* statement types */ +enum { + STMT_TYPE_UNKNOWN = -2, + STMT_TYPE_OTHER = -1, + STMT_TYPE_SELECT = 0, + STMT_TYPE_WITH, + STMT_TYPE_PROCCALL, + STMT_TYPE_TRANSACTION, + STMT_TYPE_DECLARE, + STMT_TYPE_FETCH, + STMT_TYPE_CLOSE, + STMT_TYPE_INSERT, + STMT_TYPE_UPDATE, + STMT_TYPE_DELETE, + STMT_TYPE_CREATE, + STMT_TYPE_ALTER, + STMT_TYPE_DROP, + STMT_TYPE_GRANT, + STMT_TYPE_REVOKE, + STMT_TYPE_LOCK, + STMT_TYPE_PREPARE, + STMT_TYPE_EXECUTE, + STMT_TYPE_DEALLOCATE, + STMT_TYPE_ANALYZE, + STMT_TYPE_NOTIFY, + STMT_TYPE_EXPLAIN, 
+ STMT_TYPE_SET, + STMT_TYPE_RESET, + STMT_TYPE_MOVE, + STMT_TYPE_COPY, + STMT_TYPE_START, + STMT_TYPE_SPECIAL +}; + +#define STMT_UPDATE(stmt) ((stmt)->statement_type > STMT_TYPE_PROCCALL) + +/* Parsing status */ +enum { + STMT_PARSE_NONE = 0, + STMT_PARSE_COMPLETE /* the driver parsed the statement */ + , + STMT_PARSE_INCOMPLETE, + STMT_PARSE_FATAL, + STMT_PARSE_MASK = 3L, + STMT_PARSED_OIDS = (1L << 2), + STMT_FOUND_KEY = (1L << 3), + STMT_HAS_ROW_DESCRIPTION = (1L << 4) /* already got the col info */ + , + STMT_REFLECTED_ROW_DESCRIPTION = (1L << 5) +}; + +/* transition status */ +enum { + STMT_TRANSITION_UNALLOCATED = 0, + STMT_TRANSITION_ALLOCATED = 1, + STMT_TRANSITION_FETCH_SCROLL = 6, + STMT_TRANSITION_EXTENDED_FETCH = 7 +}; + +/* Result style */ +enum { STMT_FETCH_NONE = 0, STMT_FETCH_NORMAL, STMT_FETCH_EXTENDED }; + +#define ES_NUM_NORMAL_KEYS 2 + +typedef RETCODE (*NeedDataCallfunc)(RETCODE, void *); +typedef struct { + NeedDataCallfunc func; + void *data; +} NeedDataCallback; + +/* + * ProcessedStmt represents a fragment of the original SQL query, after + * converting ? markers to $n style, processing ODBC escapes, and splitting + * a multi-statement into individual statements. Each individual statement + * is represented by one ProcessedStmt struct. 
+ */ +struct ProcessedStmt { + struct ProcessedStmt *next; + char *query; + int num_params; /* number of parameter markers in this, + * fragment or -1 if not known */ +}; +typedef struct ProcessedStmt ProcessedStmt; + +/******** Statement Handle ***********/ +struct StatementClass_ { + ConnectionClass *hdbc; /* pointer to ConnectionClass this + * statement belongs to */ + QResultClass *result; /* result of the current statement */ + QResultClass *curres; /* the current result in the chain */ + HSTMT *phstmt; + StatementOptions options; + StatementOptions options_orig; + /* attached descriptor handles */ + DescriptorClass *ard; + DescriptorClass *apd; + DescriptorClass *ird; + DescriptorClass *ipd; + /* implicit descriptor handles */ + DescriptorClass ardi; + DescriptorClass irdi; + DescriptorClass apdi; + DescriptorClass ipdi; + + STMT_Status status; + char *__error_message; + int __error_number; + ES_ErrorInfo *eserror; + + SQLLEN currTuple; /* current absolute row number (GetData, + * SetPos, SQLFetch) */ + GetDataInfo gdata_info; + SQLLEN save_rowset_size; /* saved rowset size in case of + * change/FETCH_NEXT */ + SQLLEN rowset_start; /* start of rowset (an absolute row + * number) */ + SQLSETPOSIROW bind_row; /* current offset for Multiple row/column + * binding */ + Int2 current_col; /* current column for GetData -- used to + * handle multiple calls */ + SQLLEN last_fetch_count; /* number of rows retrieved in + * last fetch/extended fetch */ + int lobj_fd; /* fd of the current large object */ + + char *statement; /* if non--null pointer to the SQL + * statement that has been executed */ + /* + * processed_statements is the SQL after splitting multi-statement into + * parts, and replacing ? markers with $n style markers, or injecting the + * values in UseServerSidePrepare=0 mode. 
+ */ + ProcessedStmt *processed_statements; + + TABLE_INFO **ti; + Int2 ntab; + Int2 num_key_fields; + Int2 statement_type; /* According to the defines above */ + Int2 num_params; + Int2 data_at_exec; /* Number of params needing SQLPutData */ + UDWORD iflag; /* ESAPI_AllocStmt parameter */ + PutDataInfo pdata_info; + po_ind_t parse_status; + po_ind_t proc_return; + po_ind_t put_data; /* Has SQLPutData been called ? */ + po_ind_t catalog_result; /* Is this a result of catalog function ? */ + po_ind_t prepare; /* is this a prepared statement ? */ + po_ind_t prepared; /* is this statement already + * prepared at the server ? */ + po_ind_t external; /* Allocated via SQLAllocHandle() */ + po_ind_t transition_status; /* Transition status */ + po_ind_t multi_statement; /* -1:unknown 0:single 1:multi */ + po_ind_t rb_or_tc; /* rollback on error */ + po_ind_t + discard_output_params; /* discard output parameters on parse stage */ + po_ind_t cancel_info; /* cancel information */ + po_ind_t ref_CC_error; /* refer to CC_error ? */ + po_ind_t lock_CC_for_rb; /* lock CC for statement rollback ? */ + po_ind_t join_info; /* have joins ? */ + po_ind_t parse_method; /* parse_statement is forced or ? 
*/ + esNAME cursor_name; + char *plan_name; + unsigned char miscinfo; + unsigned char execinfo; + po_ind_t updatable; + SQLLEN diag_row_count; + char *load_statement; /* to (re)load updatable individual rows */ + ssize_t from_pos; + ssize_t load_from_pos; + ssize_t where_pos; + SQLLEN last_fetch_count_include_ommitted; + time_t stmt_time; + struct tm localtime; + /* SQL_NEED_DATA Callback list */ + StatementClass *execute_delegate; + StatementClass *execute_parent; + UInt2 allocated_callbacks; + UInt2 num_callbacks; + NeedDataCallback *callbacks; + void *cs; +}; + +#define SC_get_conn(a) ((a)->hdbc) +void SC_init_Result(StatementClass *self); +void SC_set_Result(StatementClass *self, QResultClass *res); +#define SC_get_Result(a) ((a)->result) +#define SC_set_Curres(a, b) ((a)->curres = b) +#define SC_get_Curres(a) ((a)->curres) +#define SC_get_ARD(a) ((a)->ard) +#define SC_get_APD(a) ((a)->apd) +#define SC_get_IRD(a) ((a)->ird) +#define SC_get_IPD(a) ((a)->ipd) +#define SC_get_ARDF(a) (&(SC_get_ARD(a)->ardf)) +#define SC_get_APDF(a) (&(SC_get_APD(a)->apdf)) +#define SC_get_IRDF(a) (&(SC_get_IRD(a)->irdf)) +#define SC_get_IPDF(a) (&(SC_get_IPD(a)->ipdf)) +#define SC_get_ARDi(a) (&((a)->ardi)) +#define SC_get_APDi(a) (&((a)->apdi)) +#define SC_get_IRDi(a) (&((a)->irdi)) +#define SC_get_IPDi(a) (&((a)->ipdi)) +#define SC_get_GDTI(a) (&((a)->gdata_info)) +#define SC_get_PDTI(a) (&((a)->pdata_info)) + +#define SC_get_errornumber(a) ((a)->__error_number) +#define SC_set_errornumber(a, n) ((a)->__error_number = n) +#define SC_get_errormsg(a) ((a)->__error_message) +#define SC_is_prepare_statement(a) (0 != ((a)->prepare & PREPARE_STATEMENT)) +#define SC_get_prepare_method(a) ((a)->prepare & (~PREPARE_STATEMENT)) + +#define SC_parsed_status(a) ((a)->parse_status & STMT_PARSE_MASK) +#define SC_set_parse_status(a, s) ((a)->parse_status |= s) +#define SC_update_not_ready(a) \ + (SC_parsed_status(a) == STMT_PARSE_NONE \ + || 0 == ((a)->parse_status & STMT_PARSED_OIDS)) +#define 
SC_update_ready(a) \ + (SC_parsed_status(a) == STMT_PARSE_COMPLETE \ + && 0 != ((a)->parse_status & STMT_FOUND_KEY) && (a)->updatable) +#define SC_set_checked_hasoids(a, b) \ + ((a)->parse_status |= (STMT_PARSED_OIDS | (b ? STMT_FOUND_KEY : 0))) +#define SC_checked_hasoids(a) (0 != ((a)->parse_status & STMT_PARSED_OIDS)) +#define SC_set_delegate(p, c) \ + ((p)->execute_delegate = c, (c)->execute_parent = p) + +#define SC_is_updatable(s) (0 < ((s)->updatable)) +#define SC_reset_updatable(s) ((s)->updatable = -1) +#define SC_set_updatable(s, b) ((s)->updatable = (b)) +#define SC_clear_parse_method(s) ((s)->parse_method = 0) +#define SC_is_parse_forced(s) (0 != ((s)->parse_method & 1L)) +#define SC_set_parse_forced(s) ((s)->parse_method |= 1L) + +#define SC_cursor_is_valid(s) (NAME_IS_VALID((s)->cursor_name)) +#define SC_cursor_name(s) (SAFE_NAME((s)->cursor_name)) + +void SC_reset_delegate(RETCODE, StatementClass *); + +#define SC_is_lower_case(a, b) \ + ((a)->options.metadata_id || (b)->connInfo.lower_case_identifier) + +#define SC_MALLOC_return_with_error(t, tp, s, a, m, r) \ + do { \ + if (t = (tp *)malloc(s), NULL == t) { \ + SC_set_error(a, STMT_NO_MEMORY_ERROR, m, "SC_MALLOC"); \ + return r; \ + } \ + } while (0) +#define SC_MALLOC_gexit_with_error(t, tp, s, a, m, r) \ + do { \ + if (t = (tp *)malloc(s), NULL == t) { \ + SC_set_error(a, STMT_NO_MEMORY_ERROR, m, "SC_MALLOC"); \ + r; \ + goto cleanup; \ + } \ + } while (0) +#define SC_REALLOC_return_with_error(t, tp, s, a, m, r) \ + do { \ + tp *tmp; \ + if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ + SC_set_error(a, STMT_NO_MEMORY_ERROR, m, "SC_REALLOC"); \ + return r; \ + } \ + t = tmp; \ + } while (0) +#define SC_REALLOC_gexit_with_error(t, tp, s, a, m, r) \ + do { \ + tp *tmp; \ + if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ + SC_set_error(a, STMT_NO_MEMORY_ERROR, m, __FUNCTION__); \ + r; \ + goto cleanup; \ + } \ + t = tmp; \ + } while (0) + +/* options for SC_free_params() */ +#define 
STMT_FREE_PARAMS_ALL 0 +#define STMT_FREE_PARAMS_DATA_AT_EXEC_ONLY 1 + +/* prepare state */ +enum { + NON_PREPARE_STATEMENT = 0, + PREPARE_STATEMENT = 1, + PREPARE_BY_THE_DRIVER = (1L << 1), + NAMED_PARSE_REQUEST = (3L << 1), + PARSE_TO_EXEC_ONCE = (4L << 1), + PARSE_REQ_FOR_INFO = (5L << 1) +}; + +/* prepared state */ +enum { + NOT_PREPARED = 0, + PREPARED, + EXECUTED +}; + +/* misc info */ +#define SC_set_fetchcursor(a) ((a)->miscinfo |= (1L << 1)) +#define SC_no_fetchcursor(a) ((a)->miscinfo &= ~(1L << 1)) +#define SC_is_fetchcursor(a) (((a)->miscinfo & (1L << 1)) != 0) +#define SC_miscinfo_clear(a) ((a)->miscinfo = 0) +#define SC_set_with_hold(a) ((a)->execinfo |= 1L) +#define SC_set_without_hold(a) ((a)->execinfo &= (~1L)) +#define SC_is_with_hold(a) (((a)->execinfo & 1L) != 0) +#define SC_set_readonly(a) ((a)->execinfo |= (1L << 1)) +#define SC_set_no_readonly(a) ((a)->execinfo &= ~(1L << 1)) +#define SC_is_readonly(a) (((a)->execinfo & (1L << 1)) != 0) +#define SC_execinfo_clear(a) (((a)->execinfo = 0) +#define STMT_HAS_OUTER_JOIN 1L +#define STMT_HAS_INNER_JOIN (1L << 1) +#define SC_has_join(a) (0 != (a)->join_info) +#define SC_has_outer_join(a) (0 != (STMT_HAS_OUTER_JOIN & (a)->join_info)) +#define SC_has_inner_join(a) (0 != (STMT_HAS_INNER_JOIN & (a)->join_info)) +#define SC_set_outer_join(a) ((a)->join_info |= STMT_HAS_OUTER_JOIN) +#define SC_set_inner_join(a) ((a)->join_info |= STMT_HAS_INNER_JOIN) + +#define SC_start_tc_stmt(a) ((a)->rb_or_tc = (1L << 1)) +#define SC_is_tc_stmt(a) (((a)->rb_or_tc & (1L << 1)) != 0) +#define SC_start_rb_stmt(a) ((a)->rb_or_tc = (1L << 2)) +#define SC_is_rb_stmt(a) (((a)->rb_or_tc & (1L << 2)) != 0) +#define SC_unref_CC_error(a) (((a)->ref_CC_error) = FALSE) +#define SC_ref_CC_error(a) (((a)->ref_CC_error) = TRUE) +#define SC_can_parse_statement(a) (STMT_TYPE_SELECT == (a)->statement_type) +/* + * DECLARE CURSOR + FETCH can only be used with SELECT-type queries. 
And + * it's not currently supported with array-bound parameters. + */ +#define SC_may_use_cursor(a) \ + (SC_get_APDF(a)->paramset_size <= 1 \ + && (STMT_TYPE_SELECT == (a)->statement_type \ + || STMT_TYPE_WITH == (a)->statement_type)) +#define SC_may_fetch_rows(a) \ + (STMT_TYPE_SELECT == (a)->statement_type \ + || STMT_TYPE_WITH == (a)->statement_type) + +/* For Multi-thread */ +#define INIT_STMT_CS(x) XPlatformInitializeCriticalSection(&((x)->cs)) +#define ENTER_STMT_CS(x) XPlatformEnterCriticalSection(((x)->cs)) +#define TRY_ENTER_STMT_CS(x) XPlatformTryEnterCriticalSection(&((x)->cs)) +#define LEAVE_STMT_CS(x) XPlatformLeaveCriticalSection(((x)->cs)) +#define DELETE_STMT_CS(x) XPlatformDeleteCriticalSection(&((x)->cs)) + +/* Statement prototypes */ +StatementClass *SC_Constructor(ConnectionClass *); +void InitializeStatementOptions(StatementOptions *opt); +char SC_Destructor(StatementClass *self); +BOOL SC_opencheck(StatementClass *self, const char *func); +RETCODE SC_initialize_and_recycle(StatementClass *self); +void SC_initialize_cols_info(StatementClass *self, BOOL DCdestroy, + BOOL parseReset); +void SC_reset_result_for_rerun(StatementClass *self); +int statement_type(const char *statement); +char SC_unbind_cols(StatementClass *self); +char SC_recycle_statement(StatementClass *self); +void SC_clear_error(StatementClass *self); +void SC_set_error(StatementClass *self, int errnum, const char *msg, + const char *func); +void SC_set_errormsg(StatementClass *self, const char *msg); +void SC_error_copy(StatementClass *self, const StatementClass *from, BOOL); +void SC_full_error_copy(StatementClass *self, const StatementClass *from, BOOL); +void SC_set_prepared(StatementClass *self, int); +void SC_set_planname(StatementClass *self, const char *plan_name); +void SC_set_rowset_start(StatementClass *self, SQLLEN, BOOL); +void SC_inc_rowset_start(StatementClass *self, SQLLEN); +RETCODE SC_initialize_stmts(StatementClass *self, BOOL); +RETCODE SC_fetch(StatementClass 
*self); +void SC_log_error(const char *func, const char *desc, + const StatementClass *self); +time_t SC_get_time(StatementClass *self); +struct tm *SC_get_localtime(StatementClass *self); +int SC_Create_bookmark(StatementClass *stmt, BindInfoClass *bookmark, + Int4 row_pos, Int4 currTuple, const KeySet *keyset); +int SC_set_current_col(StatementClass *self, int col); +BOOL SC_SetExecuting(StatementClass *self, BOOL on); +BOOL SC_SetCancelRequest(StatementClass *self); + +BOOL SC_connection_lost_check(StatementClass *stmt, const char *funcname); +void SC_set_errorinfo(StatementClass *self, QResultClass *res, int errkind); +RETCODE dequeueNeedDataCallback(RETCODE, StatementClass *self); +void cancelNeedDataState(StatementClass *self); + +/* + * Macros to convert global index <-> relative index in resultset/rowset + */ +/* a global index to the relative index in a rowset */ +#define SC_get_rowset_start(stmt) ((stmt)->rowset_start) +#define GIdx2RowIdx(gidx, stmt) (gidx - (stmt)->rowset_start) +/* a global index to the relative index in a resultset(not a rowset) */ +#define GIdx2CacheIdx(gidx, s, r) \ + (gidx - (QR_has_valid_base(r) ? ((s)->rowset_start - (r)->base) : 0)) +#define GIdx2KResIdx(gidx, s, r) \ + (gidx - (QR_has_valid_base(r) ? ((s)->rowset_start - (r)->key_base) : 0)) +/* a relative index in a rowset to the global index */ +#define RowIdx2GIdx(ridx, stmt) (ridx + (stmt)->rowset_start) +/* a relative index in a resultset to the global index */ +#define CacheIdx2GIdx(ridx, stmt, res) \ + (ridx - (res)->base + (stmt)->rowset_start) +#define KResIdx2GIdx(ridx, stmt, res) \ + (ridx - (res)->key_base + (stmt)->rowset_start) + +#define BOOKMARK_SHIFT 1 +#define SC_make_int4_bookmark(b) ((b < 0) ? (b) : (b + BOOKMARK_SHIFT)) +#define SC_resolve_int4_bookmark(b) ((b < 0) ? 
(b) : (b - BOOKMARK_SHIFT)) + +#ifdef __cplusplus +} +#endif +#endif /* __STATEMENT_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/tuple.c b/sql-odbc/src/odfesqlodbc/tuple.c new file mode 100644 index 0000000000..593671f518 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/tuple.c @@ -0,0 +1,59 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// clang-format off +#include "tuple.h" +#include "misc.h" + +#include +#include +// clang-format on + +void set_tuplefield_null(TupleField *tuple_field) { + tuple_field->len = 0; + // Changing value to strdup("") from NULL to fix error + // "Object cannot be cast from DBNull to other types" in Excel & Power BI + tuple_field->value = strdup(""); /* NULL; */ +} + +void set_tuplefield_string(TupleField *tuple_field, const char *string) { + if (string) { + tuple_field->len = (Int4)strlen(string); /* ES restriction */ + tuple_field->value = strdup(string); + } + if (!tuple_field->value) + set_tuplefield_null(tuple_field); +} + +void set_tuplefield_int2(TupleField *tuple_field, Int2 value) { + char buffer[10]; + + ITOA_FIXED(buffer, value); + + tuple_field->len = (Int4)(strlen(buffer) + 1); + /* +1 ... is this correct (better be on the save side-...) */ + tuple_field->value = strdup(buffer); +} + +void set_tuplefield_int4(TupleField *tuple_field, Int4 value) { + char buffer[15]; + + ITOA_FIXED(buffer, value); + + tuple_field->len = (Int4)(strlen(buffer) + 1); + /* +1 ... 
is this correct (better be on the save side-...) */ + tuple_field->value = strdup(buffer); +} diff --git a/sql-odbc/src/odfesqlodbc/tuple.h b/sql-odbc/src/odfesqlodbc/tuple.h new file mode 100644 index 0000000000..c9623a82ca --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/tuple.h @@ -0,0 +1,85 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __TUPLE_H__ +#define __TUPLE_H__ + +#include "es_odbc.h" + +// C Interface +#ifdef __cplusplus +extern "C" { +#endif + +/* Used by backend data AND manual result sets */ +struct TupleField_ { + Int4 len; /* ES length of the current Tuple */ + void *value; /* an array representing the value */ +}; + +/* keyset(TID + OID) info */ +struct KeySet_ { + UWORD status; + UInt2 offset; + UInt4 blocknum; + OID oid; +}; +/* Rollback(index + original TID) info */ +struct Rollback_ { + SQLLEN index; + UInt4 blocknum; + UInt2 offset; + OID oid; + UWORD option; +}; +#define KEYSET_INFO_PUBLIC 0x07 +#define CURS_SELF_ADDING (1L << 3) +#define CURS_SELF_DELETING (1L << 4) +#define CURS_SELF_UPDATING (1L << 5) +#define CURS_SELF_ADDED (1L << 6) +#define CURS_SELF_DELETED (1L << 7) +#define CURS_SELF_UPDATED (1L << 8) +#define CURS_NEEDS_REREAD (1L << 9) +#define CURS_IN_ROWSET (1L << 10) +#define CURS_OTHER_DELETED (1L << 11) + +/* These macros are wrappers for the corresponding set_tuplefield functions + but these handle automatic NULL determination and call set_tuplefield_null() + 
if appropriate for the datatype (used by SQLGetTypeInfo). +*/ +#define set_nullfield_string(FLD, VAL) \ + ((VAL) ? set_tuplefield_string(FLD, (VAL)) : set_tuplefield_null(FLD)) +#define set_nullfield_int2(FLD, VAL) \ + ((VAL) != -1 ? set_tuplefield_int2(FLD, (VAL)) : set_tuplefield_null(FLD)) +#define set_nullfield_int4(FLD, VAL) \ + ((VAL) != -1 ? set_tuplefield_int4(FLD, (VAL)) : set_tuplefield_null(FLD)) + +void set_tuplefield_null(TupleField *tuple_field); +void set_tuplefield_string(TupleField *tuple_field, const char *string); +void set_tuplefield_int2(TupleField *tuple_field, Int2 value); +void set_tuplefield_int4(TupleField *tuple_field, Int4 value); +SQLLEN ClearCachedRows(TupleField *tuple, int num_fields, SQLLEN num_rows); + +typedef struct _ES_BM_ { + Int4 index; + KeySet keys; +} ES_BM; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sql-odbc/src/odfesqlodbc/unicode_support.h b/sql-odbc/src/odfesqlodbc/unicode_support.h new file mode 100644 index 0000000000..1e73e8c65d --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/unicode_support.h @@ -0,0 +1,46 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef __UNICODE_SUPPORT_H__ +#define __UNICODE_SUPPORT_H__ + +#include "es_odbc.h" + +#ifdef UNICODE_SUPPORT +#define WCLEN sizeof(SQLWCHAR) +enum { CONVTYPE_UNKNOWN, WCSTYPE_UTF16_LE, WCSTYPE_UTF32_LE, C16TYPE_UTF16_LE }; +char *ucs2_to_utf8(const SQLWCHAR *ucs2str, SQLLEN ilen, SQLLEN *olen, + BOOL tolower); +SQLULEN utf8_to_ucs2_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, + SQLWCHAR *ucs2str, SQLULEN buflen, BOOL errcheck); +int get_convtype(void); +#define utf8_to_ucs2(utf8str, ilen, ucs2str, buflen) \ + utf8_to_ucs2_lf(utf8str, ilen, FALSE, ucs2str, buflen, FALSE) + +SQLLEN bindcol_hybrid_estimate(const char *ldt, BOOL lf_conv, char **wcsbuf); +SQLLEN bindcol_hybrid_exec(SQLWCHAR *utf16, const char *ldt, size_t n, + BOOL lf_conv, char **wcsbuf); +SQLLEN bindcol_localize_estimate(const char *utf8dt, BOOL lf_conv, + char **wcsbuf); +SQLLEN bindcol_localize_exec(char *ldt, size_t n, BOOL lf_conv, char **wcsbuf); +SQLLEN bindpara_msg_to_utf8(const char *ldt, char **wcsbuf, SQLLEN used); +SQLLEN bindpara_wchar_to_msg(const SQLWCHAR *utf16, char **wcsbuf, SQLLEN used); + +SQLLEN locale_to_sqlwchar(SQLWCHAR *utf16, const char *ldt, size_t n, + BOOL lf_conv); +#endif /* UNICODE_SUPPORT */ + +#endif /* __UNICODE_SUPPORT_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/version.h b/sql-odbc/src/odfesqlodbc/version.h new file mode 100644 index 0000000000..fa68acea22 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/version.h @@ -0,0 +1,38 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __VERSION_H__ +#define __VERSION_H__ + +/* + * BuildAll may pass ELASTICDRIVERVERSION, ELASTIC_RESOURCE_VERSION + * and ES_DRVFILE_VERSION via winbuild/elasticodbc.vcxproj. + */ +#ifdef ES_ODBC_VERSION + +#ifndef ELASTICSEARCHDRIVERVERSION +#define ELASTICSEARCHDRIVERVERSION ES_ODBC_VERSION +#endif +#ifndef ELASTICSEARCH_RESOURCE_VERSION +#define ELASTICSEARCH_RESOURCE_VERSION ELASTICSEARCHDRIVERVERSION +#endif +#ifndef ES_DRVFILE_VERSION +#define ES_DRVFILE_VERSION ES_ODBC_DRVFILE_VERSION +#endif + +#endif // ES_ODBC_VERSION + +#endif diff --git a/sql-odbc/src/odfesqlodbc/win_setup.h b/sql-odbc/src/odfesqlodbc/win_setup.h new file mode 100644 index 0000000000..c1fb958ef6 --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/win_setup.h @@ -0,0 +1,44 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifndef _WIN_SETUP_H__ +#define _WIN_SETUP_H__ + +#ifndef INTFUNC +#define INTFUNC __stdcall +#endif /* INTFUNC */ +#define MAXDSNAME (32 + 1) /* Max data source name length */ +/* Globals */ +/* NOTE: All these are used by the dialog procedures */ +typedef struct tagSETUPDLG { + HWND hwndParent; /* Parent window handle */ + LPCSTR lpszDrvr; /* Driver description */ + ConnInfo ci; + char szDSN[MAXDSNAME]; /* Original data source name */ + BOOL fNewDSN; /* New data source flag */ + BOOL fDefault; /* Default data source flag */ + +} SETUPDLG, *LPSETUPDLG; + +/* Prototypes */ +INT_PTR CALLBACK ConfigDlgProc(HWND hdlg, UINT wMsg, WPARAM wParam, + LPARAM lParam); +BOOL INTFUNC ChangeDriverName(HWND hwnd, LPSETUPDLG lpsetupdlg, + LPCSTR driver_name); + +void test_connection(HANDLE hwnd, ConnInfo *ci, BOOL withDTC); + +#endif /* _WIN_SETUP_H__ */ diff --git a/sql-odbc/src/odfesqlodbc/win_unicode.c b/sql-odbc/src/odfesqlodbc/win_unicode.c new file mode 100644 index 0000000000..729238c85a --- /dev/null +++ b/sql-odbc/src/odfesqlodbc/win_unicode.c @@ -0,0 +1,1168 @@ +/* + * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +#ifdef UNICODE_SUPPORT + +#include +#include +#include +#include "unicode_support.h" + +#ifdef WIN32 +#define FORMAT_SIZE_T "%Iu" +#else +#define FORMAT_SIZE_T "%zu" +#endif + +#if (defined(__STDC_ISO_10646__) && defined(HAVE_MBSTOWCS) \ + && defined(HAVE_WCSTOMBS)) \ + || defined(WIN32) +#define __WCS_ISO10646__ +static BOOL use_wcs = FALSE; +#endif + +#if (defined(__STDC_UTF_16__) && defined(HAVE_UCHAR_H) \ + && defined(HAVE_MBRTOC16) && defined(HAVE_C16RTOMB)) +#define __CHAR16_UTF_16__ +#include +static BOOL use_c16 = FALSE; +#endif + +static int convtype = -1; + +int get_convtype(void) { + const UCHAR *cdt; + (void)(cdt); +#if defined(__WCS_ISO10646__) + if (convtype < 0) { + wchar_t *wdt = L"a"; + int sizeof_w = sizeof(wchar_t); + + cdt = (UCHAR *)wdt; + switch (sizeof_w) { + case 2: + if ('a' == cdt[0] && '\0' == cdt[1] && '\0' == cdt[2] + && '\0' == cdt[3]) { + MYLOG(ES_DEBUG, " UTF-16LE detected\n"); + convtype = WCSTYPE_UTF16_LE; + use_wcs = TRUE; + } + break; + case 4: + if ('a' == cdt[0] && '\0' == cdt[1] && '\0' == cdt[2] + && '\0' == cdt[3] && '\0' == cdt[4] && '\0' == cdt[5] + && '\0' == cdt[6] && '\0' == cdt[7]) { + MYLOG(ES_DEBUG, " UTF32-LE detected\n"); + convtype = WCSTYPE_UTF32_LE; + use_wcs = TRUE; + } + break; + } + } +#endif /* __WCS_ISO10646__ */ +#ifdef __CHAR16_UTF_16__ + if (convtype < 0) { + char16_t *c16dt = u"a"; + + cdt = (UCHAR *)c16dt; + if ('a' == cdt[0] && '\0' == cdt[1] && '\0' == cdt[2] + && '\0' == cdt[3]) { + MYLOG(ES_DEBUG, " C16_UTF-16LE detected\n"); + convtype = C16TYPE_UTF16_LE; + use_c16 = TRUE; + } + } +#endif /* __CHAR16_UTF_16__ */ + if (convtype < 0) + convtype = CONVTYPE_UNKNOWN; /* unknown */ + return convtype; +} + +#define byte3check 0xfffff800 +#define byte2_base 0x80c0 +#define byte2_mask1 0x07c0 +#define byte2_mask2 0x003f +#define byte3_base 0x8080e0 +#define byte3_mask1 0xf000 +#define byte3_mask2 0x0fc0 +#define byte3_mask3 0x003f + +#define surrog_check 0xfc00 +#define surrog1_bits 0xd800 
+#define surrog2_bits 0xdc00 +#define byte4_base 0x808080f0 +#define byte4_sr1_mask1 0x0700 +#define byte4_sr1_mask2 0x00fc +#define byte4_sr1_mask3 0x0003 +#define byte4_sr2_mask1 0x03c0 +#define byte4_sr2_mask2 0x003f +#define surrogate_adjust (0x10000 >> 10) + +static int little_endian = -1; + +SQLULEN ucs2strlen(const SQLWCHAR *ucs2str) { + SQLULEN len; + for (len = 0; ucs2str[len]; len++) + ; + return len; +} +char *ucs2_to_utf8(const SQLWCHAR *ucs2str, SQLLEN ilen, SQLLEN *olen, + BOOL lower_identifier) { + char *utf8str; + int len = 0; + MYLOG(ES_DEBUG, "%p ilen=" FORMAT_LEN " ", ucs2str, ilen); + + if (!ucs2str) { + if (olen) + *olen = SQL_NULL_DATA; + return NULL; + } + if (little_endian < 0) { + int crt = 1; + little_endian = (0 != ((char *)&crt)[0]); + } + if (ilen < 0) + ilen = ucs2strlen(ucs2str); + MYPRINTF(0, " newlen=" FORMAT_LEN, ilen); + utf8str = (char *)malloc(ilen * 4 + 1); + if (utf8str) { + int i = 0; + UInt2 byte2code; + Int4 byte4code, surrd1, surrd2; + const SQLWCHAR *wstr; + + for (i = 0, wstr = ucs2str; i < ilen; i++, wstr++) { + if (!*wstr) + break; + else if (0 == (*wstr & 0xffffff80)) /* ASCII */ + { + if (lower_identifier) + utf8str[len++] = (char)tolower(*wstr); + else + utf8str[len++] = (char)*wstr; + } else if ((*wstr & byte3check) == 0) { + byte2code = byte2_base | ((byte2_mask1 & *wstr) >> 6) + | ((byte2_mask2 & *wstr) << 8); + if (little_endian) + memcpy(utf8str + len, (char *)&byte2code, + sizeof(byte2code)); + else { + utf8str[len] = ((char *)&byte2code)[1]; + utf8str[len + 1] = ((char *)&byte2code)[0]; + } + len += sizeof(byte2code); + } + /* surrogate pair check for non ucs-2 code */ + else if (surrog1_bits == (*wstr & surrog_check)) { + surrd1 = (*wstr & ~surrog_check) + surrogate_adjust; + wstr++; + i++; + surrd2 = (*wstr & ~surrog_check); + byte4code = byte4_base | ((byte4_sr1_mask1 & surrd1) >> 8) + | ((byte4_sr1_mask2 & surrd1) << 6) + | ((byte4_sr1_mask3 & surrd1) << 20) + | ((byte4_sr2_mask1 & surrd2) << 10) + | 
((byte4_sr2_mask2 & surrd2) << 24); + if (little_endian) + memcpy(utf8str + len, (char *)&byte4code, + sizeof(byte4code)); + else { + utf8str[len] = ((char *)&byte4code)[3]; + utf8str[len + 1] = ((char *)&byte4code)[2]; + utf8str[len + 2] = ((char *)&byte4code)[1]; + utf8str[len + 3] = ((char *)&byte4code)[0]; + } + len += sizeof(byte4code); + } else { + byte4code = byte3_base | ((byte3_mask1 & *wstr) >> 12) + | ((byte3_mask2 & *wstr) << 2) + | ((byte3_mask3 & *wstr) << 16); + if (little_endian) + memcpy(utf8str + len, (char *)&byte4code, 3); + else { + utf8str[len] = ((char *)&byte4code)[3]; + utf8str[len + 1] = ((char *)&byte4code)[2]; + utf8str[len + 2] = ((char *)&byte4code)[1]; + } + len += 3; + } + } + utf8str[len] = '\0'; + if (olen) + *olen = len; + } + MYPRINTF(0, " olen=%d utf8str=%s\n", len, utf8str ? utf8str : ""); + return utf8str; +} + +#define byte3_m1 0x0f +#define byte3_m2 0x3f +#define byte3_m3 0x3f +#define byte2_m1 0x1f +#define byte2_m2 0x3f +#define byte4_m1 0x07 +#define byte4_m2 0x3f +#define byte4_m31 0x30 +#define byte4_m32 0x0f +#define byte4_m4 0x3f + +/* + * Convert a string from UTF-8 encoding to UCS-2. + * + * utf8str - input string in UTF-8 + * ilen - length of input string in bytes (or minus) + * lfconv - TRUE if line feeds (LF) should be converted to CR + LF + * ucs2str - output buffer + * bufcount - size of output buffer + * errcheck - if TRUE, check for invalidly encoded input characters + * + * Returns the number of SQLWCHARs copied to output buffer. If the output + * buffer is too small, the output is truncated. The output string is + * NULL-terminated, except when the output is truncated. 
+ */ +SQLULEN +utf8_to_ucs2_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, + SQLWCHAR *ucs2str, SQLULEN bufcount, BOOL errcheck) { + int i; + SQLULEN rtn, ocount, wcode; + const UCHAR *str; + + MYLOG(ES_DEBUG, "ilen=" FORMAT_LEN " bufcount=" FORMAT_ULEN, ilen, + bufcount); + if (!utf8str) + return 0; + MYPRINTF(ES_DEBUG, " string=%s", utf8str); + + if (!bufcount) + ucs2str = NULL; + else if (!ucs2str) + bufcount = 0; + if (ilen < 0) + ilen = strlen(utf8str); + for (i = 0, ocount = 0, str = (SQLCHAR *)utf8str; i < ilen && *str;) { + if ((*str & 0x80) == 0) { + if (lfconv && ES_LINEFEED == *str + && (i == 0 || ES_CARRIAGE_RETURN != str[-1])) { + if (ocount < bufcount) + ucs2str[ocount] = ES_CARRIAGE_RETURN; + ocount++; + } + if (ocount < bufcount) + ucs2str[ocount] = *str; + ocount++; + i++; + str++; + } else if (0xf8 == (*str & 0xf8)) /* more than 5 byte code */ + { + ocount = (SQLULEN)-1; + goto cleanup; + } else if (0xf0 == (*str & 0xf8)) /* 4 byte code */ + { + if (errcheck) { + if (i + 4 > ilen || 0 == (str[1] & 0x80) || 0 == (str[2] & 0x80) + || 0 == (str[3] & 0x80)) { + ocount = (SQLULEN)-1; + goto cleanup; + } + } + if (ocount < bufcount) { + wcode = (surrog1_bits | ((((UInt4)*str) & byte4_m1) << 8) + | ((((UInt4)str[1]) & byte4_m2) << 2) + | ((((UInt4)str[2]) & byte4_m31) >> 4)) + - surrogate_adjust; + ucs2str[ocount] = (SQLWCHAR)wcode; + } + ocount++; + if (ocount < bufcount) { + wcode = surrog2_bits | ((((UInt4)str[2]) & byte4_m32) << 6) + | (((UInt4)str[3]) & byte4_m4); + ucs2str[ocount] = (SQLWCHAR)wcode; + } + ocount++; + i += 4; + str += 4; + } else if (0xe0 == (*str & 0xf0)) /* 3 byte code */ + { + if (errcheck) { + if (i + 3 > ilen || 0 == (str[1] & 0x80) + || 0 == (str[2] & 0x80)) { + ocount = (SQLULEN)-1; + goto cleanup; + } + } + if (ocount < bufcount) { + wcode = ((((UInt4)*str) & byte3_m1) << 12) + | ((((UInt4)str[1]) & byte3_m2) << 6) + | (((UInt4)str[2]) & byte3_m3); + ucs2str[ocount] = (SQLWCHAR)wcode; + } + ocount++; + i += 3; + str += 3; 
+ } else if (0xc0 == (*str & 0xe0)) /* 2 byte code */ + { + if (errcheck) { + if (i + 2 > ilen || 0 == (str[1] & 0x80)) { + ocount = (SQLULEN)-1; + goto cleanup; + } + } + if (ocount < bufcount) { + wcode = ((((UInt4)*str) & byte2_m1) << 6) + | (((UInt4)str[1]) & byte2_m2); + ucs2str[ocount] = (SQLWCHAR)wcode; + } + ocount++; + i += 2; + str += 2; + } else { + ocount = (SQLULEN)-1; + goto cleanup; + } + } +cleanup: + rtn = ocount; + if (ocount == (SQLULEN)-1) { + if (!errcheck) + rtn = 0; + ocount = 0; + } + if (ocount < bufcount && ucs2str) + ucs2str[ocount] = 0; + MYPRINTF(ES_ALL, " ocount=" FORMAT_ULEN "\n", ocount); + return rtn; +} + +#ifdef __WCS_ISO10646__ + +/* UCS4 => utf8 */ +#define byte4check 0xffff0000 +#define byte4_check 0x10000 +#define byte4_mask1 0x1c0000 +#define byte4_mask2 0x3f000 +#define byte4_mask3 0x0fc0 +#define byte4_mask4 0x003f + +#define byte4_m3 0x3f + +static SQLULEN ucs4strlen(const UInt4 *ucs4str) { + SQLULEN len; + for (len = 0; ucs4str[len]; len++) + ; + return len; +} + +static char *ucs4_to_utf8(const UInt4 *ucs4str, SQLLEN ilen, SQLLEN *olen, + BOOL lower_identifier) { + char *utf8str; + int len = 0; + MYLOG(ES_DEBUG, " %p ilen=" FORMAT_LEN "\n", ucs4str, ilen); + + if (!ucs4str) { + if (olen) + *olen = SQL_NULL_DATA; + return NULL; + } + if (little_endian < 0) { + int crt = 1; + little_endian = (0 != ((char *)&crt)[0]); + } + if (ilen < 0) + ilen = ucs4strlen(ucs4str); + MYLOG(ES_DEBUG, " newlen=" FORMAT_LEN "\n", ilen); + utf8str = (char *)malloc(ilen * 4 + 1); + if (utf8str) { + int i; + UInt2 byte2code; + Int4 byte4code; + const UInt4 *wstr; + + for (i = 0, wstr = ucs4str; i < ilen; i++, wstr++) { + if (!*wstr) + break; + else if (0 == (*wstr & 0xffffff80)) /* ASCII */ + { + if (lower_identifier) + utf8str[len++] = (char)tolower(*wstr); + else + utf8str[len++] = (char)*wstr; + } else if ((*wstr & byte3check) == 0) { + byte2code = byte2_base | ((byte2_mask1 & *wstr) >> 6) + | ((byte2_mask2 & *wstr) << 8); + if 
(little_endian) + memcpy(utf8str + len, (char *)&byte2code, + sizeof(byte2code)); + else { + utf8str[len] = ((char *)&byte2code)[1]; + utf8str[len + 1] = ((char *)&byte2code)[0]; + } + len += sizeof(byte2code); + } else if ((*wstr & byte4check) == 0) { + byte4code = byte3_base | ((byte3_mask1 & *wstr) >> 12) + | ((byte3_mask2 & *wstr) << 2) + | ((byte3_mask3 & *wstr) << 16); + if (little_endian) + memcpy(utf8str + len, (char *)&byte4code, 3); + else { + utf8str[len] = ((char *)&byte4code)[3]; + utf8str[len + 1] = ((char *)&byte4code)[2]; + utf8str[len + 2] = ((char *)&byte4code)[1]; + } + len += 3; + } else { + byte4code = byte4_base | ((byte4_mask1 & *wstr) >> 18) + | ((byte4_mask2 & *wstr) >> 4) + | ((byte4_mask3 & *wstr) << 10) + | ((byte4_mask4 & *wstr) << 24); + /* MYLOG(ES_DEBUG, " %08x->%08x\n", *wstr, byte4code); */ + if (little_endian) + memcpy(utf8str + len, (char *)&byte4code, + sizeof(byte4code)); + else { + utf8str[len] = ((char *)&byte4code)[3]; + utf8str[len + 1] = ((char *)&byte4code)[2]; + utf8str[len + 2] = ((char *)&byte4code)[1]; + utf8str[len + 3] = ((char *)&byte4code)[0]; + } + len += sizeof(byte4code); + } + } + utf8str[len] = '\0'; + if (olen) + *olen = len; + } + MYLOG(ES_DEBUG, " olen=%d %s\n", len, utf8str ? utf8str : ""); + return utf8str; +} + +/* + * Convert a string from UTF-8 encoding to UTF-32. + * + * utf8str - input string in UTF-8 + * ilen - length of input string in bytes (or minus) + * lfconv - TRUE if line feeds (LF) should be converted to CR + LF + * ucs4str - output buffer + * bufcount - size of output buffer + * errcheck - if TRUE, check for invalidly encoded input characters + * + * Returns the number of UInt4s copied to output buffer. If the output + * buffer is too small, the output is truncated. The output string is + * NULL-terminated, except when the output is truncated. 
+ */ +static SQLULEN utf8_to_ucs4_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, + UInt4 *ucs4str, SQLULEN bufcount, + BOOL errcheck) { + int i; + SQLULEN rtn, ocount, wcode; + const UCHAR *str; + + MYLOG(ES_DEBUG, " ilen=" FORMAT_LEN " bufcount=" FORMAT_ULEN "\n", ilen, bufcount); + if (!utf8str) + return 0; + MYLOG(99, " string=%s\n", utf8str); + + if (!bufcount) + ucs4str = NULL; + else if (!ucs4str) + bufcount = 0; + if (ilen < 0) + ilen = strlen(utf8str); + for (i = 0, ocount = 0, str = (SQLCHAR *)utf8str; i < ilen && *str;) { + if ((*str & 0x80) == 0) { + if (lfconv && ES_LINEFEED == *str + && (i == 0 || ES_CARRIAGE_RETURN != str[-1])) { + if (ocount < bufcount) + ucs4str[ocount] = ES_CARRIAGE_RETURN; + ocount++; + } + if (ocount < bufcount) + ucs4str[ocount] = *str; + ocount++; + i++; + str++; + } else if (0xf8 == (*str & 0xf8)) /* more than 5 byte code */ + { + ocount = (SQLULEN)-1; + goto cleanup; + } else if (0xf0 == (*str & 0xf8)) /* 4 byte code */ + { + if (errcheck) { + if (i + 4 > ilen || 0 == (str[1] & 0x80) || 0 == (str[2] & 0x80) + || 0 == (str[3] & 0x80)) { + ocount = (SQLULEN)-1; + goto cleanup; + } + } + if (ocount < bufcount) { + wcode = (((((UInt4)*str) & byte4_m1) << 18) + | ((((UInt4)str[1]) & byte4_m2) << 12) + | ((((UInt4)str[2]) & byte4_m3) << 6)) + | (((UInt4)str[3]) & byte4_m4); + ucs4str[ocount] = (unsigned int)wcode; + } + ocount++; + i += 4; + str += 4; + } else if (0xe0 == (*str & 0xf0)) /* 3 byte code */ + { + if (errcheck) { + if (i + 3 > ilen || 0 == (str[1] & 0x80) + || 0 == (str[2] & 0x80)) { + ocount = (SQLULEN)-1; + goto cleanup; + } + } + if (ocount < bufcount) { + wcode = ((((UInt4)*str) & byte3_m1) << 12) + | ((((UInt4)str[1]) & byte3_m2) << 6) + | (((UInt4)str[2]) & byte3_m3); + ucs4str[ocount] = (unsigned int)wcode; + } + ocount++; + i += 3; + str += 3; + } else if (0xc0 == (*str & 0xe0)) /* 2 byte code */ + { + if (errcheck) { + if (i + 2 > ilen || 0 == (str[1] & 0x80)) { + ocount = (SQLULEN)-1; + goto cleanup; + } + 
} + if (ocount < bufcount) { + wcode = ((((UInt4)*str) & byte2_m1) << 6) + | (((UInt4)str[1]) & byte2_m2); + ucs4str[ocount] = (SQLWCHAR)wcode; + } + ocount++; + i += 2; + str += 2; + } else { + ocount = (SQLULEN)-1; + goto cleanup; + } + } +cleanup: + rtn = ocount; + if (ocount == (SQLULEN)-1) { + if (!errcheck) + rtn = 0; + ocount = 0; + } + if (ocount < bufcount && ucs4str) + ucs4str[ocount] = 0; + MYLOG(ES_DEBUG, " ocount=" FORMAT_ULEN "\n", ocount); + return rtn; +} + +#define SURROGATE_CHECK 0xfc +#define SURROG1_BYTE 0xd8 +#define SURROG2_BYTE 0xdc + +static int ucs4_to_ucs2_lf(const unsigned int *ucs4str, SQLLEN ilen, + SQLWCHAR *ucs2str, int bufcount, BOOL lfconv) { + int outlen = 0, i; + UCHAR *ucdt; + SQLWCHAR *sqlwdt, dmy_wchar; + UCHAR *const udt = (UCHAR *)&dmy_wchar; + unsigned int uintdt; + + MYLOG(ES_DEBUG, " ilen=" FORMAT_LEN " bufcount=%d\n", ilen, bufcount); + if (ilen < 0) + ilen = ucs4strlen(ucs4str); + for (i = 0; i < ilen && (uintdt = ucs4str[i], uintdt); i++) { + sqlwdt = (SQLWCHAR *)&uintdt; + ucdt = (UCHAR *)&uintdt; + if (0 == sqlwdt[1]) { + if (lfconv && ES_LINEFEED == ucdt[0] + && (i == 0 + || ES_CARRIAGE_RETURN != *((UCHAR *)&ucs4str[i - 1]))) { + if (outlen < bufcount) { + udt[0] = ES_CARRIAGE_RETURN; + udt[1] = 0; + ucs2str[outlen] = *((SQLWCHAR *)udt); + } + outlen++; + } + if (outlen < bufcount) + ucs2str[outlen] = sqlwdt[0]; + outlen++; + continue; + } + sqlwdt[1]--; + udt[0] = ((0xfc & ucdt[1]) >> 2) | ((0x3 & ucdt[2]) << 6); + // printf("%02x", udt[0]); + udt[1] = SURROG1_BYTE | ((0xc & ucdt[2]) >> 2); + // printf("%02x", udt[1]); + if (outlen < bufcount) + ucs2str[outlen] = *((SQLWCHAR *)udt); + outlen++; + udt[0] = ucdt[0]; + // printf("%02x", udt[0]); + udt[1] = SURROG2_BYTE | (0x3 & ucdt[1]); + // printf("%02x\n", udt[1]); + if (outlen < bufcount) + ucs2str[outlen] = *((SQLWCHAR *)udt); + outlen++; + } + if (outlen < bufcount) + ucs2str[outlen] = 0; + + return outlen; +} +static int ucs2_to_ucs4(const SQLWCHAR *ucs2str, 
SQLLEN ilen, + unsigned int *ucs4str, int bufcount) { + int outlen = 0, i; + UCHAR *ucdt; + SQLWCHAR sqlwdt; + unsigned int dmy_uint; + UCHAR *const udt = (UCHAR *)&dmy_uint; + + MYLOG(ES_DEBUG, " ilen=" FORMAT_LEN " bufcount=%d\n", ilen, bufcount); + if (ilen < 0) + ilen = ucs2strlen(ucs2str); + udt[3] = 0; /* always */ + for (i = 0; i < ilen && (sqlwdt = ucs2str[i], sqlwdt); i++) { + ucdt = (UCHAR *)(ucs2str + i); + // printf("IN=%x\n", sqlwdt); + if ((ucdt[1] & SURROGATE_CHECK) != SURROG1_BYTE) { + // printf("SURROG1=%2x\n", ucdt[1] & SURROG1_BYTE); + if (outlen < bufcount) { + udt[0] = ucdt[0]; + udt[1] = ucdt[1]; + udt[2] = 0; + ucs4str[outlen] = *((unsigned int *)udt); + } + outlen++; + continue; + } + /* surrogate pair */ + udt[0] = ucdt[2]; + udt[1] = (ucdt[3] & 0x3) | ((ucdt[0] & 0x3f) << 2); + udt[2] = (((ucdt[0] & 0xc0) >> 6) | ((ucdt[1] & 0x3) << 2)) + 1; + // udt[3] = 0; needless + if (outlen < bufcount) + ucs4str[outlen] = *((unsigned int *)udt); + outlen++; + i++; + } + if (outlen < bufcount) + ucs4str[outlen] = 0; + + return outlen; +} +#endif /* __WCS_ISO10646__ */ + +#if defined(__WCS_ISO10646__) + +static SQLULEN utf8_to_wcs_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, + wchar_t *wcsstr, SQLULEN bufcount, + BOOL errcheck) { + switch (get_convtype()) { + case WCSTYPE_UTF16_LE: + return utf8_to_ucs2_lf(utf8str, ilen, lfconv, (SQLWCHAR *)wcsstr, + bufcount, errcheck); + case WCSTYPE_UTF32_LE: + return utf8_to_ucs4_lf(utf8str, ilen, lfconv, (UInt4 *)wcsstr, + bufcount, errcheck); + } + return (SQLULEN)~0; +} + +static char *wcs_to_utf8(const wchar_t *wcsstr, SQLLEN ilen, SQLLEN *olen, + BOOL lower_identifier) { + switch (get_convtype()) { + case WCSTYPE_UTF16_LE: + return ucs2_to_utf8((const SQLWCHAR *)wcsstr, ilen, olen, + lower_identifier); + case WCSTYPE_UTF32_LE: + return ucs4_to_utf8((const UInt4 *)wcsstr, ilen, olen, + lower_identifier); + } + + return NULL; +} + +/* + * Input strings must be NULL terminated. 
+ * Output wide character strings would be NULL terminated. The result + * outmsg would be truncated when the buflen is small. + * + * The output NULL terminator is counted as buflen. + * if outmsg is NULL or buflen is 0, only output length is returned. + * As for return values, NULL terminators aren't counted. + */ +static int msgtowstr(const char *inmsg, wchar_t *outmsg, int buflen) { + int outlen = -1; + + MYLOG(ES_DEBUG, " inmsg=%p buflen=%d\n", inmsg, buflen); +#ifdef WIN32 + if (NULL == outmsg) + buflen = 0; + if ((outlen = + MultiByteToWideChar(CP_ACP, MB_PRECOMPOSED | MB_ERR_INVALID_CHARS, + inmsg, -1, outmsg, buflen)) + > 0) + outlen--; + else if (ERROR_INSUFFICIENT_BUFFER == GetLastError()) + outlen = + MultiByteToWideChar(CP_ACP, MB_PRECOMPOSED | MB_ERR_INVALID_CHARS, + inmsg, -1, NULL, 0) + - 1; + else + outlen = -1; +#else + if (0 == buflen) + outmsg = NULL; + outlen = mbstowcs((wchar_t *)outmsg, inmsg, buflen); +#endif /* WIN32 */ + if (outmsg && outlen >= buflen) { + outmsg[buflen - 1] = 0; + MYLOG(ES_DEBUG, " out=%dchars truncated to %d\n", outlen, buflen - 1); + } + MYLOG(ES_DEBUG, " buf=%dchars out=%dchars\n", buflen, outlen); + + return outlen; +} + +/* + * Input wide character strings must be NULL terminated. + * Output strings would be NULL terminated. The result outmsg would be + * truncated when the buflen is small. + * + * The output NULL terminator is counted as buflen. + * if outmsg is NULL or buflen is 0, only output length is returned. + * As for return values, NULL terminators aren't counted. 
+ */ +static int wstrtomsg(const wchar_t *wstr, char *outmsg, int buflen) { + int outlen = -1; + + MYLOG(ES_DEBUG, " wstr=%p buflen=%d\n", wstr, buflen); +#ifdef WIN32 + if (NULL == outmsg) + buflen = 0; + if ((outlen = WideCharToMultiByte(CP_ACP, 0, wstr, -1, outmsg, buflen, NULL, + NULL)) + > 0) + outlen--; + else if (ERROR_INSUFFICIENT_BUFFER == GetLastError()) + outlen = + WideCharToMultiByte(CP_ACP, 0, wstr, -1, NULL, 0, NULL, NULL) - 1; + else + outlen = -1; +#else + if (0 == buflen) + outmsg = NULL; + outlen = wcstombs(outmsg, wstr, buflen); +#endif /* WIN32 */ + if (outmsg && outlen >= buflen) { + outmsg[buflen - 1] = 0; + MYLOG(ES_DEBUG, " out=%dbytes truncated to %d\n", outlen, buflen - 1); + } + MYLOG(ES_DEBUG, " buf=%dbytes outlen=%dbytes\n", buflen, outlen); + + return outlen; +} +#endif /* __WCS_ISO10646__ */ + +#if defined(__CHAR16_UTF_16__) + +static mbstate_t initial_state; + +static SQLLEN mbstoc16_lf(char16_t *c16dt, const char *c8dt, size_t n, + BOOL lf_conv) { + int i; + size_t brtn; + const char *cdt; + mbstate_t mbst = initial_state; + + MYLOG(ES_DEBUG, " c16dt=%p size=" FORMAT_SIZE_T "\n", c16dt, n); + for (i = 0, cdt = c8dt; i < n || (!c16dt); i++) { + if (lf_conv && ES_LINEFEED == *cdt && i > 0 + && ES_CARRIAGE_RETURN != cdt[-1]) { + if (c16dt) + c16dt[i] = ES_CARRIAGE_RETURN; + i++; + } + brtn = mbrtoc16(c16dt ? 
c16dt + i : NULL, cdt, 4, &mbst); + if (0 == brtn) + break; + if (brtn == (size_t)-1 || brtn == (size_t)-2) + return -1; + if (brtn == (size_t)-3) + continue; + cdt += brtn; + } + if (c16dt && i >= n) + c16dt[n - 1] = 0; + + return i; +} + +static SQLLEN c16tombs(char *c8dt, const char16_t *c16dt, size_t n) { + int i; + SQLLEN result = 0; + size_t brtn; + char *cdt, c4byte[4]; + mbstate_t mbst = initial_state; + + MYLOG(ES_DEBUG, " c8dt=%p size=" FORMAT_SIZE_T "u\n", c8dt, n); + if (!c8dt) + n = 0; + for (i = 0, cdt = c8dt; c16dt[i] && (result < n || (!cdt)); i++) { + if (NULL != cdt && result + 4 < n) + brtn = c16rtomb(cdt, c16dt[i], &mbst); + else { + brtn = c16rtomb(c4byte, c16dt[i], &mbst); + if (brtn < 5) { + SQLLEN result_n = result + brtn; + + if (result_n < n) + memcpy(cdt, c4byte, brtn); + else { + if (cdt && n > 0) { + c8dt[result] = '\0'; /* truncate */ + return result_n; + } + } + } + } + /* + printf("c16dt=%04X brtn=%lu result=%ld cdt=%02X%02X%02X%02X\n", + c16dt[i], brtn, result, (UCHAR) cdt[0], (UCHAR) cdt[1], (UCHAR) + cdt[2], (UCHAR) cdt[3]); + */ + if (brtn == (size_t)-1) { + if (n > 0) + c8dt[n - 1] = '\0'; + return -1; + } + if (cdt) + cdt += brtn; + result += brtn; + } + if (cdt) + *cdt = '\0'; + + return result; +} +#endif /* __CHAR16_UTF_16__ */ + +// +// SQLBindParameter SQL_C_CHAR to UTF-8 case +// the current locale => UTF-8 +// +SQLLEN bindpara_msg_to_utf8(const char *ldt, char **wcsbuf, SQLLEN used) { + SQLLEN l = (-2); + char *utf8 = NULL, *ldt_nts, *alloc_nts = NULL, ntsbuf[128]; + int count; + + if (SQL_NTS == used) { + count = (int)strlen(ldt); + ldt_nts = (char *)ldt; + } else if (used < 0) { + return -1; + } else { + count = (int)used; + if (used < (SQLLEN)sizeof(ntsbuf)) + ldt_nts = ntsbuf; + else { + if (NULL == (alloc_nts = malloc(used + 1))) + return l; + ldt_nts = alloc_nts; + } + memcpy(ldt_nts, ldt, used); + ldt_nts[used] = '\0'; + } + + get_convtype(); + MYLOG(ES_DEBUG, " \n"); +#if defined(__WCS_ISO10646__) + if (use_wcs) 
{ + wchar_t *wcsdt = (wchar_t *)malloc((count + 1) * sizeof(wchar_t)); + + if ((l = msgtowstr(ldt_nts, (wchar_t *)wcsdt, count + 1)) >= 0) + utf8 = wcs_to_utf8(wcsdt, -1, &l, FALSE); + free(wcsdt); + } +#endif /* __WCS_ISO10646__ */ +#ifdef __CHAR16_UTF_16__ + if (use_c16) { + SQLWCHAR *utf16 = (SQLWCHAR *)malloc((count + 1) * sizeof(SQLWCHAR)); + + if ((l = mbstoc16_lf((char16_t *)utf16, ldt_nts, count + 1, FALSE)) + >= 0) + utf8 = ucs2_to_utf8(utf16, -1, &l, FALSE); + free(utf16); + } +#endif /* __CHAR16_UTF_16__ */ + if (l < 0 && NULL != utf8) + free(utf8); + else + *wcsbuf = (char *)utf8; + + if (NULL != alloc_nts) + free(alloc_nts); + return l; +} + +// +// SQLBindParameter hybrid case +// SQLWCHAR(UTF-16) => the current locale +// +SQLLEN bindpara_wchar_to_msg(const SQLWCHAR *utf16, char **wcsbuf, + SQLLEN used) { + SQLLEN l = (-2); + char *ldt = NULL; + SQLWCHAR *utf16_nts, *alloc_nts = NULL, ntsbuf[128]; + int count; + + if (SQL_NTS == used) { + count = (int)ucs2strlen(utf16); + utf16_nts = (SQLWCHAR *)utf16; + } else if (used < 0) + return -1; + else { + count = (int)(used / WCLEN); + if (used + WCLEN <= sizeof(ntsbuf)) + utf16_nts = ntsbuf; + else { + if (NULL == (alloc_nts = (SQLWCHAR *)malloc(used + WCLEN))) + return l; + utf16_nts = alloc_nts; + } + memcpy(utf16_nts, utf16, used); + utf16_nts[count] = 0; + } + + get_convtype(); + MYLOG(ES_DEBUG, "\n"); +#if defined(__WCS_ISO10646__) + if (use_wcs) { +#pragma warning(push) +#pragma warning(disable : 4127) + if (sizeof(SQLWCHAR) == sizeof(wchar_t)) +#pragma warning(pop) + { + ldt = (char *)malloc(2 * count + 1); + l = wstrtomsg((wchar_t *)utf16_nts, ldt, 2 * count + 1); + } else { + unsigned int *utf32 = + (unsigned int *)malloc((count + 1) * sizeof(unsigned int)); + + l = ucs2_to_ucs4(utf16_nts, -1, utf32, count + 1); + if ((l = wstrtomsg((wchar_t *)utf32, NULL, 0)) >= 0) { + ldt = (char *)malloc(l + 1); + l = wstrtomsg((wchar_t *)utf32, ldt, (int)l + 1); + } + free(utf32); + } + } +#endif /* 
__WCS_ISO10646__ */ +#ifdef __CHAR16_UTF_16__ + if (use_c16) { + ldt = (char *)malloc(4 * count + 1); + l = c16tombs(ldt, (const char16_t *)utf16_nts, 4 * count + 1); + } +#endif /* __CHAR16_UTF_16__ */ + if (l < 0 && NULL != ldt) + free(ldt); + else + *wcsbuf = ldt; + + if (NULL != alloc_nts) + free(alloc_nts); + return l; +} + +size_t convert_linefeeds(const char *s, char *dst, size_t max, BOOL convlf, + BOOL *changed); +// +// SQLBindCol hybrid case +// the current locale => SQLWCHAR(UTF-16) +// +SQLLEN bindcol_hybrid_estimate(const char *ldt, BOOL lf_conv, char **wcsbuf) { + UNUSED(ldt, wcsbuf); + SQLLEN l = (-2); + + get_convtype(); + MYLOG(ES_DEBUG, " lf_conv=%d\n", lf_conv); +#if defined(__WCS_ISO10646__) + if (use_wcs) { + unsigned int *utf32 = NULL; + +#pragma warning(push) +#pragma warning(disable : 4127) + if (sizeof(SQLWCHAR) == sizeof(wchar_t)) +#pragma warning(pop) + { + l = msgtowstr(ldt, (wchar_t *)NULL, 0); + if (l >= 0 && lf_conv) { + BOOL changed; + size_t len; + + len = convert_linefeeds(ldt, NULL, 0, TRUE, &changed); + if (changed) { + l += (len - strlen(ldt)); + *wcsbuf = (char *)malloc(len + 1); + convert_linefeeds(ldt, *wcsbuf, len + 1, TRUE, NULL); + } + } + } else { + int count = (int)strlen(ldt); + + utf32 = (unsigned int *)malloc((count + 1) * sizeof(unsigned int)); + if ((l = msgtowstr(ldt, (wchar_t *)utf32, count + 1)) >= 0) { + l = ucs4_to_ucs2_lf(utf32, -1, NULL, 0, lf_conv); + *wcsbuf = (char *)utf32; + } + } + if (l < 0 && NULL != utf32) + free(utf32); + } +#endif /* __WCS_ISO10646__ */ +#ifdef __CHAR16_UTF_16__ + if (use_c16) + l = mbstoc16_lf((char16_t *)NULL, ldt, 0, lf_conv); +#endif /* __CHAR16_UTF_16__ */ + + return l; +} + +SQLLEN bindcol_hybrid_exec(SQLWCHAR *utf16, const char *ldt, size_t n, + BOOL lf_conv, char **wcsbuf) { + UNUSED(ldt, utf16, wcsbuf); + SQLLEN l = (-2); + + get_convtype(); + MYLOG(ES_DEBUG, " size=" FORMAT_SIZE_T " lf_conv=%d\n", n, lf_conv); +#if defined(__WCS_ISO10646__) + if (use_wcs) { + unsigned int 
*utf32 = NULL; + BOOL midbuf = (wcsbuf && *wcsbuf); + +#pragma warning(push) +#pragma warning(disable : 4127) + if (sizeof(SQLWCHAR) == sizeof(wchar_t)) +#pragma warning(pop) + { + if (midbuf) + l = msgtowstr(*wcsbuf, (wchar_t *)utf16, (int)n); + else + l = msgtowstr(ldt, (wchar_t *)utf16, (int)n); + } else if (midbuf) { + utf32 = (unsigned int *)*wcsbuf; + l = ucs4_to_ucs2_lf(utf32, -1, utf16, (int)n, lf_conv); + } else { + int count = (int)strlen(ldt); + + utf32 = (unsigned int *)malloc((count + 1) * sizeof(unsigned int)); + if ((l = msgtowstr(ldt, (wchar_t *)utf32, count + 1)) >= 0) { + l = ucs4_to_ucs2_lf(utf32, -1, utf16, (int)n, lf_conv); + } + free(utf32); + } + if (midbuf) { + free(*wcsbuf); + *wcsbuf = NULL; + } + } +#endif /* __WCS_ISO10646__ */ +#ifdef __CHAR16_UTF_16__ + if (use_c16) { + l = mbstoc16_lf((char16_t *)utf16, ldt, n, lf_conv); + } +#endif /* __CHAR16_UTF_16__ */ + + return l; +} + +SQLLEN locale_to_sqlwchar(SQLWCHAR *utf16, const char *ldt, size_t n, + BOOL lf_conv) { + return bindcol_hybrid_exec(utf16, ldt, n, lf_conv, NULL); +} + +// +// SQLBindCol localize case +// UTF-8 => the current locale +// +SQLLEN bindcol_localize_estimate(const char *utf8dt, BOOL lf_conv, + char **wcsbuf) { + UNUSED(utf8dt); + SQLLEN l = (-2); + char *convalc = NULL; + + get_convtype(); + MYLOG(ES_DEBUG, " lf_conv=%d\n", lf_conv); +#if defined(__WCS_ISO10646__) + if (use_wcs) { + wchar_t *wcsalc = NULL; + + l = utf8_to_wcs_lf(utf8dt, -1, lf_conv, NULL, 0, FALSE); + wcsalc = (wchar_t *)malloc(sizeof(wchar_t) * (l + 1)); + convalc = (char *)wcsalc; + l = utf8_to_wcs_lf(utf8dt, -1, lf_conv, wcsalc, l + 1, FALSE); + l = wstrtomsg(wcsalc, NULL, 0); + } +#endif /* __WCS_ISO10646__ */ +#ifdef __CHAR16_UTF_16__ + if (use_c16) { + SQLWCHAR *wcsalc = NULL; + + l = utf8_to_ucs2_lf(utf8dt, -1, lf_conv, (SQLWCHAR *)NULL, 0, FALSE); + wcsalc = (SQLWCHAR *)malloc(sizeof(SQLWCHAR) * (l + 1)); + convalc = (char *)wcsalc; + l = utf8_to_ucs2_lf(utf8dt, -1, lf_conv, wcsalc, l + 1, 
FALSE); + l = c16tombs(NULL, (char16_t *)wcsalc, 0); + } +#endif /* __CHAR16_UTF_16__ */ + if (l < 0 && NULL != convalc) + free(convalc); + else if (NULL != convalc) + *wcsbuf = (char *)convalc; + + MYLOG(ES_DEBUG, " return=" FORMAT_LEN "\n", l); + return l; +} + +SQLLEN bindcol_localize_exec(char *ldt, size_t n, BOOL lf_conv, char **wcsbuf) { + UNUSED(ldt, lf_conv); + SQLLEN l = (-2); + + get_convtype(); + MYLOG(ES_DEBUG, " size=" FORMAT_SIZE_T "\n", n); +#if defined(__WCS_ISO10646__) + if (use_wcs) { + wchar_t *wcsalc = (wchar_t *)*wcsbuf; + + l = wstrtomsg(wcsalc, ldt, (int)n); + } +#endif /* __WCS_ISO10646__ */ +#ifdef __CHAR16_UTF_16__ + if (use_c16) { + char16_t *wcsalc = (char16_t *)*wcsbuf; + + l = c16tombs(ldt, (char16_t *)wcsalc, n); + } +#endif /* __CHAR16_UTF_16__ */ + free(*wcsbuf); + *wcsbuf = NULL; + + MYLOG(ES_DEBUG, " return=" FORMAT_LEN "\n", l); + return l; +} + +#endif /* UNICODE_SUPPORT */ diff --git a/sql-workbench/.kibana-plugin-helpers.json b/sql-workbench/.kibana-plugin-helpers.json new file mode 100644 index 0000000000..91012ec952 --- /dev/null +++ b/sql-workbench/.kibana-plugin-helpers.json @@ -0,0 +1,12 @@ +{ + "styleSheetToCompile": "public/app.scss", + "buildSourcePatterns": [ + "package.json", + "tsconfig.json", + "yarn.lock", + ".yarnrc", + "index.js", + "{lib,public,server,test}/**/*", + "!__tests__" + ] +} diff --git a/sql-workbench/CODE_OF_CONDUCT.md b/sql-workbench/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..c673f8d062 --- /dev/null +++ b/sql-workbench/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Code of Conduct + +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). diff --git a/sql-workbench/CONTRIBUTING.md b/sql-workbench/CONTRIBUTING.md new file mode 100644 index 0000000000..841ab0a8a7 --- /dev/null +++ b/sql-workbench/CONTRIBUTING.md @@ -0,0 +1,59 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. 
Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check [existing open](https://github.com/opendistro-for-elasticsearch/sql-workbench/issues), or [recently closed](https://github.com/opendistro-for-elasticsearch/sql-workbench/issues?q=is%3Aissue+is%3Aclosed), issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *master* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. 
Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any issue tagged ["good first issue"](https://github.com/opendistro-for-elasticsearch/sql-workbench/issues?q=is%3Aopen+label%3A%22help+wanted%22+label%3A%22good+first+issue%22) is a great place to start. + + +## Code of Conduct + +This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html). + + +## Security issue notifications + +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. + + +## Licensing + +See the [LICENSE](LICENSE.TXT) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 
diff --git a/sql-workbench/CONTRIBUTORS.md b/sql-workbench/CONTRIBUTORS.md new file mode 100644 index 0000000000..4299ae5e4e --- /dev/null +++ b/sql-workbench/CONTRIBUTORS.md @@ -0,0 +1,15 @@ +Contributors in order of last name: + +Peng Huo + +Abbas Hussain + +Anirudh Jadhav + +Joshua Li + +Francesca Paoletti + +Alolita Sharma + +Chloe Zhang diff --git a/sql-workbench/LICENSE.TXT b/sql-workbench/LICENSE.TXT new file mode 100644 index 0000000000..67db858821 --- /dev/null +++ b/sql-workbench/LICENSE.TXT @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/sql-workbench/NOTICE b/sql-workbench/NOTICE new file mode 100644 index 0000000000..3a29e2c458 --- /dev/null +++ b/sql-workbench/NOTICE @@ -0,0 +1,2 @@ +Open Distro for Elasticsearch SQL Kibana Plugin +Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
diff --git a/sql-workbench/README.md b/sql-workbench/README.md new file mode 100644 index 0000000000..a2ff72405a --- /dev/null +++ b/sql-workbench/README.md @@ -0,0 +1,67 @@ +# Open Distro for Elasticsearch SQL Workbench + +The Open Distro for Elasticsearch SQL Workbench enables you to query your Elasticsearch data using SQL syntax from a dedicated Kibana UI. You can download your query results data in JSON, JDBC, CSV and raw text formats. + + +## Documentation + +Please see our technical [documentation](https://opendistro.github.io/for-elasticsearch-docs/) to learn more about its features. + + +## Setup + +1. Download Elasticsearch for the version that matches the [Kibana version specified in package.json](./package.json#L8). +1. Download and install the most recent version of [Open Distro for Elasticsearch SQL plugin](https://github.com/opendistro-for-elasticsearch/sql). +1. Download the Kibana source code for the [version specified in package.json](./package.json#L8) you want to set up. + + See the [Kibana contributing guide](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md#setting-up-your-development-environment) for more instructions on setting up your development environment. + +1. Change your node version to the version specified in `.node-version` inside the Kibana root directory. +1. cd into `plugins` directory in the Kibana source code directory. +1. Check out this package from version control into the `plugins` directory. +1. Run `yarn kbn bootstrap` inside `kibana/plugins/sql-workbench`. + +Ultimately, your directory structure should look like this: + +```md +. +├── kibana +│ └── plugins +│ └── sql-workbench +``` + + +## Build + +To build the plugin's distributable zip simply run `yarn build`. + +Example output: `./build/opendistro-sql-workbench-*.zip` + + +## Run + +- `yarn start` + + Starts Kibana and includes this plugin. Kibana will be available on `localhost:5601`. 
+ +- `NODE_PATH=../../node_modules yarn test:jest` + + Runs the plugin tests. + + +## Contributing to Open Distro for Elasticsearch SQL Workbench + +- Refer to [CONTRIBUTING.md](./CONTRIBUTING.md). +- Since this is a workbench, it can be useful to review the [Kibana contributing guide](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md) alongside the documentation around [Kibana plugins](https://www.elastic.co/guide/en/kibana/master/kibana-plugins.html) and [plugin development](https://www.elastic.co/guide/en/kibana/master/plugin-development.html). + +## Bugs, Enhancements or Questions + +Please file an issue to report any bugs you may find, enhancements you may need or questions you may have [here](https://github.com/opendistro-for-elasticsearch/sql-workbench/issues). + +## License + +This code is licensed under the Apache 2.0 License. + +## Copyright + +Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/sql-workbench/THIRD-PARTY b/sql-workbench/THIRD-PARTY new file mode 100644 index 0000000000..1496b14e1c --- /dev/null +++ b/sql-workbench/THIRD-PARTY @@ -0,0 +1,397 @@ +** @elastic/eui; version 23.1.0 -- https://elastic.github.io/eui/#/ +Copyright 2020 Elasticsearch BV +** TSLint; version 6.1.2 -- https://github.com/palantir/tslint +Copyright 2017 Palantir Technologies, Inc. +** typescript; version 3.0.3 -- https://github.com/Microsoft/TypeScript +/*! +***************************************************************************** +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. 
You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. +***************************************************************************** +*/ + +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND +DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, and + distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by the + copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all other + entities that control, are controlled by, or are under common control + with that entity. For the purposes of this definition, "control" means + (i) the power, direct or indirect, to cause the direction or management + of such entity, whether by contract or otherwise, or (ii) ownership of + fifty percent (50%) or more of the outstanding shares, or (iii) + beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity exercising + permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation source, + and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but not limited + to compiled object code, generated documentation, and conversions to + other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or Object + form, made available under the License, as indicated by a copyright + notice that is included in or attached to the work (an example is + provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object form, + that is based on (or derived from) the Work and for which the editorial + revisions, annotations, elaborations, or other modifications represent, + as a whole, an original work of authorship. For the purposes of this + License, Derivative Works shall not include works that remain separable + from, or merely link (or bind by name) to the interfaces of, the Work and + Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including the original + version of the Work and any modifications or additions to that Work or + Derivative Works thereof, that is intentionally submitted to Licensor for + inclusion in the Work by the copyright owner or by an individual or Legal + Entity authorized to submit on behalf of the copyright owner. For the + purposes of this definition, "submitted" means any form of electronic, + verbal, or written communication sent to the Licensor or its + representatives, including but not limited to communication on electronic + mailing lists, source code control systems, and issue tracking systems + that are managed by, or on behalf of, the Licensor for the purpose of + discussing and improving the Work, but excluding communication that is + conspicuously marked or otherwise designated in writing by the copyright + owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity on + behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of this + License, each Contributor hereby grants to You a perpetual, worldwide, + non-exclusive, no-charge, royalty-free, irrevocable copyright license to + reproduce, prepare Derivative Works of, publicly display, publicly perform, + sublicense, and distribute the Work and such Derivative Works in Source or + Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of this + License, each Contributor hereby grants to You a perpetual, worldwide, + non-exclusive, no-charge, royalty-free, irrevocable (except as stated in + this section) patent license to make, have made, use, offer to sell, sell, + import, and otherwise transfer the Work, where such license applies only to + those patent claims licensable by such Contributor that are necessarily + infringed by their Contribution(s) alone or by combination of their + Contribution(s) with the Work to which such Contribution(s) was submitted. + If You institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work or a + Contribution incorporated within the Work constitutes direct or contributory + patent infringement, then any patent licenses granted to You under this + License for that Work shall terminate as of the date such litigation is + filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the Work or + Derivative Works thereof in any medium, with or without modifications, and + in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a + copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating + that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You + distribute, all copyright, patent, trademark, and attribution notices + from the Source form of the Work, excluding those notices that do not + pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must include + a readable copy of the attribution notices contained within such NOTICE + file, excluding those notices that do not pertain to any part of the + Derivative Works, in at least one of the following places: within a + NOTICE text file distributed as part of the Derivative Works; within the + Source form or documentation, if provided along with the Derivative + Works; or, within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents of the + NOTICE file are for informational purposes only and do not modify the + License. You may add Your own attribution notices within Derivative Works + that You distribute, alongside or as an addendum to the NOTICE text from + the Work, provided that such additional attribution notices cannot be + construed as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and may + provide additional or different license terms and conditions for use, + reproduction, or distribution of Your modifications, or for any such + Derivative Works as a whole, provided Your use, reproduction, and + distribution of the Work otherwise complies with the conditions stated in + this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, any + Contribution intentionally submitted for inclusion in the Work by You to the + Licensor shall be under the terms and conditions of this License, without + any additional terms or conditions. Notwithstanding the above, nothing + herein shall supersede or modify the terms of any separate license agreement + you may have executed with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, except + as required for reasonable and customary use in describing the origin of the + Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in + writing, Licensor provides the Work (and each Contributor provides its + Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied, including, without limitation, any + warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or + FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining + the appropriateness of using or redistributing the Work and assume any risks + associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, whether + in tort (including negligence), contract, or otherwise, unless required by + applicable law (such as deliberate and grossly negligent acts) or agreed to + in writing, shall any Contributor be liable to You for damages, including + any direct, indirect, special, incidental, or consequential damages of any + character arising as a result of this License or out of the use or inability + to use the Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all other + commercial damages or losses), even if such Contributor has been advised of + the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing the Work + or Derivative Works thereof, You may choose to offer, and charge a fee for, + acceptance of support, warranty, indemnity, or other liability obligations + and/or rights consistent with this License. However, in accepting such + obligations, You may act only on Your own behalf and on Your sole + responsibility, not on behalf of any other Contributor, and only if You + agree to indemnify, defend, and hold each Contributor harmless for any + liability incurred by, or claims asserted against, such Contributor by + reason of your accepting any such warranty or additional liability. END OF + TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification +within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software + +distributed under the License is distributed on an "AS IS" BASIS, + +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +See the License for the specific language governing permissions and + +limitations under the License. + +* For @elastic/eui see also this required NOTICE: + Copyright 2017 Elasticsearch BV +* For TSLint see also this required NOTICE: + Copyright 2017 Palantir Technologies, Inc. +* For typescript see also this required NOTICE: + /*! + ***************************************************************************** + Copyright (c) Microsoft Corporation. All rights reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use + this file except in compliance with the License. You may obtain a copy of + the + License at http://www.apache.org/licenses/LICENSE-2.0 + + THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS + OF ANY + KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED + WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, + MERCHANTABLITY OR NON-INFRINGEMENT. + + See the Apache Version 2.0 License for specific language governing + permissions + and limitations under the License. + ***************************************************************************** + */ + +------ + +** enzyme; version 3.1.0 -- http://airbnb.io/enzyme/ +Copyright (c) 2015 Airbnb, Inc. + +The MIT License (MIT) + +Copyright (c) 2015 Airbnb, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------ + +** react; version 16.3.0 -- https://reactjs.org/ +Copyright (c) Facebook, Inc. and its affiliates. + +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------ + +** expect.js; version 0.3.1 -- https://github.com/Automattic/expect.js/ +Copyright (c) 2011 Guillermo Rauch +Heavily borrows from should.js by TJ Holowaychuck - MIT. + +(The MIT License) + +Copyright (c) 2011 Guillermo Rauch + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the 'Software'), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------ + +** jest; version 23.6.0 -- https://jestjs.io/ +Copyright (c) 2014-present, Facebook, Inc. + +MIT License + +For Jest software + +Copyright (c) 2014-present, Facebook, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +------ + +** @types/react; version 16.3.14 -- https://www.npmjs.com/package/@types/react +These definitions were written by Asana https://asana.com, AssureSign +http://www.assuresign.com, Microsoft https://microsoft.com, John Reilly +https://github.com/johnnyreilly, Benoit Benezech https://github.com/bbenezech, +Patricio Zavolinsky https://github.com/pzavolinsky, Digiguru +https://github.com/digiguru, Eric Anderson https://github.com/ericanderson, +Tanguy Krotoff https://github.com/tkrotoff, Dovydas Navickas +https://github.com/DovydasNavickas, Stéphane Goetz https://github.com/onigoetz, +Josh Rutherford https://github.com/theruther4d, Guilherme Hübner +https://github.com/guilhermehubner, Ferdy Budhidharma +https://github.com/ferdaber, Johann Rakotoharisoa +https://github.com/jrakotoharisoa, Olivier Pascal +https://github.com/pascaloliv, Martin Hochel https://github.com/hotell, Frank +Li https://github.com/franklixuefei, Jessica Franco +https://github.com/Kovensky, Paul Sherman https://github.com/pshrmn. + +This project is licensed under the MIT license. +Copyrights are respective of each contributor listed at the beginning of each +definition file. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/sql-workbench/babel.config.js b/sql-workbench/babel.config.js new file mode 100644 index 0000000000..b8020f4d08 --- /dev/null +++ b/sql-workbench/babel.config.js @@ -0,0 +1,21 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// babelrc doesn't respect NODE_PATH anymore but using require does. +// Alternative to install them locally in node_modules +module.exports = { + presets: [require("@babel/preset-env"), require("@babel/preset-react"), require("@babel/preset-typescript")], + plugins: [require("@babel/plugin-proposal-class-properties"), require("@babel/plugin-proposal-object-rest-spread"), ["@babel/transform-runtime"]] +}; diff --git a/sql-workbench/index.js b/sql-workbench/index.js new file mode 100644 index 0000000000..086e1eee20 --- /dev/null +++ b/sql-workbench/index.js @@ -0,0 +1,59 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import {resolve} from 'path'; +import {existsSync} from "fs"; + +import query from './server/routes/query'; +import translate from './server/routes/translate'; +import QueryService from './server/services/QueryService'; +import TranslateService from './server/services/TranslateService'; +import { createSqlCluster } from './server/clusters'; +import { DEFAULT_APP_CATEGORIES } from '../../src/core/utils'; + +export const PLUGIN_NAME = 'opendistro-sql-workbench'; + +export default function (kibana) { + return new kibana.Plugin({ + require: ['elasticsearch'], + name: PLUGIN_NAME, + uiExports: { + app: { + title: 'SQL Workbench', + description: 'SQL Workbench', + main: 'plugins/' + PLUGIN_NAME + '/app', + icon:'plugins/' + PLUGIN_NAME + '/icons/sql.svg', + category: DEFAULT_APP_CATEGORIES.kibana, + }, + styleSheetPaths: [resolve(__dirname, 'public/app.scss')].find(p => existsSync(p)) + }, + + config(Joi) { + return Joi.object({ + enabled: Joi.boolean().default(true), + }).default(); + }, + + init(server, options) { // eslint-disable-line no-unused-vars + // Create Clusters + createSqlCluster(server); + const client = server.plugins.elasticsearch; + + // Add server routes and initialize the plugin here + query(server, new QueryService(client)); + translate(server, new TranslateService(client)); + } + }); +} diff --git a/sql-workbench/package.json b/sql-workbench/package.json new file mode 100644 index 0000000000..143539facf --- /dev/null +++ b/sql-workbench/package.json @@ -0,0 +1,98 @@ +{ + "name": "opendistro-sql-workbench", + "version": "1.9.0.2", + "description": "SQL Workbench", + 
"main": "index.js", + "license": "Apache-2.0", + "homepage": "https://github.com/opendistro-for-elasticsearch/sql-workbench", + "kibana": { + "version": "7.8.0", + "templateVersion": "6.3.3" + }, + "repository": { + "type": "git", + "url": "https://github.com/opendistro-for-elasticsearch/sql-workbench" + }, + "scripts": { + "preinstall": "node ../../preinstall_check", + "kbn": "node ../../scripts/kbn", + "lint": "tslint .", + "start": "plugin-helpers start", + "test:server": "plugin-helpers test:server", + "test:browser": "plugin-helpers test:browser", + "test:jest": "NODE_PATH=../../node_modules ../../node_modules/.bin/jest --config ./test/jest.config.js", + "build": "plugin-helpers build" + }, + "dependencies": { + "brace": "0.11.1", + "lodash": "^4.17.15", + "react-dom": "^16.3.0", + "react-double-scrollbar": "^0.0.15", + "node": "^14.0.0" + }, + "devDependencies": { + "@babel/plugin-proposal-class-properties": "^7.7.4", + "@babel/plugin-proposal-object-rest-spread": "^7.7.4", + "@babel/plugin-transform-runtime": "^7.8.3", + "@babel/preset-env": "^7.7.6", + "@babel/preset-react": "^7.7.4", + "@babel/preset-typescript": "^7.3.3", + "@elastic/elasticsearch": "^7.2.0", + "@elastic/eslint-config-kibana": "link:../../packages/eslint-config-kibana", + "@elastic/eslint-import-resolver-kibana": "link:../../packages/kbn-eslint-import-resolver-kibana", + "@elastic/eui": "^23.1.0", + "@kbn/expect": "link:../../packages/kbn-expect", + "@kbn/i18n": "link:../../packages/kbn-i18n", + "@kbn/plugin-helpers": "link:../../packages/kbn-plugin-helpers", + "@kbn/ui-framework": "link:../../packages/kbn-ui-framework", + "@testing-library/jest-dom": "^5.5.0", + "@testing-library/react": "^10.0.3", + "@testing-library/user-event": "^4.1.0", + "@types/angular": "1.6.50", + "@types/hapi-latest": "npm:@types/hapi@18.0.3", + "@types/jest": "^25.2.1", + "@types/lodash": "^4.14.150", + "@types/node": "^13.13.4", + "@types/react": "^16.3.14", + "@types/react-dom": "^16.0.5", + 
"@types/react-router-dom": "^5.1.5", + "babel-eslint": "^10.1.0", + "eslint": "^6.8.0", + "eslint-plugin-babel": "^5.2.0", + "eslint-plugin-import": "^2.14.0", + "eslint-plugin-jest": "^23.8.2", + "eslint-plugin-jsx-a11y": "^6.1.2", + "eslint-plugin-mocha": "^6.3.0", + "eslint-plugin-no-unsanitized": "^3.0.2", + "eslint-plugin-prefer-object-spread": "^1.2.1", + "eslint-plugin-react": "^7.11.1", + "husky": "^4.2.5", + "jest": "^25.5.0", + "jest-cli": "^25.5.0", + "jest-raw-loader": "^1.0.1", + "lint-staged": "^10.2.0", + "mutationobserver-shim": "^0.3.3", + "prettier": "^2.0.5", + "ts-jest": "^25.4.0", + "ts-loader": "^7.0.1", + "ts-node": "^8.9.1", + "tslint": "^6.1.2", + "tslint-config-prettier": "^1.18.0", + "tslint-plugin-prettier": "^2.0.1", + "typescript": "3.0.3" + }, + "engines": { + "node": "10.21.0", + "yarn": "^1.21.1" + }, + "resolutions": { + "**/@types/node": "10.12.27", + "@types/react": "16.3.14", + "**/@types/angular": "1.6.50", + "**/@types/jest": "^24.0.9", + "**/@types/react-dom": "^16.0.5", + "**/@types/react-router-dom": "^4.3.1", + "eslint-utils": "^2.0.0", + "**/@types/react": "16.3.14" + } +} diff --git a/sql-workbench/public/ace-themes/sql_console.css b/sql-workbench/public/ace-themes/sql_console.css new file mode 100644 index 0000000000..e5e998a1d8 --- /dev/null +++ b/sql-workbench/public/ace-themes/sql_console.css @@ -0,0 +1,179 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +.ace-sql-console .ace_gutter { + background: rgb(245, 247, 250); /* $euiColorLightestShade */ + color: rgb(105, 112, 125); /* $euiColorDarkShade */ +} + +.ace-sql-console .ace_print-margin { + width: 1px; + background: #e8e8e8; +} + +.ace-sql-console .ace_fold { + background-color: #6B72E6; +} + +.ace-sql-console { + background-color: rgb(245, 247, 250); /* $euiColorLightestShade */ + color: black; +} + +.ace-sql-console .ace_marker-layer .ace_active-line.ace_active-line { + background-color: rgb(211, 218, 230); /* $euiColorLightShade */; +} + +.ace-sql-console .ace_cursor { + color: black; +} + +.ace-sql-console .ace_invisible { + color: rgb(191, 191, 191); +} + +.ace-sql-console .ace_storage, +.ace-sql-console .ace_keyword { + color: blue; +} + +.ace-sql-console .ace_constant { + color: rgb(197, 6, 11); +} + +.ace-sql-console .ace_constant.ace_buildin { + color: rgb(88, 72, 246); +} + +.ace-sql-console .ace_constant.ace_language { + color: rgb(88, 92, 246); +} + +.ace-sql-console .ace_constant.ace_library { + color: rgb(6, 150, 14); +} + +.ace-sql-console .ace_invalid { + background-color: rgba(255, 0, 0, 0.1); + color: red; +} + +.ace-sql-console .ace_support.ace_function { + color: rgb(60, 76, 114); +} + +.ace-sql-console .ace_support.ace_constant { + color: rgb(6, 150, 14); +} + +.ace-sql-console .ace_support.ace_type, +.ace-sql-console .ace_support.ace_class { + color: rgb(109, 121, 222); +} + +.ace-sql-console .ace_keyword.ace_operator { + color: rgb(104, 118, 135); +} + +.ace-sql-console .ace_string { + color: rgb(3, 106, 7); +} + +.ace-sql-console .ace_comment { + color: rgb(76, 136, 107); +} + +.ace-sql-console .ace_comment.ace_doc { + color: rgb(0, 102, 255); +} + +.ace-sql-console .ace_comment.ace_doc.ace_tag { + color: rgb(128, 159, 191); +} + +.ace-sql-console .ace_constant.ace_numeric { + color: rgb(0, 0, 205); +} + +.ace-sql-console .ace_variable { + color: rgb(49, 132, 149); +} + +.ace-sql-console .ace_xml-pe { + color: rgb(104, 104, 91); 
+} + +.ace-sql-console .ace_entity.ace_name.ace_function { + color: #0000A2; +} + + +.ace-sql-console .ace_heading { + color: rgb(12, 7, 255); +} + +.ace-sql-console .ace_list { + color:rgb(185, 6, 144); +} + +.ace-sql-console .ace_meta.ace_tag { + color:rgb(0, 22, 142); +} + +.ace-sql-console .ace_string.ace_regex { + color: rgb(255, 0, 0) +} + +.ace-sql-console .ace_marker-layer .ace_selection { + background: rgb(181, 213, 255); +} + +.ace-sql-console.ace_multiselect .ace_selection.ace_start { + box-shadow: 0 0 3px 0px white; +} + +.ace-sql-console .ace_marker-layer .ace_step { + background: rgb(252, 255, 0); +} + +.ace-sql-console .ace_marker-layer .ace_stack { + background: rgb(164, 229, 101); +} + +.ace-sql-console .ace_marker-layer .ace_bracket { + margin: -1px 0 0 -1px; + border: 1px solid rgb(192, 192, 192); +} + +.ace-sql-console .ace_marker-layer .ace_active-line { + background: rgba(0, 0, 0, 0.07); +} + +.ace-sql-console .ace_gutter-active-line { + background-color : rgb(211, 218, 230); /* $euiColorLightShade */ +} + +.ace-sql-console .ace_marker-layer .ace_selected-word { + background: rgb(250, 250, 255); + border: 1px solid rgb(200, 200, 250); +} + +.ace-sql-console .ace_indent-guide { + background: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAE0lEQVQImWP4////f4bLly//BwAmVgd1/w11/gAAAABJRU5ErkJggg==") right repeat-y; +} + +.ace_editor .ace-sql-console { + height: 200px; +} diff --git a/sql-workbench/public/ace-themes/sql_console.js b/sql-workbench/public/ace-themes/sql_console.js new file mode 100644 index 0000000000..fcbddd2c29 --- /dev/null +++ b/sql-workbench/public/ace-themes/sql_console.js @@ -0,0 +1,25 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import * as ace from 'brace'; + +ace.define('ace/theme/sql_console', ['require', 'exports', 'module', 'ace/lib/dom'], function (acequire, exports, module) { + exports.isDark = false; + exports.cssClass = 'ace-sql-console'; + exports.cssText = require('./sql_console.css'); + + const dom = acequire('../lib/dom'); + dom.importCssString(exports.cssText, exports.cssClass); +}); diff --git a/sql-workbench/public/app.js b/sql-workbench/public/app.js new file mode 100644 index 0000000000..92e94e717d --- /dev/null +++ b/sql-workbench/public/app.js @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +import React from 'react'; +import { uiModules } from 'ui/modules'; +import chrome from 'ui/chrome'; +import { render, unmountComponentAtNode } from 'react-dom'; + +import 'ui/autoload/styles'; +import './less/main.less'; +import { Main } from './components/Main/main'; + +const app = uiModules.get('apps/sqlConsole'); + +app.config($locationProvider => { + $locationProvider.html5Mode({ + enabled: false, + requireBase: false, + rewriteLinks: false, + }); +}); +app.config(stateManagementConfigProvider => + stateManagementConfigProvider.disable() +); + +function RootController($scope, $element, $http) { + const domNode = $element[0]; + + // render react to DOM + render(
    , domNode); + // unmount react on controller destroy + $scope.$on('$destroy', () => { + unmountComponentAtNode(domNode); + }); +} + +chrome.setRootController('sqlConsole', RootController); diff --git a/sql-workbench/public/app.scss b/sql-workbench/public/app.scss new file mode 100644 index 0000000000..1518c49845 --- /dev/null +++ b/sql-workbench/public/app.scss @@ -0,0 +1,160 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +@import '../node_modules/@elastic/eui/src/global_styling/variables/colors'; +@import '../node_modules/@elastic/eui/src/global_styling/variables/size'; + +.sql-console-page-header { + padding: $euiSizeS $euiSizeL; + font-weight: 600; +} + +.sql-console-query-container { + padding: $euiSizeL; + height: 1142px; +} + +.sql-console-query-editor { + height: 430px; + max-height: 430px; + .container-panel { + border-radius: 0px; + box-shadow: none; + } + .sql-query-panel { + border-radius: 0px; + border-width: 1px; + border-right-width: 0.5px; + box-shadow: none; + } + .sql-query-panel-header { + padding: $euiSizeS; + background-color: $euiColorLightestShade; + } + .translated-query-panel { + border-radius: 0px; + border-width: 1px; + border-left-width: 0.5px; + box-shadow: none; + } + .translated-query-panel-header { + padding: $euiSizeS; + background-color: $euiColorLightestShade; + } + .action-container { + padding: $euiSizeM; + } + .resize-panel { + resize: vertical; + overflow: auto; + cursor: 
row-resize; + } + .sql-editor-link:visited { + background-color: rgb(255,255,255); + } + .sql-editor-button { + margin:10px; + } +} +.sql-console-query-result{ + height: 577px; + scroll-behavior: smooth; + .sql-console-results-container { + overflow: auto; + } + .query-result-container { + border: solid 1.5px #d9d9d9; + border-radius: 0px; + border-bottom-width: 0; + } + .tabs-container { + overflow: hidden; + margin: 3px; + } + .table-name { + font-size: 20px; + padding: 20px; + } + .table-header { + /*border-top: solid 1px #d9d9d9;*/ + background-color: #d9d9d9; + } + .sideNav-table { + border: solid 1px rgb(217, 217, 217); + border-collapse: separate; + } + .search-panel { + display: inline-flex; + width:100%; + padding:20px; + } + .search-bar { + width:80%; + } + .pagination-container { + margin-top: 10px; + margin-bottom: 10px; + } + + .tab-arrow-down-container { + padding: 25px; + height: 56px; + vertical-align: middle; + color: rgb(0, 121, 165); + } + + .download-container { + padding: $euiSizeM; + height: 56px; + vertical-align: middle; + border-width: 1px; + margin-bottom: 20px; + } + + .download-button-container { + float:right; + } + + .toggleContainer { + margin: 20px; + } +} + +.expanded-row { + /*background-color: rgb(232,243,246); + /*background-color: rgb(245,247,250);*/ + border-collapse: separate; +} + +.expanded-row:hover { + background-color: rgb(232,243,246); +} + +.no-background { + background-color: rgba(0, 0, 0, 0); +} + +/* Message Tab */ +.code-editor { + color: #006BB4; +} + +.error-message { + color: red; +} + +.successful-message{ + color: #006BB4; +} diff --git a/sql-workbench/public/components/Header/Header.test.tsx b/sql-workbench/public/components/Header/Header.test.tsx new file mode 100644 index 0000000000..3113f5eac0 --- /dev/null +++ b/sql-workbench/public/components/Header/Header.test.tsx @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import React from "react"; +import "@testing-library/jest-dom/extend-expect"; +import { render } from "@testing-library/react"; +import Header from "./Header"; + + +describe("
    spec", () => { + it("renders the component", () => { + render(
    ); + expect(document.body.children[0]).toMatchSnapshot(); + }); +}); diff --git a/sql-workbench/public/components/Header/Header.tsx b/sql-workbench/public/components/Header/Header.tsx new file mode 100644 index 0000000000..a87af45aed --- /dev/null +++ b/sql-workbench/public/components/Header/Header.tsx @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import React from 'react'; +import { EuiHorizontalRule } from '@elastic/eui'; + +const Header = () => { + return ( +
    + +
    + ); +}; + +export default Header; diff --git a/sql-workbench/public/components/Header/__snapshots__/Header.test.tsx.snap b/sql-workbench/public/components/Header/__snapshots__/Header.test.tsx.snap new file mode 100644 index 0000000000..34fa845af8 --- /dev/null +++ b/sql-workbench/public/components/Header/__snapshots__/Header.test.tsx.snap @@ -0,0 +1,11 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`
    spec renders the component 1`] = ` +
    +
    +
    +
    +
    +`; diff --git a/sql-workbench/public/components/Main/__snapshots__/main.test.tsx.snap b/sql-workbench/public/components/Main/__snapshots__/main.test.tsx.snap new file mode 100644 index 0000000000..88f990cfff --- /dev/null +++ b/sql-workbench/public/components/Main/__snapshots__/main.test.tsx.snap @@ -0,0 +1,2977 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`
    spec click clear button 1`] = ` +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    + SQL Query +
    +
    +
    +

    + Press Enter to start editing. +

    +

    + When you're done, press Escape to stop editing. +

    +
    +
    +